// darkfi/blockchain/header_store.rs

/* This file is part of DarkFi (https://dark.fi)
 *
 * Copyright (C) 2020-2025 Dyne.org foundation
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as
 * published by the Free Software Foundation, either version 3 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Affero General Public License for more details.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program.  If not, see <https://www.gnu.org/licenses/>.
 */

use std::{fmt, str::FromStr};

use darkfi_sdk::{
    blockchain::block_version,
    crypto::{MerkleNode, MerkleTree},
    hex::decode_hex_arr,
    AsHex,
};
#[cfg(feature = "async-serial")]
use darkfi_serial::async_trait;
use darkfi_serial::{deserialize, serialize, Encodable, SerialDecodable, SerialEncodable};
use sled_overlay::{
    serial::{parse_record, parse_u32_key_record},
    sled,
};

use crate::{util::time::Timestamp, Error, Result};

use super::SledDbOverlayPtr;

/// A block header's 32-byte hash (produced via blake3 in [`Header::hash`]).
#[derive(Copy, Clone, Debug, Eq, PartialEq, SerialEncodable, SerialDecodable)]
// We have to introduce a type rather than using an alias so we can restrict API access
pub struct HeaderHash(pub [u8; 32]);

43impl HeaderHash {
44    pub fn new(data: [u8; 32]) -> Self {
45        Self(data)
46    }
47
48    #[inline]
49    pub fn inner(&self) -> &[u8; 32] {
50        &self.0
51    }
52
53    pub fn as_string(&self) -> String {
54        self.0.hex().to_string()
55    }
56}
57
58impl FromStr for HeaderHash {
59    type Err = Error;
60
61    fn from_str(header_hash_str: &str) -> Result<Self> {
62        Ok(Self(decode_hex_arr(header_hash_str)?))
63    }
64}
65
66impl fmt::Display for HeaderHash {
67    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
68        write!(f, "{}", self.0.hex())
69    }
70}
71
/// This struct represents a block header of the form
/// (version, previous, height, timestamp, nonce, root).
#[derive(Debug, Clone, PartialEq, Eq, SerialEncodable, SerialDecodable)]
pub struct Header {
    /// Block version
    pub version: u8,
    /// Previous block hash
    pub previous: HeaderHash,
    /// Block height
    pub height: u32,
    /// Block creation timestamp
    pub timestamp: Timestamp,
    /// The block's nonce. This value changes arbitrarily with mining.
    pub nonce: u64,
    /// Merkle tree root of the transactions hashes contained in this block
    pub root: MerkleNode,
}

89impl Header {
90    pub fn new(previous: HeaderHash, height: u32, timestamp: Timestamp, nonce: u64) -> Self {
91        let version = block_version(height);
92        let root = MerkleTree::new(1).root(0).unwrap();
93        Self { version, previous, height, timestamp, nonce, root }
94    }
95
96    /// Compute the header's hash
97    pub fn hash(&self) -> HeaderHash {
98        let mut hasher = blake3::Hasher::new();
99
100        // Blake3 hasher .update() method never fails.
101        // This call returns a Result due to how the Write trait is specified.
102        // Calling unwrap() here should be safe.
103        self.encode(&mut hasher).expect("blake3 hasher");
104
105        HeaderHash(hasher.finalize().into())
106    }
107}
108
109impl Default for Header {
110    /// Represents the genesis header on current timestamp
111    fn default() -> Self {
112        Header::new(
113            HeaderHash::new(blake3::hash(b"Let there be dark!").into()),
114            0u32,
115            Timestamp::current_time(),
116            0u64,
117        )
118    }
119}
120
121impl fmt::Display for Header {
122    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
123        let s = format!(
124            "{} {{\n\t{}: {}\n\t{}: {}\n\t{}: {}\n\t{}: {}\n\t{}: {}\n\t{}: {}\n\t{}: {}\n}}",
125            "Header",
126            "Hash",
127            self.hash(),
128            "Version",
129            self.version,
130            "Previous",
131            self.previous,
132            "Height",
133            self.height,
134            "Timestamp",
135            self.timestamp,
136            "Nonce",
137            self.nonce,
138            "Root",
139            self.root,
140        );
141
142        write!(f, "{}", s)
143    }
144}
145
/// Name of the main `sled` tree storing confirmed block headers.
pub const SLED_HEADER_TREE: &[u8] = b"_headers";
/// Name of the `sled` tree storing pending headers during sync.
pub const SLED_SYNC_HEADER_TREE: &[u8] = b"_sync_headers";

/// The `HeaderStore` is a structure representing all `sled` trees related
/// to storing the blockchain's blocks' header information.
#[derive(Clone)]
pub struct HeaderStore {
    /// Main `sled` tree, storing all the blockchain's blocks' headers,
    /// where the key is the headers' hash, and value is the serialized header.
    pub main: sled::Tree,
    /// The `sled` tree storing all the node pending headers while syncing,
    /// where the key is the height number, and the value is the serialized
    /// header.
    pub sync: sled::Tree,
}

162impl HeaderStore {
163    /// Opens a new or existing `HeaderStore` on the given sled database.
164    pub fn new(db: &sled::Db) -> Result<Self> {
165        let main = db.open_tree(SLED_HEADER_TREE)?;
166        let sync = db.open_tree(SLED_SYNC_HEADER_TREE)?;
167        Ok(Self { main, sync })
168    }
169
170    /// Insert a slice of [`Header`] into the store's main tree.
171    pub fn insert(&self, headers: &[Header]) -> Result<Vec<HeaderHash>> {
172        let (batch, ret) = self.insert_batch(headers);
173        self.main.apply_batch(batch)?;
174        Ok(ret)
175    }
176
177    /// Insert a slice of [`Header`] into the store's sync tree.
178    pub fn insert_sync(&self, headers: &[Header]) -> Result<()> {
179        let batch = self.insert_batch_sync(headers);
180        self.sync.apply_batch(batch)?;
181        Ok(())
182    }
183
184    /// Generate the sled batch corresponding to an insert to the main
185    /// tree, so caller can handle the write operation.
186    /// The header's hash() function output is used as the key,
187    /// while value is the serialized [`Header`] itself.
188    /// On success, the function returns the header hashes in the same
189    /// order, along with the corresponding operation batch.
190    pub fn insert_batch(&self, headers: &[Header]) -> (sled::Batch, Vec<HeaderHash>) {
191        let mut ret = Vec::with_capacity(headers.len());
192        let mut batch = sled::Batch::default();
193
194        for header in headers {
195            let headerhash = header.hash();
196            batch.insert(headerhash.inner(), serialize(header));
197            ret.push(headerhash);
198        }
199
200        (batch, ret)
201    }
202
203    /// Generate the sled batch corresponding to an insert to the sync
204    /// tree, so caller can handle the write operation.
205    /// The header height is used as the key, while value is the serialized
206    /// [`Header`] itself.
207    pub fn insert_batch_sync(&self, headers: &[Header]) -> sled::Batch {
208        let mut batch = sled::Batch::default();
209
210        for header in headers {
211            batch.insert(&header.height.to_be_bytes(), serialize(header));
212        }
213
214        batch
215    }
216
217    /// Check if the store's main tree contains a given header hash.
218    pub fn contains(&self, headerhash: &HeaderHash) -> Result<bool> {
219        Ok(self.main.contains_key(headerhash.inner())?)
220    }
221
222    /// Fetch given header hashes from the store's main tree.
223    /// The resulting vector contains `Option`, which is `Some` if the header
224    /// was found in the store's main tree, and otherwise it is `None`, if it
225    /// has not. The second parameter is a boolean which tells the function to
226    /// fail in case at least one header was not found.
227    pub fn get(&self, headerhashes: &[HeaderHash], strict: bool) -> Result<Vec<Option<Header>>> {
228        let mut ret = Vec::with_capacity(headerhashes.len());
229
230        for hash in headerhashes {
231            if let Some(found) = self.main.get(hash.inner())? {
232                let header = deserialize(&found)?;
233                ret.push(Some(header));
234                continue
235            }
236            if strict {
237                return Err(Error::HeaderNotFound(hash.inner().hex()))
238            }
239            ret.push(None);
240        }
241
242        Ok(ret)
243    }
244
245    /// Retrieve all headers from the store's main tree in the form of a tuple
246    /// (`headerhash`, `header`).
247    /// Be careful as this will try to load everything in memory.
248    pub fn get_all(&self) -> Result<Vec<(HeaderHash, Header)>> {
249        let mut headers = vec![];
250
251        for header in self.main.iter() {
252            headers.push(parse_record(header.unwrap())?);
253        }
254
255        Ok(headers)
256    }
257
258    /// Retrieve all headers from the store's sync tree in the form of a tuple
259    /// (`height`, `header`).
260    /// Be careful as this will try to load everything in memory.
261    pub fn get_all_sync(&self) -> Result<Vec<(u32, Header)>> {
262        let mut headers = vec![];
263
264        for record in self.sync.iter() {
265            headers.push(parse_u32_key_record(record.unwrap())?);
266        }
267
268        Ok(headers)
269    }
270
271    /// Fetch the fisrt header in the store's sync tree, based on the `Ord`
272    /// implementation for `Vec<u8>`.
273    pub fn get_first_sync(&self) -> Result<Option<Header>> {
274        let Some(found) = self.sync.first()? else { return Ok(None) };
275        let (_, header) = parse_u32_key_record(found)?;
276
277        Ok(Some(header))
278    }
279
280    /// Fetch the last header in the store's sync tree, based on the `Ord`
281    /// implementation for `Vec<u8>`.
282    pub fn get_last_sync(&self) -> Result<Option<Header>> {
283        let Some(found) = self.sync.last()? else { return Ok(None) };
284        let (_, header) = parse_u32_key_record(found)?;
285
286        Ok(Some(header))
287    }
288
289    /// Fetch n hashes after given height. In the iteration, if a header
290    /// height is not found, the iteration stops and the function returns what
291    /// it has found so far in the store's sync tree.
292    pub fn get_after_sync(&self, height: u32, n: usize) -> Result<Vec<Header>> {
293        let mut ret = vec![];
294
295        let mut key = height;
296        let mut counter = 0;
297        while counter < n {
298            if let Some(found) = self.sync.get_gt(key.to_be_bytes())? {
299                let (height, hash) = parse_u32_key_record(found)?;
300                key = height;
301                ret.push(hash);
302                counter += 1;
303                continue
304            }
305            break
306        }
307
308        Ok(ret)
309    }
310
311    /// Retrieve store's sync tree records count.
312    pub fn len_sync(&self) -> usize {
313        self.sync.len()
314    }
315
316    /// Check if store's sync tree contains any records.
317    pub fn is_empty_sync(&self) -> bool {
318        self.sync.is_empty()
319    }
320
321    /// Remove a slice of [`u32`] from the store's sync tree.
322    pub fn remove_sync(&self, heights: &[u32]) -> Result<()> {
323        let batch = self.remove_batch_sync(heights);
324        self.sync.apply_batch(batch)?;
325        Ok(())
326    }
327
328    /// Remove all records from the store's sync tree.
329    pub fn remove_all_sync(&self) -> Result<()> {
330        let headers = self.get_all_sync()?;
331        let heights = headers.iter().map(|h| h.0).collect::<Vec<u32>>();
332        let batch = self.remove_batch_sync(&heights);
333        self.sync.apply_batch(batch)?;
334        Ok(())
335    }
336
337    /// Generate the sled batch corresponding to a remove from the store's sync
338    /// tree, so caller can handle the write operation.
339    pub fn remove_batch_sync(&self, heights: &[u32]) -> sled::Batch {
340        let mut batch = sled::Batch::default();
341
342        for height in heights {
343            batch.remove(&height.to_be_bytes());
344        }
345
346        batch
347    }
348}
349
/// Overlay structure over a [`HeaderStore`] instance.
// Newtype over the shared overlay pointer; all reads/writes go through it.
pub struct HeaderStoreOverlay(SledDbOverlayPtr);

353impl HeaderStoreOverlay {
354    pub fn new(overlay: &SledDbOverlayPtr) -> Result<Self> {
355        overlay.lock().unwrap().open_tree(SLED_HEADER_TREE, true)?;
356        Ok(Self(overlay.clone()))
357    }
358
359    /// Insert a slice of [`Header`] into the overlay.
360    /// The header's hash() function output is used as the key,
361    /// while value is the serialized [`Header`] itself.
362    /// On success, the function returns the header hashes in the same order.
363    pub fn insert(&self, headers: &[Header]) -> Result<Vec<HeaderHash>> {
364        let mut ret = Vec::with_capacity(headers.len());
365        let mut lock = self.0.lock().unwrap();
366
367        for header in headers {
368            let headerhash = header.hash();
369            lock.insert(SLED_HEADER_TREE, headerhash.inner(), &serialize(header))?;
370            ret.push(headerhash);
371        }
372
373        Ok(ret)
374    }
375
376    /// Fetch given headerhashes from the overlay.
377    /// The resulting vector contains `Option`, which is `Some` if the header
378    /// was found in the overlay, and otherwise it is `None`, if it has not.
379    /// The second parameter is a boolean which tells the function to fail in
380    /// case at least one header was not found.
381    pub fn get(&self, headerhashes: &[HeaderHash], strict: bool) -> Result<Vec<Option<Header>>> {
382        let mut ret = Vec::with_capacity(headerhashes.len());
383        let lock = self.0.lock().unwrap();
384
385        for hash in headerhashes {
386            if let Some(found) = lock.get(SLED_HEADER_TREE, hash.inner())? {
387                let header = deserialize(&found)?;
388                ret.push(Some(header));
389                continue
390            }
391            if strict {
392                return Err(Error::HeaderNotFound(hash.inner().hex()))
393            }
394            ret.push(None);
395        }
396
397        Ok(ret)
398    }
399}