1use std::{fmt, str::FromStr};
20
21use darkfi_sdk::{
22 blockchain::block_version,
23 crypto::{MerkleNode, MerkleTree},
24 hex::decode_hex_arr,
25 AsHex,
26};
27#[cfg(feature = "async-serial")]
28use darkfi_serial::async_trait;
29use darkfi_serial::{deserialize, serialize, Encodable, SerialDecodable, SerialEncodable};
30use sled_overlay::{
31 serial::{parse_record, parse_u32_key_record},
32 sled,
33};
34
35use crate::{util::time::Timestamp, Error, Result};
36
37use super::SledDbOverlayPtr;
38
/// A block header's BLAKE3 hash, wrapped as a 32-byte newtype.
#[derive(Copy, Clone, Debug, Eq, PartialEq, SerialEncodable, SerialDecodable)]
pub struct HeaderHash(pub [u8; 32]);
42
43impl HeaderHash {
44 pub fn new(data: [u8; 32]) -> Self {
45 Self(data)
46 }
47
48 #[inline]
49 pub fn inner(&self) -> &[u8; 32] {
50 &self.0
51 }
52
53 pub fn as_string(&self) -> String {
54 self.0.hex().to_string()
55 }
56}
57
58impl FromStr for HeaderHash {
59 type Err = Error;
60
61 fn from_str(header_hash_str: &str) -> Result<Self> {
62 Ok(Self(decode_hex_arr(header_hash_str)?))
63 }
64}
65
66impl fmt::Display for HeaderHash {
67 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
68 write!(f, "{}", self.0.hex())
69 }
70}
71
/// Block header definition.
#[derive(Debug, Clone, PartialEq, Eq, SerialEncodable, SerialDecodable)]
pub struct Header {
    /// Block version, derived from the block height (see `Header::new`)
    pub version: u8,
    /// Previous block header hash
    pub previous: HeaderHash,
    /// Block height
    pub height: u32,
    /// Block creation timestamp
    pub timestamp: Timestamp,
    /// The block's nonce — NOTE(review): presumably varied during mining; confirm against producer code
    pub nonce: u64,
    /// Merkle tree root; `Header::new` initializes it to an empty tree's root
    pub root: MerkleNode,
}
88
89impl Header {
90 pub fn new(previous: HeaderHash, height: u32, timestamp: Timestamp, nonce: u64) -> Self {
91 let version = block_version(height);
92 let root = MerkleTree::new(1).root(0).unwrap();
93 Self { version, previous, height, timestamp, nonce, root }
94 }
95
96 pub fn hash(&self) -> HeaderHash {
98 let mut hasher = blake3::Hasher::new();
99
100 self.encode(&mut hasher).expect("blake3 hasher");
104
105 HeaderHash(hasher.finalize().into())
106 }
107}
108
109impl Default for Header {
110 fn default() -> Self {
112 Header::new(
113 HeaderHash::new(blake3::hash(b"Let there be dark!").into()),
114 0u32,
115 Timestamp::current_time(),
116 0u64,
117 )
118 }
119}
120
121impl fmt::Display for Header {
122 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
123 let s = format!(
124 "{} {{\n\t{}: {}\n\t{}: {}\n\t{}: {}\n\t{}: {}\n\t{}: {}\n\t{}: {}\n\t{}: {}\n}}",
125 "Header",
126 "Hash",
127 self.hash(),
128 "Version",
129 self.version,
130 "Previous",
131 self.previous,
132 "Height",
133 self.height,
134 "Timestamp",
135 self.timestamp,
136 "Nonce",
137 self.nonce,
138 "Root",
139 self.root,
140 );
141
142 write!(f, "{}", s)
143 }
144}
145
/// sled tree name for the store's `main` headers tree (keyed by header hash)
pub const SLED_HEADER_TREE: &[u8] = b"_headers";
/// sled tree name for the store's `sync` headers tree (keyed by height)
pub const SLED_SYNC_HEADER_TREE: &[u8] = b"_sync_headers";
148
/// Wrapper over the sled trees that persist block headers.
#[derive(Clone)]
pub struct HeaderStore {
    /// Main headers tree, keyed by header hash
    pub main: sled::Tree,
    /// Sync headers tree, keyed by big-endian block height
    pub sync: sled::Tree,
}
161
162impl HeaderStore {
163 pub fn new(db: &sled::Db) -> Result<Self> {
165 let main = db.open_tree(SLED_HEADER_TREE)?;
166 let sync = db.open_tree(SLED_SYNC_HEADER_TREE)?;
167 Ok(Self { main, sync })
168 }
169
170 pub fn insert(&self, headers: &[Header]) -> Result<Vec<HeaderHash>> {
172 let (batch, ret) = self.insert_batch(headers);
173 self.main.apply_batch(batch)?;
174 Ok(ret)
175 }
176
177 pub fn insert_sync(&self, headers: &[Header]) -> Result<()> {
179 let batch = self.insert_batch_sync(headers);
180 self.sync.apply_batch(batch)?;
181 Ok(())
182 }
183
184 pub fn insert_batch(&self, headers: &[Header]) -> (sled::Batch, Vec<HeaderHash>) {
191 let mut ret = Vec::with_capacity(headers.len());
192 let mut batch = sled::Batch::default();
193
194 for header in headers {
195 let headerhash = header.hash();
196 batch.insert(headerhash.inner(), serialize(header));
197 ret.push(headerhash);
198 }
199
200 (batch, ret)
201 }
202
203 pub fn insert_batch_sync(&self, headers: &[Header]) -> sled::Batch {
208 let mut batch = sled::Batch::default();
209
210 for header in headers {
211 batch.insert(&header.height.to_be_bytes(), serialize(header));
212 }
213
214 batch
215 }
216
217 pub fn contains(&self, headerhash: &HeaderHash) -> Result<bool> {
219 Ok(self.main.contains_key(headerhash.inner())?)
220 }
221
222 pub fn get(&self, headerhashes: &[HeaderHash], strict: bool) -> Result<Vec<Option<Header>>> {
228 let mut ret = Vec::with_capacity(headerhashes.len());
229
230 for hash in headerhashes {
231 if let Some(found) = self.main.get(hash.inner())? {
232 let header = deserialize(&found)?;
233 ret.push(Some(header));
234 continue
235 }
236 if strict {
237 return Err(Error::HeaderNotFound(hash.inner().hex()))
238 }
239 ret.push(None);
240 }
241
242 Ok(ret)
243 }
244
245 pub fn get_all(&self) -> Result<Vec<(HeaderHash, Header)>> {
249 let mut headers = vec![];
250
251 for header in self.main.iter() {
252 headers.push(parse_record(header.unwrap())?);
253 }
254
255 Ok(headers)
256 }
257
258 pub fn get_all_sync(&self) -> Result<Vec<(u32, Header)>> {
262 let mut headers = vec![];
263
264 for record in self.sync.iter() {
265 headers.push(parse_u32_key_record(record.unwrap())?);
266 }
267
268 Ok(headers)
269 }
270
271 pub fn get_first_sync(&self) -> Result<Option<Header>> {
274 let Some(found) = self.sync.first()? else { return Ok(None) };
275 let (_, header) = parse_u32_key_record(found)?;
276
277 Ok(Some(header))
278 }
279
280 pub fn get_last_sync(&self) -> Result<Option<Header>> {
283 let Some(found) = self.sync.last()? else { return Ok(None) };
284 let (_, header) = parse_u32_key_record(found)?;
285
286 Ok(Some(header))
287 }
288
289 pub fn get_after_sync(&self, height: u32, n: usize) -> Result<Vec<Header>> {
293 let mut ret = vec![];
294
295 let mut key = height;
296 let mut counter = 0;
297 while counter < n {
298 if let Some(found) = self.sync.get_gt(key.to_be_bytes())? {
299 let (height, hash) = parse_u32_key_record(found)?;
300 key = height;
301 ret.push(hash);
302 counter += 1;
303 continue
304 }
305 break
306 }
307
308 Ok(ret)
309 }
310
311 pub fn len_sync(&self) -> usize {
313 self.sync.len()
314 }
315
316 pub fn is_empty_sync(&self) -> bool {
318 self.sync.is_empty()
319 }
320
321 pub fn remove_sync(&self, heights: &[u32]) -> Result<()> {
323 let batch = self.remove_batch_sync(heights);
324 self.sync.apply_batch(batch)?;
325 Ok(())
326 }
327
328 pub fn remove_all_sync(&self) -> Result<()> {
330 let headers = self.get_all_sync()?;
331 let heights = headers.iter().map(|h| h.0).collect::<Vec<u32>>();
332 let batch = self.remove_batch_sync(&heights);
333 self.sync.apply_batch(batch)?;
334 Ok(())
335 }
336
337 pub fn remove_batch_sync(&self, heights: &[u32]) -> sled::Batch {
340 let mut batch = sled::Batch::default();
341
342 for height in heights {
343 batch.remove(&height.to_be_bytes());
344 }
345
346 batch
347 }
348}
349
/// Overlay for staged header writes, wrapping a shared sled overlay pointer.
pub struct HeaderStoreOverlay(SledDbOverlayPtr);
352
353impl HeaderStoreOverlay {
354 pub fn new(overlay: &SledDbOverlayPtr) -> Result<Self> {
355 overlay.lock().unwrap().open_tree(SLED_HEADER_TREE, true)?;
356 Ok(Self(overlay.clone()))
357 }
358
359 pub fn insert(&self, headers: &[Header]) -> Result<Vec<HeaderHash>> {
364 let mut ret = Vec::with_capacity(headers.len());
365 let mut lock = self.0.lock().unwrap();
366
367 for header in headers {
368 let headerhash = header.hash();
369 lock.insert(SLED_HEADER_TREE, headerhash.inner(), &serialize(header))?;
370 ret.push(headerhash);
371 }
372
373 Ok(ret)
374 }
375
376 pub fn get(&self, headerhashes: &[HeaderHash], strict: bool) -> Result<Vec<Option<Header>>> {
382 let mut ret = Vec::with_capacity(headerhashes.len());
383 let lock = self.0.lock().unwrap();
384
385 for hash in headerhashes {
386 if let Some(found) = lock.get(SLED_HEADER_TREE, hash.inner())? {
387 let header = deserialize(&found)?;
388 ret.push(Some(header));
389 continue
390 }
391 if strict {
392 return Err(Error::HeaderNotFound(hash.inner().hex()))
393 }
394 ret.push(None);
395 }
396
397 Ok(ret)
398 }
399}