From eda31ab9e5dcaae81891e1546a39d2f6378792fd Mon Sep 17 00:00:00 2001
From: Yeastplume
Date: Fri, 1 Apr 2022 13:42:12 +0100
Subject: [PATCH] documentation updates + todo fixes (#3703)

---
 chain/src/chain.rs                  |  9 ++-
 chain/src/store.rs                  | 12 +---
 chain/src/txhashset/desegmenter.rs  | 86 +++++------------------------
 chain/src/txhashset/txhashset.rs    |  4 +-
 core/src/global.rs                  | 12 +++-
 servers/src/grin/sync/state_sync.rs |  4 +-
 6 files changed, 35 insertions(+), 92 deletions(-)

diff --git a/chain/src/chain.rs b/chain/src/chain.rs
index ff8bbaa13..52bb6dc5c 100644
--- a/chain/src/chain.rs
+++ b/chain/src/chain.rs
@@ -226,6 +226,9 @@ impl Chain {
 	/// Reset both head and header_head to the provided header.
 	/// Handles simple rewind and more complex fork scenarios.
 	/// Used by the reset_chain_head owner api endpoint.
+	/// Caller can choose not to rewind headers, which can be used
+	/// during PIBD scenarios where it's desirable to restart the PIBD process
+	/// without re-downloading the header chain
 	pub fn reset_chain_head<T: Into<Tip>>(
 		&self,
 		head: T,
@@ -266,7 +269,9 @@ impl Chain {
 		Ok(())
 	}
 
-	/// Reset prune lists (when PIBD resets)
+	/// Reset prune lists (when PIBD resets and rolls back the
+	/// entire chain, the prune list needs to be manually wiped
+	/// as it's currently not included as part of rewind)
 	pub fn reset_prune_lists(&self) -> Result<(), Error> {
 		let mut header_pmmr = self.header_pmmr.write();
 		let mut txhashset = self.txhashset.write();
@@ -917,8 +922,6 @@ impl Chain {
 			}
 		}
 
-		// TODO: (Check whether we can do this.. we *should* be able to modify this as the desegmenter
-		// is in flight and we cross a horizon boundary, but needs more thinking)
 		let desegmenter = self.init_desegmenter(archive_header)?;
 		let mut cache = self.pibd_desegmenter.write();
 		*cache = Some(desegmenter.clone());
diff --git a/chain/src/store.rs b/chain/src/store.rs
index bda8b1ee9..e11d1f486 100644
--- a/chain/src/store.rs
+++ b/chain/src/store.rs
@@ -17,9 +17,9 @@
 use crate::core::consensus::HeaderDifficultyInfo;
 use crate::core::core::hash::{Hash, Hashed};
 use crate::core::core::{Block, BlockHeader, BlockSums};
+use crate::core::global;
 use crate::core::pow::Difficulty;
 use crate::core::ser::{DeserializationMode, ProtocolVersion, Readable, Writeable};
-use crate::core::{genesis, global, global::ChainTypes};
 use crate::linked_list::MultiIndex;
 use crate::types::{CommitPos, Tip};
 use crate::util::secp::pedersen::Commitment;
@@ -83,17 +83,9 @@ impl ChainStore {
 			"PIBD_HEAD".to_owned()
 		});
 
-		// todo: fix duplication in batch below
 		match res {
 			Ok(r) => Ok(r),
-			Err(_) => {
-				let gen = match global::get_chain_type() {
-					ChainTypes::Mainnet => genesis::genesis_main(),
-					ChainTypes::Testnet => genesis::genesis_test(),
-					_ => genesis::genesis_dev(),
-				};
-				Ok(Tip::from_header(&gen.header))
-			}
+			Err(_) => Ok(Tip::from_header(&global::get_genesis_block().header)),
 		}
 	}
 
diff --git a/chain/src/txhashset/desegmenter.rs b/chain/src/txhashset/desegmenter.rs
index b61a13e37..9a09252dc 100644
--- a/chain/src/txhashset/desegmenter.rs
+++ b/chain/src/txhashset/desegmenter.rs
@@ -139,7 +139,7 @@ impl Desegmenter {
 
 	/// Check progress, update status if needed, returns true if all required
 	/// segments are in place
-	pub fn check_progress(&self, status: Arc<SyncState>) -> bool {
+	pub fn check_progress(&self, status: Arc<SyncState>) -> Result<bool, Error> {
 		let mut latest_block_height = 0;
 		let local_output_mmr_size;
 
@@ -183,9 +183,9 @@ impl Desegmenter {
 
 				// TODO: Unwraps
 				let tip = Tip::from_header(&h);
-				let batch = self.store.batch().unwrap();
-				batch.save_pibd_head(&tip).unwrap();
-				batch.commit().unwrap();
+				let batch = self.store.batch()?;
+				batch.save_pibd_head(&tip)?;
+				batch.commit()?;
 
 				status.update_pibd_progress(
 					false,
@@ -200,11 +200,11 @@ impl Desegmenter {
 			&& self.bitmap_cache.is_some()
 			{
 				// All is complete
-				return true;
+				return Ok(true);
 			}
 		}
 
-		false
+		Ok(false)
 	}
 
 	/// Once the PIBD set is downloaded, we need to ensure that the respective leaf sets
@@ -223,11 +223,8 @@ impl Desegmenter {
 		Ok(())
 	}
 
-	/// TODO: This is largely copied from chain.rs txhashset_write and related functions,
-	/// the idea being that these will eventually be broken out to perform validation while
-	/// segments are still being downloaded and applied. Current validation logic is all tied up
-	/// around unzipping, so re-developing this logic separate from the txhashset version
-	/// will to allow this to happen more cleanly
+	/// This is largely copied from chain.rs txhashset_write and related functions,
+	/// the idea being that the txhashset version will eventually be removed
 	pub fn validate_complete_state(
 		&self,
 		status: Arc<SyncState>,
@@ -239,7 +236,7 @@ impl Desegmenter {
 			txhashset.roots().validate(&self.archive_header)?;
 		}
 
-		// TODO: Keep track of this in the DB so we can pick up where we left off if needed
+		// TODO: Possibly keep track of this in the DB so we can pick up where we left off if needed
 		let last_rangeproof_validation_pos = 0;
 
 		// Validate kernel history
@@ -348,7 +345,7 @@ impl Desegmenter {
 		{
 			// Save the new head to the db and rebuild the header by height index.
 			let tip = Tip::from_header(&self.archive_header);
-			// TODO: Throw error
+
 			batch.save_body_head(&tip)?;
 
 			// Reset the body tail to the body head after a txhashset write
@@ -372,8 +369,7 @@ impl Desegmenter {
 	}
 
 	/// Apply next set of segments that are ready to be appended to their respective trees,
-	/// and kick off any validations that can happen. TODO: figure out where and how
-	/// this should be called considering any thread blocking implications
+	/// and kick off any validations that can happen.
 	pub fn apply_next_segments(&mut self) -> Result<(), Error> {
 		let next_bmp_idx = self.next_required_bitmap_segment_index();
 		if let Some(bmp_idx) = next_bmp_idx {
@@ -561,10 +557,6 @@ impl Desegmenter {
 
 	/// 'Finalize' the bitmap accumulator, storing an in-memory copy of the bitmap for
 	/// use in further validation and setting the accumulator on the underlying txhashset
-	/// TODO: Could be called automatically when we have the calculated number of
-	/// required segments for the archive header
-	/// TODO: Accumulator will likely need to be stored locally to deal with server
-	/// being shut down and restarted
 	pub fn finalize_bitmap(&mut self) -> Result<(), Error> {
 		trace!(
 			"pibd_desegmenter: finalizing and caching bitmap - accumulator root: {}",
@@ -630,58 +622,6 @@ impl Desegmenter {
 		}
 	}
 
-	/// Apply a list of segments, in a single extension
-	pub fn _apply_segments(
-		&mut self,
-		output_segments: Vec<Segment<OutputIdentifier>>,
-		rp_segments: Vec<Segment<RangeProof>>,
-		kernel_segments: Vec<Segment<TxKernel>>,
-	) -> Result<(), Error> {
-		let t = self.txhashset.clone();
-		let s = self.store.clone();
-		let mut header_pmmr = self.header_pmmr.write();
-		let mut txhashset = t.write();
-		let mut batch = s.batch()?;
-		txhashset::extending(
-			&mut header_pmmr,
-			&mut txhashset,
-			&mut batch,
-			|ext, _batch| {
-				let extension = &mut ext.extension;
-				// outputs
-				for segment in output_segments {
-					let id = segment.identifier().idx;
-					if let Err(e) = extension.apply_output_segment(segment) {
-						debug!("pibd_desegmenter: applying output segment at idx {}", id);
-						error!("Error applying output segment {}, {}", id, e);
-						break;
-					}
-				}
-				for segment in rp_segments {
-					let id = segment.identifier().idx;
-					if let Err(e) = extension.apply_rangeproof_segment(segment) {
-						debug!(
-							"pibd_desegmenter: applying rangeproof segment at idx {}",
-							id
-						);
-						error!("Error applying rangeproof segment {}, {}", id, e);
-						break;
-					}
-				}
-				for segment in kernel_segments {
-					let id = segment.identifier().idx;
-					if let Err(e) = extension.apply_kernel_segment(segment) {
-						debug!("pibd_desegmenter: applying kernel segment at idx {}", id);
-						error!("Error applying kernel segment {}, {}", id, e);
-						break;
-					}
-				}
-				Ok(())
-			},
-		)?;
-		Ok(())
-	}
-
 	/// Whether our list already contains this bitmap segment
 	fn has_bitmap_segment_with_id(&self, seg_id: SegmentIdentifier) -> bool {
 		self.bitmap_segment_cache
@@ -798,6 +738,8 @@ impl Desegmenter {
 		// Special case here. If the mmr size is 1, this is a fresh chain
 		// with naught but a humble genesis block. We need segment 0, (and
 		// also need to skip the genesis block when applying the segment)
+		// note this is implementation-specific; the code for creating
+		// a new chain creates the genesis block pmmr entries by default
 		let mut cur_segment_count = if local_output_mmr_size == 1 {
 			0
@@ -856,8 +798,6 @@ impl Desegmenter {
 	}
 
 	/// Whether our list already contains this rangeproof segment
-	/// TODO: Refactor all these similar functions, but will require some time
-	/// refining traits
 	fn has_rangeproof_segment_with_id(&self, seg_id: SegmentIdentifier) -> bool {
 		self.rangeproof_segment_cache
 			.iter()
diff --git a/chain/src/txhashset/txhashset.rs b/chain/src/txhashset/txhashset.rs
index 3b3c44a06..817ccf793 100644
--- a/chain/src/txhashset/txhashset.rs
+++ b/chain/src/txhashset/txhashset.rs
@@ -1295,8 +1295,6 @@ impl<'a> Extension<'a> {
 				.leaf_idx_iter(BitmapAccumulator::chunk_start_idx(min_idx)),
 			size,
 		)
-		// TODO: will need to set bitmap cache here if it's ever needed
-		// outside of PIBD sync
 	}
 
 	/// Sets the bitmap accumulator (as received during PIBD sync)
@@ -1402,7 +1400,7 @@ impl<'a> Extension<'a> {
 
 	/// Apply an output segment to the output PMMR. must be called in order
 	/// Sort and apply hashes and leaves within a segment to output pmmr, skipping over
 	/// genesis position.
-	/// TODO NB: Would like to make this more generic but the hard casting of pmmrs
+	/// NB: Would like to make this more generic but the hard casting of pmmrs
 	/// held by this struct makes it awkward to do so
 	pub fn apply_output_segment(
diff --git a/core/src/global.rs b/core/src/global.rs
index 00cc3ef61..c12a13465 100644
--- a/core/src/global.rs
+++ b/core/src/global.rs
@@ -22,7 +22,8 @@ use crate::consensus::{
 	DMA_WINDOW, GRIN_BASE, INITIAL_DIFFICULTY, KERNEL_WEIGHT, MAX_BLOCK_WEIGHT, OUTPUT_WEIGHT,
 	PROOFSIZE, SECOND_POW_EDGE_BITS, STATE_SYNC_THRESHOLD,
 };
-use crate::core::block::HeaderVersion;
+use crate::core::block::{Block, HeaderVersion};
+use crate::genesis;
 use crate::pow::{
 	self, new_cuckaroo_ctx, new_cuckarood_ctx, new_cuckaroom_ctx, new_cuckarooz_ctx,
 	new_cuckatoo_ctx, no_cuckaroo_ctx, PoWContext, Proof,
 };
@@ -201,6 +202,15 @@ pub fn get_chain_type() -> ChainTypes {
 	})
 }
 
+/// Return genesis block for the active chain type
+pub fn get_genesis_block() -> Block {
+	match get_chain_type() {
+		ChainTypes::Mainnet => genesis::genesis_main(),
+		ChainTypes::Testnet => genesis::genesis_test(),
+		_ => genesis::genesis_dev(),
+	}
+}
+
 /// One time initialization of the global future time limit
 /// Will panic if we attempt to re-initialize this (via OneTime).
 pub fn init_global_future_time_limit(new_ftl: u64) {
diff --git a/servers/src/grin/sync/state_sync.rs b/servers/src/grin/sync/state_sync.rs
index 14da0807e..a580cf42b 100644
--- a/servers/src/grin/sync/state_sync.rs
+++ b/servers/src/grin/sync/state_sync.rs
@@ -171,7 +171,7 @@ impl StateSync {
 			let desegmenter = self.chain.desegmenter(&archive_header).unwrap();
 			// All segments in, validate
 			if let Some(d) = desegmenter.read().as_ref() {
-				if d.check_progress(self.sync_state.clone()) {
+				if let Ok(true) = d.check_progress(self.sync_state.clone()) {
 					if let Err(e) = d.check_update_leaf_set_state() {
 						error!("error updating PIBD leaf set: {}", e);
 						self.sync_state.update_pibd_progress(
@@ -263,7 +263,7 @@ impl StateSync {
 		// requests we want to send to peers
 		let mut next_segment_ids = vec![];
 		if let Some(d) = desegmenter.write().as_mut() {
-			if d.check_progress(self.sync_state.clone()) {
+			if let Ok(true) = d.check_progress(self.sync_state.clone()) {
 				return true;
 			}
 			// Figure out the next segments we need