documentation updates + todo fixes (#3703)

Yeastplume 2022-04-01 13:42:12 +01:00 committed by GitHub
parent 6a7b66b329
commit eda31ab9e5
6 changed files with 35 additions and 92 deletions

View file

@@ -226,6 +226,9 @@ impl Chain {
 	/// Reset both head and header_head to the provided header.
 	/// Handles simple rewind and more complex fork scenarios.
 	/// Used by the reset_chain_head owner api endpoint.
+	/// Caller can choose not to rewind headers, which can be used
+	/// during PIBD scenarios where it's desirable to restart the PIBD process
+	/// without re-downloading the header chain
 	pub fn reset_chain_head<T: Into<Tip>>(
 		&self,
 		head: T,
@@ -266,7 +269,9 @@ impl Chain {
 		Ok(())
 	}

-	/// Reset prune lists (when PIBD resets)
+	/// Reset prune lists (when PIBD resets and rolls back the
+	/// entire chain, the prune list needs to be manually wiped
+	/// as it's currently not included as part of rewind)
 	pub fn reset_prune_lists(&self) -> Result<(), Error> {
 		let mut header_pmmr = self.header_pmmr.write();
 		let mut txhashset = self.txhashset.write();
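For illustration, a PIBD restart would drive these two calls together. A minimal sketch follows; the boolean controlling header rewind is an assumption inferred from the new doc comment (the full signature isn't shown in this hunk):

// Hypothetical sketch: restart PIBD from scratch without re-downloading headers.
// The `false` (don't rewind headers) argument is assumed, not confirmed by this diff.
fn restart_pibd(chain: &Chain, genesis: &BlockHeader) -> Result<(), Error> {
	// Roll the body back to genesis, leaving the synced header chain intact.
	chain.reset_chain_head(Tip::from_header(genesis), false)?;
	// Rewind doesn't cover the prune list, so wipe it explicitly.
	chain.reset_prune_lists()?;
	Ok(())
}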
@@ -917,8 +922,6 @@ impl Chain {
 		}
 	}

-	// TODO: (Check whether we can do this.. we *should* be able to modify this as the desegmenter
-	// is in flight and we cross a horizon boundary, but needs more thinking)
 	let desegmenter = self.init_desegmenter(archive_header)?;
 	let mut cache = self.pibd_desegmenter.write();
 	*cache = Some(desegmenter.clone());

View file

@@ -17,9 +17,9 @@
 use crate::core::consensus::HeaderDifficultyInfo;
 use crate::core::core::hash::{Hash, Hashed};
 use crate::core::core::{Block, BlockHeader, BlockSums};
+use crate::core::global;
 use crate::core::pow::Difficulty;
 use crate::core::ser::{DeserializationMode, ProtocolVersion, Readable, Writeable};
-use crate::core::{genesis, global, global::ChainTypes};
 use crate::linked_list::MultiIndex;
 use crate::types::{CommitPos, Tip};
 use crate::util::secp::pedersen::Commitment;
@@ -83,17 +83,9 @@ impl ChainStore {
 			"PIBD_HEAD".to_owned()
 		});

-		// todo: fix duplication in batch below
 		match res {
 			Ok(r) => Ok(r),
-			Err(_) => {
-				let gen = match global::get_chain_type() {
-					ChainTypes::Mainnet => genesis::genesis_main(),
-					ChainTypes::Testnet => genesis::genesis_test(),
-					_ => genesis::genesis_dev(),
-				};
-				Ok(Tip::from_header(&gen.header))
-			}
+			Err(_) => Ok(Tip::from_header(&global::get_genesis_block().header)),
 		}
 	}

View file

@@ -139,7 +139,7 @@ impl Desegmenter {
 	/// Check progress, update status if needed, returns true if all required
 	/// segments are in place
-	pub fn check_progress(&self, status: Arc<SyncState>) -> bool {
+	pub fn check_progress(&self, status: Arc<SyncState>) -> Result<bool, Error> {
 		let mut latest_block_height = 0;
 		let local_output_mmr_size;
@@ -183,9 +183,9 @@
 			// TODO: Unwraps
 			let tip = Tip::from_header(&h);
-			let batch = self.store.batch().unwrap();
-			batch.save_pibd_head(&tip).unwrap();
-			batch.commit().unwrap();
+			let batch = self.store.batch()?;
+			batch.save_pibd_head(&tip)?;
+			batch.commit()?;

 			status.update_pibd_progress(
 				false,
@@ -200,11 +200,11 @@
 			&& self.bitmap_cache.is_some()
 		{
 			// All is complete
-			return true;
+			return Ok(true);
 		}
 	}
-	false
+	Ok(false)
 }

 /// Once the PIBD set is downloaded, we need to ensure that the respective leaf sets
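With check_progress now returning Result<bool, Error> instead of bool, callers can tell "not finished yet" apart from a store failure. A hedged sketch of the calling pattern (variable names invented for illustration):

match desegmenter.check_progress(sync_state.clone()) {
	Ok(true) => { /* all segments in place; proceed to validation */ }
	Ok(false) => { /* keep requesting segments */ }
	Err(e) => error!("pibd progress check failed: {}", e),
}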
@@ -223,11 +223,8 @@
 		Ok(())
 	}

-	/// TODO: This is largely copied from chain.rs txhashset_write and related functions,
-	/// the idea being that these will eventually be broken out to perform validation while
-	/// segments are still being downloaded and applied. Current validation logic is all tied up
-	/// around unzipping, so re-developing this logic separate from the txhashset version
-	/// will to allow this to happen more cleanly
+	/// This is largely copied from chain.rs txhashset_write and related functions,
+	/// the idea being that the txhashset version will eventually be removed
 	pub fn validate_complete_state(
 		&self,
 		status: Arc<SyncState>,
@@ -239,7 +236,7 @@
 			txhashset.roots().validate(&self.archive_header)?;
 		}

-		// TODO: Keep track of this in the DB so we can pick up where we left off if needed
+		// TODO: Possibly keep track of this in the DB so we can pick up where we left off if needed
 		let last_rangeproof_validation_pos = 0;

 		// Validate kernel history
@@ -348,7 +345,7 @@
 		{
 			// Save the new head to the db and rebuild the header by height index.
 			let tip = Tip::from_header(&self.archive_header);
-			// TODO: Throw error
 			batch.save_body_head(&tip)?;

 			// Reset the body tail to the body head after a txhashset write
@@ -372,8 +369,7 @@
 	}

 	/// Apply next set of segments that are ready to be appended to their respective trees,
-	/// and kick off any validations that can happen. TODO: figure out where and how
-	/// this should be called considering any thread blocking implications
+	/// and kick off any validations that can happen.
 	pub fn apply_next_segments(&mut self) -> Result<(), Error> {
 		let next_bmp_idx = self.next_required_bitmap_segment_index();
 		if let Some(bmp_idx) = next_bmp_idx {
@@ -561,10 +557,6 @@
 	/// 'Finalize' the bitmap accumulator, storing an in-memory copy of the bitmap for
 	/// use in further validation and setting the accumulator on the underlying txhashset
-	/// TODO: Could be called automatically when we have the calculated number of
-	/// required segments for the archive header
-	/// TODO: Accumulator will likely need to be stored locally to deal with server
-	/// being shut down and restarted
 	pub fn finalize_bitmap(&mut self) -> Result<(), Error> {
 		trace!(
 			"pibd_desegmenter: finalizing and caching bitmap - accumulator root: {}",
@@ -630,58 +622,6 @@ impl Desegmenter {
 		}
 	}

-	/// Apply a list of segments, in a single extension
-	pub fn _apply_segments(
-		&mut self,
-		output_segments: Vec<Segment<OutputIdentifier>>,
-		rp_segments: Vec<Segment<RangeProof>>,
-		kernel_segments: Vec<Segment<TxKernel>>,
-	) -> Result<(), Error> {
-		let t = self.txhashset.clone();
-		let s = self.store.clone();
-		let mut header_pmmr = self.header_pmmr.write();
-		let mut txhashset = t.write();
-		let mut batch = s.batch()?;
-		txhashset::extending(
-			&mut header_pmmr,
-			&mut txhashset,
-			&mut batch,
-			|ext, _batch| {
-				let extension = &mut ext.extension;
-				// outputs
-				for segment in output_segments {
-					let id = segment.identifier().idx;
-					if let Err(e) = extension.apply_output_segment(segment) {
-						debug!("pibd_desegmenter: applying output segment at idx {}", id);
-						error!("Error applying output segment {}, {}", id, e);
-						break;
-					}
-				}
-				for segment in rp_segments {
-					let id = segment.identifier().idx;
-					if let Err(e) = extension.apply_rangeproof_segment(segment) {
-						debug!(
-							"pibd_desegmenter: applying rangeproof segment at idx {}",
-							id
-						);
-						error!("Error applying rangeproof segment {}, {}", id, e);
-						break;
-					}
-				}
-				for segment in kernel_segments {
-					let id = segment.identifier().idx;
-					if let Err(e) = extension.apply_kernel_segment(segment) {
-						debug!("pibd_desegmenter: applying kernel segment at idx {}", id);
-						error!("Error applying kernel segment {}, {}", id, e);
-						break;
-					}
-				}
-				Ok(())
-			},
-		)?;
-		Ok(())
-	}

 	/// Whether our list already contains this bitmap segment
 	fn has_bitmap_segment_with_id(&self, seg_id: SegmentIdentifier) -> bool {
 		self.bitmap_segment_cache
@@ -798,6 +738,8 @@ impl Desegmenter {
 		// Special case here. If the mmr size is 1, this is a fresh chain
 		// with naught but a humble genesis block. We need segment 0, (and
 		// also need to skip the genesis block when applying the segment)
+		// note this is implementation-specific, the code for creating
+		// a new chain creates the genesis block pmmr entries by default
 		let mut cur_segment_count = if local_output_mmr_size == 1 {
 			0
@@ -856,8 +798,6 @@
 	}

 	/// Whether our list already contains this rangeproof segment
-	/// TODO: Refactor all these similar functions, but will require some time
-	/// refining traits
 	fn has_rangeproof_segment_with_id(&self, seg_id: SegmentIdentifier) -> bool {
 		self.rangeproof_segment_cache
 			.iter()

View file

@@ -1295,8 +1295,6 @@ impl<'a> Extension<'a> {
 				.leaf_idx_iter(BitmapAccumulator::chunk_start_idx(min_idx)),
 			size,
 		)
-		// TODO: will need to set bitmap cache here if it's ever needed
-		// outside of PIBD sync
 	}

 	/// Sets the bitmap accumulator (as received during PIBD sync)
@@ -1402,7 +1400,7 @@
 	/// Apply an output segment to the output PMMR. must be called in order
 	/// Sort and apply hashes and leaves within a segment to output pmmr, skipping over
 	/// genesis position.
-	/// TODO NB: Would like to make this more generic but the hard casting of pmmrs
+	/// NB: Would like to make this more generic but the hard casting of pmmrs
 	/// held by this struct makes it awkward to do so
 	pub fn apply_output_segment(

View file

@ -22,7 +22,8 @@ use crate::consensus::{
DMA_WINDOW, GRIN_BASE, INITIAL_DIFFICULTY, KERNEL_WEIGHT, MAX_BLOCK_WEIGHT, OUTPUT_WEIGHT, DMA_WINDOW, GRIN_BASE, INITIAL_DIFFICULTY, KERNEL_WEIGHT, MAX_BLOCK_WEIGHT, OUTPUT_WEIGHT,
PROOFSIZE, SECOND_POW_EDGE_BITS, STATE_SYNC_THRESHOLD, PROOFSIZE, SECOND_POW_EDGE_BITS, STATE_SYNC_THRESHOLD,
}; };
use crate::core::block::HeaderVersion; use crate::core::block::{Block, HeaderVersion};
use crate::genesis;
use crate::pow::{ use crate::pow::{
self, new_cuckaroo_ctx, new_cuckarood_ctx, new_cuckaroom_ctx, new_cuckarooz_ctx, self, new_cuckaroo_ctx, new_cuckarood_ctx, new_cuckaroom_ctx, new_cuckarooz_ctx,
new_cuckatoo_ctx, no_cuckaroo_ctx, PoWContext, Proof, new_cuckatoo_ctx, no_cuckaroo_ctx, PoWContext, Proof,
@@ -201,6 +202,15 @@ pub fn get_chain_type() -> ChainTypes {
 	})
 }

+/// Return genesis block for the active chain type
+pub fn get_genesis_block() -> Block {
+	match get_chain_type() {
+		ChainTypes::Mainnet => genesis::genesis_main(),
+		ChainTypes::Testnet => genesis::genesis_test(),
+		_ => genesis::genesis_dev(),
+	}
+}
+
 /// One time initialization of the global future time limit
 /// Will panic if we attempt to re-initialize this (via OneTime).
 pub fn init_global_future_time_limit(new_ftl: u64) {
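A quick sanity sketch of the new helper; this assumes the test-style global::set_local_chain_type setter used elsewhere in the crate:

// Hypothetical usage: the helper picks the genesis block for the active chain type.
global::set_local_chain_type(global::ChainTypes::Testnet);
let gen = global::get_genesis_block();
assert_eq!(gen.header.height, 0); // every genesis block sits at height 0

This removes the chain-type match that previously lived in store.rs, so callers like the PIBD head fallback reduce to a single expression.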

View file

@@ -171,7 +171,7 @@ impl StateSync {
 		let desegmenter = self.chain.desegmenter(&archive_header).unwrap();
 		// All segments in, validate
 		if let Some(d) = desegmenter.read().as_ref() {
-			if d.check_progress(self.sync_state.clone()) {
+			if let Ok(true) = d.check_progress(self.sync_state.clone()) {
 				if let Err(e) = d.check_update_leaf_set_state() {
 					error!("error updating PIBD leaf set: {}", e);
 					self.sync_state.update_pibd_progress(
@@ -263,7 +263,7 @@
 		// requests we want to send to peers
 		let mut next_segment_ids = vec![];
 		if let Some(d) = desegmenter.write().as_mut() {
-			if d.check_progress(self.sync_state.clone()) {
+			if let Ok(true) = d.check_progress(self.sync_state.clone()) {
 				return true;
 			}
 			// Figure out the next segments we need
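Note that the if let Ok(true) pattern treats an Err from check_progress the same as "not yet complete" and drops the error silently. If surfacing it ever matters, an explicit match (hypothetical, not part of this commit) would log it:

match d.check_progress(self.sync_state.clone()) {
	Ok(true) => return true,
	Ok(false) => (),
	Err(e) => warn!("PIBD progress check failed: {}", e),
}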