diff --git a/chain/src/chain.rs b/chain/src/chain.rs
index ba5a7c8dd..21e980e56 100644
--- a/chain/src/chain.rs
+++ b/chain/src/chain.rs
@@ -166,56 +166,71 @@ impl Chain {
 		// check if we have a head in store, otherwise the genesis block is it
 		let head = store.head();
 
-		let txhashset_md = match head {
-			Ok(h) => {
-				// Add the height to the metadata for the use of the rewind log, as this isn't
-				// stored
-				if let Ok(mut ts) = store.get_block_pmmr_file_metadata(&h.last_block_h) {
-					ts.output_file_md.block_height = h.height;
-					ts.rproof_file_md.block_height = h.height;
-					ts.kernel_file_md.block_height = h.height;
-					Some(ts)
-				} else {
-					debug!(LOGGER, "metadata not found for {} @ {}", h.height, h.hash());
-					None
+		// open the txhashset, creating a new one if necessary
+		let mut txhashset = txhashset::TxHashSet::open(db_root.clone(), store.clone())?;
+
+		match head {
+			Ok(head) => {
+				let mut head = head;
+				loop {
+					// Use current chain tip if we have one.
+					// Note: We are rewinding and validating against a writeable extension.
+					// If validation is successful we will truncate the backend files
+					// to match the provided block header.
+					let header = store.get_block_header(&head.last_block_h)?;
+					let res = txhashset::extending(&mut txhashset, |extension| {
+						debug!(
+							LOGGER,
+							"chain: init: rewinding and validating before we start... {} at {}",
+							header.hash(),
+							header.height,
+						);
+
+						extension.rewind(&header)?;
+						extension.validate_roots(&header)?;
+						Ok(())
+					});
+					if res.is_ok() {
+						break;
+					} else {
+						// We may have corrupted the MMR backend files
+						// last time we stopped the node.
+						// If this appears to be the case, revert the head
+						// to the previous header and try again.
+
+						let _ = store.delete_block(&header.hash());
+						let prev_header = store.get_block_header(&head.prev_block_h)?;
+						let _ = store.setup_height(&prev_header, &head)?;
+						head = Tip::from_block(&prev_header);
+						store.save_head(&head)?;
+					}
 				}
 			}
-			Err(NotFoundErr) => None,
-			Err(e) => return Err(Error::StoreErr(e, "chain init load head".to_owned())),
-		};
-
-		let mut txhashset =
-			txhashset::TxHashSet::open(db_root.clone(), store.clone(), txhashset_md)?;
-
-		let head = store.head();
-
-		let head = match head {
-			Ok(h) => h,
 			Err(NotFoundErr) => {
 				let tip = Tip::from_block(&genesis.header);
 				store.save_block(&genesis)?;
 				store.setup_height(&genesis.header, &tip)?;
-				if genesis.kernels.len() > 0 {
-					txhashset::extending(&mut txhashset, |extension| {
-						extension.apply_block(&genesis)
-					})?;
-				}
+				txhashset::extending(&mut txhashset, |extension| {
+					extension.apply_block(&genesis)?;
+					Ok(())
+				})?;
 
 				// saving a new tip based on genesis
 				store.save_head(&tip)?;
 				info!(
 					LOGGER,
-					"Saved genesis block: {:?}, nonce: {:?}, pow: {:?}",
+					"chain: init: saved genesis block: {:?}, nonce: {:?}, pow: {:?}",
 					genesis.hash(),
 					genesis.header.nonce,
 					genesis.header.pow,
 				);
-				pipe::save_pmmr_metadata(&tip, &txhashset, store.clone())?;
-				tip
 			}
 			Err(e) => return Err(Error::StoreErr(e, "chain init load head".to_owned())),
 		};
 
+		// Now reload the chain head (either existing head or genesis from above)
+		let head = store.head()?;
+
 		// Initialize header_head and sync_head as necessary for chain init.
 		store.init_head()?;
 
@@ -224,7 +239,7 @@ impl Chain {
 			"Chain init: {} @ {} [{}]",
 			head.total_difficulty.into_num(),
 			head.height,
-			head.last_block_h
+			head.last_block_h,
 		);
 
 		Ok(Chain {
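The retry loop above leans on a property of `txhashset::extending` that is easy to miss: when the closure returns an error, the whole extension is discarded, so a failed rewind/validate attempt leaves the backend state untouched and the loop can safely retry from the previous header. A minimal standalone sketch of that contract (toy types, not grin's actual `TxHashSet` internals, which truncate backend files rather than copy state):

```rust
/// Toy stand-in for a txhashset extension; `applied` mimics mutable backend state.
struct Extension {
    applied: Vec<String>,
}

/// Apply `inner` to a scratch extension; persist the result only on success.
fn extending<F, T>(state: &mut Vec<String>, inner: F) -> Result<T, String>
where
    F: FnOnce(&mut Extension) -> Result<T, String>,
{
    // work on a copy so a failing closure cannot corrupt `state`
    let mut ext = Extension { applied: state.clone() };
    match inner(&mut ext) {
        Ok(r) => {
            *state = ext.applied; // success: commit the changes
            Ok(r)
        }
        Err(e) => Err(e), // failure: the scratch copy is simply dropped
    }
}

fn main() {
    let mut chain_state = vec!["genesis".to_string()];

    // a failing extension leaves the state exactly as it was,
    // which is what lets chain init retry from the previous header
    let res: Result<(), String> = extending(&mut chain_state, |ext| {
        ext.applied.push("bad block".to_string());
        Err("root mismatch".to_string())
    });

    assert!(res.is_err());
    assert_eq!(chain_state, vec!["genesis".to_string()]);
}
```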
@@ -434,9 +449,10 @@ impl Chain {
 		// Now create an extension from the txhashset and validate
 		// against the latest block header.
-		// We will rewind the extension internally to the pos for
-		// the block header to ensure the view is consistent.
+		// Rewind the extension to the specified header to ensure the view is
+		// consistent.
 		txhashset::extending_readonly(&mut txhashset, |extension| {
+			extension.rewind(&header)?;
 			extension.validate(&header, skip_rproofs)
 		})
 	}
@@ -502,18 +518,14 @@ impl Chain {
 	/// at the provided block hash.
 	pub fn txhashset_read(&self, h: Hash) -> Result<(u64, u64, File), Error> {
 		// get the indexes for the block
-		let out_index: u64;
-		let kernel_index: u64;
-		{
+		let marker = {
 			let txhashset = self.txhashset.read().unwrap();
-			let (oi, ki) = txhashset.indexes_at(&h)?;
-			out_index = oi;
-			kernel_index = ki;
-		}
+			txhashset.indexes_at(&h)?
+		};
 
 		// prepares the zip and return the corresponding Read
 		let txhashset_reader = txhashset::zip_read(self.db_root.clone())?;
-		Ok((out_index, kernel_index, txhashset_reader))
+		Ok((marker.output_pos, marker.kernel_pos, txhashset_reader))
 	}
 
 	/// Writes a reading view on a txhashset state that's been provided to us.
@@ -539,16 +551,22 @@ impl Chain {
 		// write the block marker so we can safely rewind to
 		// the pos for that block when we validate the extension below
-		self.store
-			.save_block_marker(&h, &(rewind_to_output, rewind_to_kernel))?;
+		let marker = BlockMarker {
+			output_pos: rewind_to_output,
+			kernel_pos: rewind_to_kernel,
+		};
+		self.store.save_block_marker(&h, &marker)?;
 
 		debug!(
 			LOGGER,
 			"Going to validate new txhashset, might take some time..."
 		);
-		let mut txhashset =
-			txhashset::TxHashSet::open(self.db_root.clone(), self.store.clone(), None)?;
+
+		let mut txhashset = txhashset::TxHashSet::open(self.db_root.clone(), self.store.clone())?;
+
+		// Note: we are validating against a writeable extension.
 		txhashset::extending(&mut txhashset, |extension| {
+			extension.rewind(&header)?;
 			extension.validate(&header, false)?;
 			extension.rebuild_index()?;
 			Ok(())
@@ -621,7 +639,6 @@ impl Chain {
 			match self.store.get_block(&current.hash()) {
 				Ok(b) => {
 					self.store.delete_block(&b.hash())?;
-					self.store.delete_block_pmmr_file_metadata(&b.hash())?;
 					self.store.delete_block_marker(&b.hash())?;
 				}
 				Err(NotFoundErr) => {
@@ -733,6 +750,13 @@ impl Chain {
 			.map_err(|e| Error::StoreErr(e, "chain get header".to_owned()))
 	}
 
+	/// Get the block marker for the specified block hash.
+	pub fn get_block_marker(&self, bh: &Hash) -> Result<BlockMarker, Error> {
+		self.store
+			.get_block_marker(bh)
+			.map_err(|e| Error::StoreErr(e, "chain get block marker".to_owned()))
+	}
+
 	/// Gets the block header at the provided height
 	pub fn get_header_by_height(&self, height: u64) -> Result<BlockHeader, Error> {
 		self.store
@@ -778,16 +802,6 @@ impl Chain {
 			.map_err(|e| Error::StoreErr(e, "chain block exists".to_owned()))
 	}
 
-	/// Retrieve the file index metadata for a given block
-	pub fn get_block_pmmr_file_metadata(
-		&self,
-		h: &Hash,
-	) -> Result<PMMRFileMetadataCollection, Error> {
-		self.store
-			.get_block_pmmr_file_metadata(h)
-			.map_err(|e| Error::StoreErr(e, "retrieve block pmmr metadata".to_owned()))
-	}
-
 	/// Rebuilds height index. Reachable as endpoint POST /chain/height-index
 	pub fn rebuild_header_by_height(&self) -> Result<(), Error> {
 		let head = self.head_header()?;
diff --git a/chain/src/pipe.rs b/chain/src/pipe.rs
index dd5a5df26..7d4999ef6 100644
--- a/chain/src/pipe.rs
+++ b/chain/src/pipe.rs
@@ -112,27 +112,7 @@ pub fn process_block(b: &Block, mut ctx: BlockContext) -> Result<Option<Tip>, Error> {
 		Ok(h)
 	});
 
-	match result {
-		Ok(t) => {
-			save_pmmr_metadata(&Tip::from_block(&b.header), &txhashset, ctx.store.clone())?;
-			Ok(t)
-		}
-		Err(e) => Err(e),
-	}
-}
-
-/// Save pmmr index location for a given block
-pub fn save_pmmr_metadata(
-	t: &Tip,
-	txhashset: &txhashset::TxHashSet,
-	store: Arc<ChainStore>,
-) -> Result<(), Error> {
-	// Save pmmr file metadata for this block
-	let block_file_md = txhashset.last_file_metadata();
-	store
-		.save_block_pmmr_file_metadata(&t.last_block_h, &block_file_md)
-		.map_err(|e| Error::StoreErr(e, "saving pmmr file metadata".to_owned()))?;
-	Ok(())
+	result
 }
 
 /// Process the block header.
diff --git a/chain/src/store.rs b/chain/src/store.rs
index 2e3f8cb75..80dbb1cd2 100644
--- a/chain/src/store.rs
+++ b/chain/src/store.rs
@@ -37,7 +37,6 @@ const SYNC_HEAD_PREFIX: u8 = 's' as u8;
 const HEADER_HEIGHT_PREFIX: u8 = '8' as u8;
 const COMMIT_POS_PREFIX: u8 = 'c' as u8;
 const BLOCK_MARKER_PREFIX: u8 = 'm' as u8;
-const BLOCK_PMMR_FILE_METADATA_PREFIX: u8 = 'p' as u8;
 
 /// An implementation of the ChainStore trait backed by a simple key-value
 /// store.
@@ -163,7 +162,17 @@ impl ChainStore for ChainKVStore {
 		self.db.delete(&to_key(BLOCK_PREFIX, &mut bh.to_vec())[..])
 	}
 
+	// We are on the current chain if:
+	// * the header by height index matches the header, and
+	// * we are not ahead of the current head
 	fn is_on_current_chain(&self, header: &BlockHeader) -> Result<(), Error> {
+		let head = self.head()?;
+
+		// check we are not out ahead of the current head
+		if header.height > head.height {
+			return Err(Error::NotFoundErr);
+		}
+
 		let header_at_height = self.get_header_by_height(header.height)?;
 		if header.hash() == header_at_height.hash() {
 			Ok(())
@@ -212,12 +221,12 @@ impl ChainStore for ChainKVStore {
 			.delete(&to_key(COMMIT_POS_PREFIX, &mut commit.to_vec()))
 	}
 
-	fn save_block_marker(&self, bh: &Hash, marker: &(u64, u64)) -> Result<(), Error> {
+	fn save_block_marker(&self, bh: &Hash, marker: &BlockMarker) -> Result<(), Error> {
 		self.db
 			.put_ser(&to_key(BLOCK_MARKER_PREFIX, &mut bh.to_vec())[..], &marker)
 	}
 
-	fn get_block_marker(&self, bh: &Hash) -> Result<(u64, u64), Error> {
+	fn get_block_marker(&self, bh: &Hash) -> Result<BlockMarker, Error> {
 		option_to_not_found(
 			self.db
 				.get_ser(&to_key(BLOCK_MARKER_PREFIX, &mut bh.to_vec())),
@@ -229,29 +238,6 @@ impl ChainStore for ChainKVStore {
 			.delete(&to_key(BLOCK_MARKER_PREFIX, &mut bh.to_vec()))
 	}
 
-	fn save_block_pmmr_file_metadata(
-		&self,
-		h: &Hash,
-		md: &PMMRFileMetadataCollection,
-	) -> Result<(), Error> {
-		self.db.put_ser(
-			&to_key(BLOCK_PMMR_FILE_METADATA_PREFIX, &mut h.to_vec())[..],
-			&md,
-		)
-	}
-
-	fn get_block_pmmr_file_metadata(&self, h: &Hash) -> Result<PMMRFileMetadataCollection, Error> {
-		option_to_not_found(
-			self.db
-				.get_ser(&to_key(BLOCK_PMMR_FILE_METADATA_PREFIX, &mut h.to_vec())),
-		)
-	}
-
-	fn delete_block_pmmr_file_metadata(&self, h: &Hash) -> Result<(), Error> {
-		self.db
-			.delete(&to_key(BLOCK_PMMR_FILE_METADATA_PREFIX, &mut h.to_vec())[..])
-	}
-
 	/// Maintain consistency of the "header_by_height" index by traversing back
 	/// through the current chain and updating "header_by_height" until we reach
 	/// a block_header that is consistent with its height (everything prior to
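All of the store changes above go through grin's `to_key` helper, which namespaces every record in the shared key-value store with a single prefix byte ('m' for block markers, 'c' for output positions, and so on) followed by the raw key bytes. A simplified standalone sketch of that layout (the real helper takes a `&mut Vec<u8>`; this version just copies):

```rust
const BLOCK_MARKER_PREFIX: u8 = b'm';

/// Simplified version of the store's `to_key` helper: one prefix byte
/// followed by the raw key bytes (here, a 32-byte block hash).
fn to_key(prefix: u8, k: &[u8]) -> Vec<u8> {
    let mut key = Vec::with_capacity(k.len() + 1);
    key.push(prefix);
    key.extend_from_slice(k);
    key
}

fn main() {
    let block_hash = [0u8; 32]; // stand-in for a real block hash
    let key = to_key(BLOCK_MARKER_PREFIX, &block_hash);
    assert_eq!(key.len(), 33);
    assert_eq!(key[0], b'm');
}
```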
diff --git a/chain/src/txhashset.rs b/chain/src/txhashset.rs
index 567a1fbd1..ae84d5524 100644
--- a/chain/src/txhashset.rs
+++ b/chain/src/txhashset.rs
@@ -33,10 +33,10 @@ use core::core::hash::{Hash, Hashed};
 use core::ser::{PMMRIndexHashable, PMMRable};
 
 use grin_store;
-use grin_store::pmmr::{PMMRBackend, PMMRFileMetadata};
+use grin_store::pmmr::PMMRBackend;
 use grin_store::types::prune_noop;
 use keychain::BlindingFactor;
-use types::{ChainStore, Error, PMMRFileMetadataCollection, TxHashSetRoots};
+use types::{BlockMarker, ChainStore, Error, TxHashSetRoots};
 use util::{zip, LOGGER};
 
 const TXHASHSET_SUBDIR: &'static str = "txhashset";
@@ -57,25 +57,16 @@ impl<T> PMMRHandle<T>
 where
 	T: PMMRable + ::std::fmt::Debug,
 {
-	fn new(
-		root_dir: String,
-		file_name: &str,
-		index_md: Option<PMMRFileMetadata>,
-	) -> Result<PMMRHandle<T>, Error> {
+	fn new(root_dir: String, file_name: &str) -> Result<PMMRHandle<T>, Error> {
 		let path = Path::new(&root_dir).join(TXHASHSET_SUBDIR).join(file_name);
 		fs::create_dir_all(path.clone())?;
-		let be = PMMRBackend::new(path.to_str().unwrap().to_string(), index_md)?;
+		let be = PMMRBackend::new(path.to_str().unwrap().to_string())?;
 		let sz = be.unpruned_size()?;
 		Ok(PMMRHandle {
 			backend: be,
 			last_pos: sz,
 		})
 	}
-
-	/// Return last written positions of hash file and data file
-	pub fn last_file_positions(&self) -> PMMRFileMetadata {
-		self.backend.last_file_positions()
-	}
 }
 
 /// An easy to manipulate structure holding the 3 sum trees necessary to
@@ -99,11 +90,7 @@ pub struct TxHashSet {
 
 impl TxHashSet {
 	/// Open an existing or new set of backends for the TxHashSet
-	pub fn open(
-		root_dir: String,
-		commit_index: Arc<ChainStore>,
-		last_file_positions: Option<PMMRFileMetadataCollection>,
-	) -> Result<TxHashSet, Error> {
+	pub fn open(root_dir: String, commit_index: Arc<ChainStore>) -> Result<TxHashSet, Error> {
 		let output_file_path: PathBuf = [&root_dir, TXHASHSET_SUBDIR, OUTPUT_SUBDIR]
 			.iter()
 			.collect();
@@ -119,21 +106,11 @@ impl TxHashSet {
 			.collect();
 		fs::create_dir_all(kernel_file_path.clone())?;
 
-		let mut output_md = None;
-		let mut rproof_md = None;
-		let mut kernel_md = None;
-
-		if let Some(p) = last_file_positions {
-			output_md = Some(p.output_file_md);
-			rproof_md = Some(p.rproof_file_md);
-			kernel_md = Some(p.kernel_file_md);
-		}
-
 		Ok(TxHashSet {
-			output_pmmr_h: PMMRHandle::new(root_dir.clone(), OUTPUT_SUBDIR, output_md)?,
-			rproof_pmmr_h: PMMRHandle::new(root_dir.clone(), RANGE_PROOF_SUBDIR, rproof_md)?,
-			kernel_pmmr_h: PMMRHandle::new(root_dir.clone(), KERNEL_SUBDIR, kernel_md)?,
-			commit_index: commit_index,
+			output_pmmr_h: PMMRHandle::new(root_dir.clone(), OUTPUT_SUBDIR)?,
+			rproof_pmmr_h: PMMRHandle::new(root_dir.clone(), RANGE_PROOF_SUBDIR)?,
+			kernel_pmmr_h: PMMRHandle::new(root_dir.clone(), KERNEL_SUBDIR)?,
+			commit_index,
 		})
 	}
 
@@ -212,19 +189,10 @@ impl TxHashSet {
 	}
 
 	/// Output and kernel MMR indexes at the end of the provided block
-	pub fn indexes_at(&self, bh: &Hash) -> Result<(u64, u64), Error> {
+	pub fn indexes_at(&self, bh: &Hash) -> Result<BlockMarker, Error> {
 		self.commit_index.get_block_marker(bh).map_err(&From::from)
 	}
 
-	/// Last file positions of Output set.. hash file,data file
-	pub fn last_file_metadata(&self) -> PMMRFileMetadataCollection {
-		PMMRFileMetadataCollection::new(
-			self.output_pmmr_h.last_file_positions(),
-			self.rproof_pmmr_h.last_file_positions(),
-			self.kernel_pmmr_h.last_file_positions(),
-		)
-	}
-
 	/// Get sum tree roots
 	/// TODO: Return data instead of hashes
 	pub fn roots(&mut self) -> (Hash, Hash, Hash) {
@@ -314,6 +282,7 @@ where
 	let sizes: (u64, u64, u64);
 	let res: Result<T, Error>;
 	let rollback: bool;
+
 	{
 		let commit_index = trees.commit_index.clone();
 
 		}
 		sizes = extension.sizes();
 	}
+
 	match res {
 		Err(e) => {
 			debug!(LOGGER, "Error returned, discarding txhashset extension.");
@@ -367,7 +337,7 @@ pub struct Extension<'a> {
 	commit_index: Arc<ChainStore>,
 
 	new_output_commits: HashMap<Commitment, u64>,
-	new_block_markers: HashMap<Hash, (u64, u64)>,
+	new_block_markers: HashMap<Hash, BlockMarker>,
 
 	rollback: bool,
 }
@@ -427,11 +397,11 @@
 		}
 
 		// finally, recording the PMMR positions after this block for future rewind
-		let last_output_pos = self.output_pmmr.unpruned_size();
-		let last_kernel_pos = self.kernel_pmmr.unpruned_size();
-		self.new_block_markers
-			.insert(b.hash(), (last_output_pos, last_kernel_pos));
-
+		let marker = BlockMarker {
+			output_pos: self.output_pmmr.unpruned_size(),
+			kernel_pos: self.kernel_pmmr.unpruned_size(),
+		};
+		self.new_block_markers.insert(b.hash(), marker);
 		Ok(())
 	}
 
@@ -440,8 +410,8 @@
 		for (commit, pos) in &self.new_output_commits {
 			self.commit_index.save_output_pos(commit, *pos)?;
 		}
-		for (bh, tag) in &self.new_block_markers {
-			self.commit_index.save_block_marker(bh, tag)?;
+		for (bh, marker) in &self.new_block_markers {
+			self.commit_index.save_block_marker(bh, marker)?;
 		}
 		Ok(())
 	}
@@ -566,35 +536,28 @@
 	pub fn rewind(&mut self, block_header: &BlockHeader) -> Result<(), Error> {
 		let hash = block_header.hash();
 		let height = block_header.height;
-		debug!(LOGGER, "Rewind to header at {} [{}]", height, hash); // keep this
+		debug!(LOGGER, "Rewind to header {} @ {}", height, hash);
 
-		// rewind each MMR
-		let (out_pos_rew, kern_pos_rew) = self.commit_index.get_block_marker(&hash)?;
-		self.rewind_pos(height, out_pos_rew, kern_pos_rew)?;
+		// rewind our MMRs to the appropriate pos
+		// based on block height and block marker
+		let marker = self.commit_index.get_block_marker(&hash)?;
+		self.rewind_to_marker(height, &marker)?;
 		Ok(())
 	}
 
 	/// Rewinds the MMRs to the provided positions, given the output and
 	/// kernel we want to rewind to.
-	fn rewind_pos(
-		&mut self,
-		height: u64,
-		out_pos_rew: u64,
-		kern_pos_rew: u64,
-	) -> Result<(), Error> {
-		debug!(
-			LOGGER,
-			"Rewind txhashset to output pos: {}, kernel pos: {}", out_pos_rew, kern_pos_rew,
-		);
+	fn rewind_to_marker(&mut self, height: u64, marker: &BlockMarker) -> Result<(), Error> {
+		debug!(LOGGER, "Rewind txhashset to {}, {:?}", height, marker);
 
 		self.output_pmmr
-			.rewind(out_pos_rew, height as u32)
+			.rewind(marker.output_pos, height as u32)
 			.map_err(&Error::TxHashSetErr)?;
 		self.rproof_pmmr
-			.rewind(out_pos_rew, height as u32)
+			.rewind(marker.output_pos, height as u32)
 			.map_err(&Error::TxHashSetErr)?;
 		self.kernel_pmmr
-			.rewind(kern_pos_rew, height as u32)
+			.rewind(marker.kernel_pos, height as u32)
 			.map_err(&Error::TxHashSetErr)?;
 
 		Ok(())
@@ -618,15 +581,26 @@ impl<'a> Extension<'a> {
 		}
 	}
 
-	/// Validate the txhashset state against the provided block header.
-	/// Rewinds to that pos for the header first so we see a consistent
-	/// view of the world.
-	/// Note: this is an expensive operation and sets force_rollback
-	/// so the extension is read-only.
-	pub fn validate(&mut self, header: &BlockHeader, skip_rproofs: bool) -> Result<(), Error> {
-		// rewind to the provided header for a consistent view
-		&self.rewind(header)?;
-
+	/// Validate the various MMR roots against the block header.
+	pub fn validate_roots(&self, header: &BlockHeader) -> Result<(), Error> {
+		// If we are validating the genesis block then
+		// we have no outputs or kernels.
+		// So we are done here.
+		if header.height == 0 {
+			return Ok(());
+		}
+
+		let roots = self.roots();
+		if roots.output_root != header.output_root || roots.rproof_root != header.range_proof_root
+			|| roots.kernel_root != header.kernel_root
+		{
+			return Err(Error::InvalidRoot);
+		}
+		Ok(())
+	}
+
+	/// Validate the txhashset state against the provided block header.
+	pub fn validate(&mut self, header: &BlockHeader, skip_rproofs: bool) -> Result<(), Error> {
 		// validate all hashes and sums within the trees
 		if let Err(e) = self.output_pmmr.validate() {
 			return Err(Error::InvalidTxHashSet(e));
@@ -638,12 +612,10 @@ impl<'a> Extension<'a> {
 			return Err(Error::InvalidTxHashSet(e));
 		}
 
-		// validate the tree roots against the block header
-		let roots = self.roots();
-		if roots.output_root != header.output_root || roots.rproof_root != header.range_proof_root
-			|| roots.kernel_root != header.kernel_root
-		{
-			return Err(Error::InvalidRoot);
+		self.validate_roots(header)?;
+
+		if header.height == 0 {
+			return Ok(());
 		}
 
 		// the real magicking: the sum of all kernel excess should equal the sum
diff --git a/chain/src/types.rs b/chain/src/types.rs
index d695dc3f2..b5ba71ec9 100644
--- a/chain/src/types.rs
+++ b/chain/src/types.rs
@@ -25,7 +25,6 @@ use core::core::{block, transaction, Block, BlockHeader};
 use core::ser::{self, Readable, Reader, Writeable, Writer};
 use grin_store as store;
 use grin_store;
-use grin_store::pmmr::PMMRFileMetadata;
 use keychain;
 
 bitflags! {
@@ -321,31 +320,14 @@ pub trait ChainStore: Send + Sync {
 	/// Saves a marker associated with a block recording the MMR positions of
 	/// its last elements.
-	fn save_block_marker(&self, bh: &Hash, marker: &(u64, u64)) -> Result<(), store::Error>;
+	fn save_block_marker(&self, bh: &Hash, marker: &BlockMarker) -> Result<(), store::Error>;
 
 	/// Retrieves a block marker from a block hash.
-	fn get_block_marker(&self, bh: &Hash) -> Result<(u64, u64), store::Error>;
+	fn get_block_marker(&self, bh: &Hash) -> Result<BlockMarker, store::Error>;
 
 	/// Deletes a block marker associated with the provided hash
 	fn delete_block_marker(&self, bh: &Hash) -> Result<(), store::Error>;
 
-	/// Saves information about the last written PMMR file positions for each
-	/// committed block
-	fn save_block_pmmr_file_metadata(
-		&self,
-		h: &Hash,
-		md: &PMMRFileMetadataCollection,
-	) -> Result<(), store::Error>;
-
-	/// Retrieves stored pmmr file metadata information for a given block
-	fn get_block_pmmr_file_metadata(
-		&self,
-		h: &Hash,
-	) -> Result<PMMRFileMetadataCollection, store::Error>;
-
-	/// Delete stored pmmr file metadata information for a given block
-	fn delete_block_pmmr_file_metadata(&self, h: &Hash) -> Result<(), store::Error>;
-
 	/// Saves the provided block header at the corresponding height. Also check
 	/// the consistency of the height chain in store by assuring previous
 	/// headers are also at their respective heights.
@@ -355,61 +337,6 @@ pub trait ChainStore: Send + Sync {
 	fn build_by_height_index(&self, header: &BlockHeader, force: bool) -> Result<(), store::Error>;
 }
 
-/// Single serializable struct to hold metadata about all PMMR file position
-/// for a given block
-#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
-pub struct PMMRFileMetadataCollection {
-	/// file metadata for the output file
-	pub output_file_md: PMMRFileMetadata,
-	/// file metadata for the rangeproof file
-	pub rproof_file_md: PMMRFileMetadata,
-	/// file metadata for the kernel file
-	pub kernel_file_md: PMMRFileMetadata,
-}
-
-impl Writeable for PMMRFileMetadataCollection {
-	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
-		self.output_file_md.write(writer)?;
-		self.rproof_file_md.write(writer)?;
-		self.kernel_file_md.write(writer)?;
-		Ok(())
-	}
-}
-
-impl Readable for PMMRFileMetadataCollection {
-	fn read(reader: &mut Reader) -> Result<PMMRFileMetadataCollection, ser::Error> {
-		Ok(PMMRFileMetadataCollection {
-			output_file_md: PMMRFileMetadata::read(reader)?,
-			rproof_file_md: PMMRFileMetadata::read(reader)?,
-			kernel_file_md: PMMRFileMetadata::read(reader)?,
-		})
-	}
-}
-
-impl PMMRFileMetadataCollection {
-	/// Return empty with all file positions = 0
-	pub fn empty() -> PMMRFileMetadataCollection {
-		PMMRFileMetadataCollection {
-			output_file_md: PMMRFileMetadata::empty(),
-			rproof_file_md: PMMRFileMetadata::empty(),
-			kernel_file_md: PMMRFileMetadata::empty(),
-		}
-	}
-
-	/// Helper to create a new collection
-	pub fn new(
-		output_md: PMMRFileMetadata,
-		rproof_md: PMMRFileMetadata,
-		kernel_md: PMMRFileMetadata,
-	) -> PMMRFileMetadataCollection {
-		PMMRFileMetadataCollection {
-			output_file_md: output_md,
-			rproof_file_md: rproof_md,
-			kernel_file_md: kernel_md,
-		}
-	}
-}
-
 /// Bridge between the chain pipeline and the rest of the system. Handles
 /// downstream processing of valid blocks by the rest of the system, most
 /// importantly the broadcasting of blocks to our peers.
@@ -421,6 +348,44 @@ pub trait ChainAdapter {
 /// Dummy adapter used as a placeholder for real implementations
 pub struct NoopAdapter {}
+
 impl ChainAdapter for NoopAdapter {
 	fn block_accepted(&self, _: &Block, _: Options) {}
 }
+
+/// The output and kernel positions that define the size of the MMRs for a
+/// particular block.
+#[derive(Debug, Clone)]
+pub struct BlockMarker {
+	/// The output (and rangeproof) MMR position of the final output in the
+	/// block
+	pub output_pos: u64,
+	/// The kernel position of the final kernel in the block
+	pub kernel_pos: u64,
+}
+
+impl Writeable for BlockMarker {
+	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
+		writer.write_u64(self.output_pos)?;
+		writer.write_u64(self.kernel_pos)?;
+		Ok(())
+	}
+}
+
+impl Readable for BlockMarker {
+	fn read(reader: &mut Reader) -> Result<BlockMarker, ser::Error> {
+		Ok(BlockMarker {
+			output_pos: reader.read_u64()?,
+			kernel_pos: reader.read_u64()?,
+		})
+	}
+}
+
+impl Default for BlockMarker {
+	fn default() -> BlockMarker {
+		BlockMarker {
+			output_pos: 0,
+			kernel_pos: 0,
+		}
+	}
+}
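`BlockMarker` replaces the bare `(u64, u64)` tuple and serializes to a fixed 16 bytes via `write_u64`/`read_u64`. Assuming grin's serializer writes fixed-width integers big-endian, the on-disk encoding round-trips like this std-only stand-in (illustrative, not the actual `ser` traits):

```rust
use std::io::{self, Read, Write};

/// Standalone mirror of chain::types::BlockMarker, for illustration only.
#[derive(Debug, Default, PartialEq)]
struct BlockMarker {
    output_pos: u64,
    kernel_pos: u64,
}

impl BlockMarker {
    /// Fixed 16-byte encoding: two u64s, assumed big-endian like grin's ser.
    fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
        w.write_all(&self.output_pos.to_be_bytes())?;
        w.write_all(&self.kernel_pos.to_be_bytes())
    }

    fn read<R: Read>(r: &mut R) -> io::Result<BlockMarker> {
        let mut buf = [0u8; 8];
        r.read_exact(&mut buf)?;
        let output_pos = u64::from_be_bytes(buf);
        r.read_exact(&mut buf)?;
        let kernel_pos = u64::from_be_bytes(buf);
        Ok(BlockMarker { output_pos, kernel_pos })
    }
}

fn main() -> io::Result<()> {
    let marker = BlockMarker { output_pos: 42, kernel_pos: 7 };
    let mut bytes = Vec::new();
    marker.write(&mut bytes)?;
    assert_eq!(bytes.len(), 16);

    let decoded = BlockMarker::read(&mut bytes.as_slice())?;
    assert_eq!(decoded, marker);
    Ok(())
}
```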
diff --git a/chain/tests/data_file_integrity.rs b/chain/tests/data_file_integrity.rs
index f88fa3daa..313b0f2a6 100644
--- a/chain/tests/data_file_integrity.rs
+++ b/chain/tests/data_file_integrity.rs
@@ -94,13 +94,13 @@ fn data_files() {
 
 		let head = Tip::from_block(&b.header);
 
-		// Check we have indexes for the last block and the block previous
+		// Check we have block markers for the last block and the block previous
 		let cur_pmmr_md = chain
-			.get_block_pmmr_file_metadata(&head.last_block_h)
-			.expect("block pmmr file data doesn't exist");
+			.get_block_marker(&head.last_block_h)
+			.expect("block marker does not exist");
 		chain
-			.get_block_pmmr_file_metadata(&head.prev_block_h)
-			.expect("previous block pmmr file data doesn't exist");
+			.get_block_marker(&head.prev_block_h)
+			.expect("prev block marker does not exist");
 
 		println!("Cur_pmmr_md: {:?}", cur_pmmr_md);
 
 		chain.validate(false).unwrap();
diff --git a/core/src/core/pmmr.rs b/core/src/core/pmmr.rs
index 1ef0a2cb1..7bebbc771 100644
--- a/core/src/core/pmmr.rs
+++ b/core/src/core/pmmr.rs
@@ -799,6 +799,10 @@ impl PruneList {
 /// side of the range, and navigates toward lower siblings toward the right
 /// of the range.
 pub fn peaks(num: u64) -> Vec<u64> {
+	if num == 0 {
+		return vec![];
+	}
+
 	// detecting an invalid mountain range, when siblings exist but no parent
 	// exists
 	if bintree_postorder_height(num + 1) > bintree_postorder_height(num) {
@@ -838,6 +842,10 @@ pub fn peaks(num: u64) -> Vec<u64> {
 /// The number of leaves nodes in a MMR of the provided size. Uses peaks to
 /// get the positions of all full binary trees and uses the height of these
 pub fn n_leaves(mut sz: u64) -> u64 {
+	if sz == 0 {
+		return 0;
+	}
+
 	while bintree_postorder_height(sz + 1) > 0 {
 		sz += 1;
 	}
@@ -1140,16 +1148,6 @@ mod test {
 		}
 	}
 
-	#[test]
-	fn test_leaf_index() {
-		assert_eq!(n_leaves(1), 1);
-		assert_eq!(n_leaves(2), 2);
-		assert_eq!(n_leaves(4), 3);
-		assert_eq!(n_leaves(5), 4);
-		assert_eq!(n_leaves(8), 5);
-		assert_eq!(n_leaves(9), 6);
-	}
-
 	#[test]
 	fn some_all_ones() {
 		for n in vec![1, 7, 255] {
@@ -1190,15 +1188,22 @@
 		}
 	}
 
-	// Trst our n_leaves impl does the right thing for various MMR sizes
 	#[test]
-	fn various_n_leaves() {
+	fn test_n_leaves() {
+		// make sure we handle an empty MMR correctly
+		assert_eq!(n_leaves(0), 0);
+
+		// and various sizes on non-empty MMRs
 		assert_eq!(n_leaves(1), 1);
-		// 2 is not a valid size for a tree, but n_leaves rounds up to next valid tree
-		// size
 		assert_eq!(n_leaves(2), 2);
 		assert_eq!(n_leaves(3), 2);
+		assert_eq!(n_leaves(4), 3);
+		assert_eq!(n_leaves(5), 4);
+		assert_eq!(n_leaves(6), 4);
 		assert_eq!(n_leaves(7), 4);
+		assert_eq!(n_leaves(8), 5);
+		assert_eq!(n_leaves(9), 6);
+		assert_eq!(n_leaves(10), 6);
 	}
 
 	/// Find parent and sibling positions for various node positions.
@@ -1281,7 +1286,13 @@
 	#[test]
 	fn some_peaks() {
 		// 0 0 1 0 0 1 2 0 0 1 0 0 1 2 3
+		let empty: Vec<u64> = vec![];
+
+		// make sure we handle an empty MMR correctly
+		assert_eq!(peaks(0), empty);
+
+		// and various non-empty MMRs
 		assert_eq!(peaks(1), [1]);
 		assert_eq!(peaks(2), empty);
 		assert_eq!(peaks(3), [3]);
@@ -1921,19 +1932,6 @@
 		// assert_eq!(pl.get_shift(17), Some(11));
 	}
 
-	#[test]
-	fn n_size_check() {
-		assert_eq!(n_leaves(1), 1);
-		assert_eq!(n_leaves(2), 2);
-		assert_eq!(n_leaves(3), 2);
-		assert_eq!(n_leaves(4), 3);
-		assert_eq!(n_leaves(5), 4);
-		assert_eq!(n_leaves(7), 4);
-		assert_eq!(n_leaves(8), 5);
-		assert_eq!(n_leaves(9), 6);
-		assert_eq!(n_leaves(10), 6);
-	}
-
 	#[test]
 	fn check_all_ones() {
 		for i in 0..1000000 {
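The new zero-size guards in `peaks` and `n_leaves` fall out of the MMR shape: a size-0 MMR has no peaks and no leaves, and any valid size decomposes into strictly shrinking perfect trees of `2^h - 1` nodes, rounding up when a size lands mid-parent. The following standalone sketch — a different algorithm from grin's `bintree_postorder_height`-based one, shown only to make the arithmetic concrete — reproduces the values asserted in `test_n_leaves` above:

```rust
/// Decompose an MMR size into perfect-binary-tree sizes (each 2^h - 1),
/// largest first. Returns None when the size is not a valid MMR size
/// (i.e. two sibling trees exist without their parent node).
fn peak_sizes(mut size: u64) -> Option<Vec<u64>> {
    let mut sizes: Vec<u64> = vec![];
    while size > 0 {
        // largest 2^k - 1 that fits in the remaining size
        let mut tree = u64::MAX >> size.leading_zeros();
        while tree > size {
            tree >>= 1;
        }
        if sizes.last().map_or(false, |&prev| tree >= prev) {
            return None; // equal-height neighbours: a parent is missing
        }
        sizes.push(tree);
        size -= tree;
    }
    Some(sizes)
}

/// Leaf count for an MMR of `size` nodes; invalid sizes round up,
/// mirroring the loop at the top of grin's n_leaves.
fn n_leaves(size: u64) -> u64 {
    match peak_sizes(size) {
        // a perfect tree of t = 2^h - 1 nodes has (t + 1) / 2 leaves
        Some(sizes) => sizes.iter().map(|t| (t + 1) / 2).sum(),
        None => n_leaves(size + 1),
    }
}

fn main() {
    assert_eq!(n_leaves(0), 0); // the new empty-MMR case
    assert_eq!(n_leaves(1), 1);
    assert_eq!(n_leaves(2), 2); // invalid size, rounds up to 3
    assert_eq!(n_leaves(3), 2);
    assert_eq!(n_leaves(10), 6);
}
```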
diff --git a/servers/src/grin/sync.rs b/servers/src/grin/sync.rs
index 425ce99d8..533b4348c 100644
--- a/servers/src/grin/sync.rs
+++ b/servers/src/grin/sync.rs
@@ -205,8 +205,10 @@ fn body_sync(peers: Arc<Peers>, chain: Arc<chain::Chain>) {
 	);
 
 	for hash in hashes_to_get.clone() {
-		// TODO - Is there a threshold where we sync from most_work_peer (not
-		// more_work_peer)?
+		// TODO - Is there a threshold where we sync from most_work_peer
+		// (not more_work_peer)?
+		// TODO - right now we *only* sync blocks from a full archival node
+		// even if we are requesting recent blocks (i.e. during a fast sync)
 		let peer = peers.more_work_archival_peer();
 		if let Some(peer) = peer {
 			if let Ok(peer) = peer.try_read() {
diff --git a/store/src/pmmr.rs b/store/src/pmmr.rs
index 1cc890309..d099dd40f 100644
--- a/store/src/pmmr.rs
+++ b/store/src/pmmr.rs
@@ -18,10 +18,11 @@ use std::io;
 use std::marker;
 
 use core::core::pmmr::{self, family, Backend};
-use core::ser::{self, PMMRable, Readable, Reader, Writeable, Writer};
 use core::core::hash::Hash;
-use util::LOGGER;
+use core::ser;
+use core::ser::PMMRable;
 use types::*;
+use util::LOGGER;
 
 const PMMR_HASH_FILE: &'static str = "pmmr_hash.bin";
 const PMMR_DATA_FILE: &'static str = "pmmr_data.bin";
@@ -31,48 +32,6 @@ const PMMR_PRUNED_FILE: &'static str = "pmmr_pruned.bin";
 /// Maximum number of nodes in the remove log before it gets flushed
 pub const RM_LOG_MAX_NODES: usize = 10_000;
 
-/// Metadata for the PMMR backend's AppendOnlyFile, which can be serialized and
-/// stored
-#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
-pub struct PMMRFileMetadata {
-	/// The block height represented by these indices in the file
-	pub block_height: u64,
-	/// last written index of the hash file
-	pub last_hash_file_pos: u64,
-	/// last written index of the data file
-	pub last_data_file_pos: u64,
-}
-
-impl Writeable for PMMRFileMetadata {
-	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
-		writer.write_u64(self.block_height)?;
-		writer.write_u64(self.last_hash_file_pos)?;
-		writer.write_u64(self.last_data_file_pos)?;
-		Ok(())
-	}
-}
-
-impl Readable for PMMRFileMetadata {
-	fn read(reader: &mut Reader) -> Result<PMMRFileMetadata, ser::Error> {
-		Ok(PMMRFileMetadata {
-			block_height: reader.read_u64()?,
-			last_hash_file_pos: reader.read_u64()?,
-			last_data_file_pos: reader.read_u64()?,
-		})
-	}
-}
-
-impl PMMRFileMetadata {
-	/// Return fields with all positions = 0
-	pub fn empty() -> PMMRFileMetadata {
-		PMMRFileMetadata {
-			block_height: 0,
-			last_hash_file_pos: 0,
-			last_data_file_pos: 0,
-		}
-	}
-}
-
 /// PMMR persistent backend implementation. Relies on multiple facilities to
 /// handle writing, reading and pruning.
 ///
@@ -184,22 +143,27 @@ where
 		}
 	}
 
+	/// Rewind the PMMR backend to the given position.
+	/// Use the index to rewind the rm_log correctly (based on block height).
 	fn rewind(&mut self, position: u64, index: u32) -> Result<(), String> {
+		// Rewind the rm_log based on index (block height)
 		self.rm_log
 			.rewind(index)
 			.map_err(|e| format!("Could not truncate remove log: {}", e))?;
 
-		// Hash file
+		// Rewind the hash file accounting for pruned/compacted pos
 		let shift = self.pruned_nodes.get_shift(position).unwrap_or(0);
-		let record_len = 32;
-		let file_pos = (position - shift) * (record_len as u64);
+		let record_len = 32 as u64;
+		let file_pos = (position - shift) * record_len;
 		self.hash_file.rewind(file_pos);
 
-		// Data file
+		// Rewind the data file accounting for pruned/compacted pos
 		let leaf_shift = self.pruned_nodes.get_leaf_shift(position).unwrap_or(0);
 		let flatfile_pos = pmmr::n_leaves(position);
-		let file_pos = (flatfile_pos - leaf_shift) * T::len() as u64;
-		self.data_file.rewind(file_pos as u64);
+		let record_len = T::len() as u64;
+		let file_pos = (flatfile_pos - leaf_shift) * record_len;
+		self.data_file.rewind(file_pos);
+
 		Ok(())
 	}
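The two truncation points computed in `rewind` above follow from the backend's file layout: the hash file holds one 32-byte hash per unpruned MMR node, the data file one fixed-width record per unpruned leaf, and `shift`/`leaf_shift` discount whatever compaction already removed. A small sketch of just that arithmetic (the function name is illustrative; `data_len` stands in for `T::len()`):

```rust
const HASH_RECORD_LEN: u64 = 32;

/// Compute where to truncate the hash and data files when rewinding to
/// `position`, discounting nodes/leaves already removed by compaction.
fn rewind_file_positions(
    position: u64,   // MMR position we are rewinding to
    shift: u64,      // pruned_nodes.get_shift(position)
    leaf_shift: u64, // pruned_nodes.get_leaf_shift(position)
    n_leaves: u64,   // pmmr::n_leaves(position)
    data_len: u64,   // serialized size of one element, i.e. T::len()
) -> (u64, u64) {
    // hash file: one 32-byte hash per unpruned node up to `position`
    let hash_file_pos = (position - shift) * HASH_RECORD_LEN;
    // data file: one record per unpruned leaf up to `position`
    let data_file_pos = (n_leaves - leaf_shift) * data_len;
    (hash_file_pos, data_file_pos)
}

fn main() {
    // e.g. rewinding an uncompacted MMR of 10 nodes (6 leaves) storing
    // 16-byte records keeps 10 hashes and 6 data records
    assert_eq!(rewind_file_positions(10, 0, 0, 6, 16), (320, 96));
}
```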
@@ -218,11 +182,12 @@ where
 	fn dump_stats(&self) {
 		debug!(
 			LOGGER,
-			"pmmr backend: unpruned - {}, hashes - {}, data - {}, rm_log - {:?}",
+			"pmmr backend: unpruned: {}, hashes: {}, data: {}, rm_log: {}, prune_list: {}",
 			self.unpruned_size().unwrap_or(0),
 			self.hash_size().unwrap_or(0),
 			self.data_size().unwrap_or(0),
-			self.rm_log.removed
+			self.rm_log.removed.len(),
+			self.pruned_nodes.pruned_nodes.len(),
 		);
 	}
 }
@@ -231,32 +196,23 @@ impl<T> PMMRBackend<T>
 where
 	T: PMMRable + ::std::fmt::Debug,
 {
-	/// Instantiates a new PMMR backend that will use the provided directly to
-	/// store its files.
-	pub fn new(data_dir: String, file_md: Option<PMMRFileMetadata>) -> io::Result<PMMRBackend<T>> {
-		let (height, hash_to_pos, data_to_pos) = match file_md {
-			Some(m) => (
-				m.block_height as u32,
-				m.last_hash_file_pos,
-				m.last_data_file_pos,
-			),
-			None => (0, 0, 0),
-		};
-		let hash_file =
-			AppendOnlyFile::open(format!("{}/{}", data_dir, PMMR_HASH_FILE), hash_to_pos)?;
-		let rm_log = RemoveLog::open(format!("{}/{}", data_dir, PMMR_RM_LOG_FILE), height)?;
+	/// Instantiates a new PMMR backend.
+	/// Use the provided dir to store its files.
+	pub fn new(data_dir: String) -> io::Result<PMMRBackend<T>> {
 		let prune_list = read_ordered_vec(format!("{}/{}", data_dir, PMMR_PRUNED_FILE), 8)?;
-		let data_file =
-			AppendOnlyFile::open(format!("{}/{}", data_dir, PMMR_DATA_FILE), data_to_pos)?;
+		let pruned_nodes = pmmr::PruneList {
+			pruned_nodes: prune_list,
+		};
+		let rm_log = RemoveLog::open(format!("{}/{}", data_dir, PMMR_RM_LOG_FILE))?;
+		let hash_file = AppendOnlyFile::open(format!("{}/{}", data_dir, PMMR_HASH_FILE))?;
+		let data_file = AppendOnlyFile::open(format!("{}/{}", data_dir, PMMR_DATA_FILE))?;
 
 		Ok(PMMRBackend {
-			data_dir: data_dir,
-			hash_file: hash_file,
-			data_file: data_file,
-			rm_log: rm_log,
-			pruned_nodes: pmmr::PruneList {
-				pruned_nodes: prune_list,
-			},
+			data_dir,
+			hash_file,
+			data_file,
+			rm_log,
+			pruned_nodes,
 			_marker: marker::PhantomData,
 		})
 	}
@@ -317,13 +273,13 @@ where
 	}
 
 	/// Return last written buffer positions for the hash file and the data file
-	pub fn last_file_positions(&self) -> PMMRFileMetadata {
-		PMMRFileMetadata {
-			block_height: 0,
-			last_hash_file_pos: self.hash_file.last_buffer_pos() as u64,
-			last_data_file_pos: self.data_file.last_buffer_pos() as u64,
-		}
-	}
+	// pub fn last_file_positions(&self) -> PMMRFileMetadata {
+	// 	PMMRFileMetadata {
+	// 		block_height: 0,
+	// 		last_hash_file_pos: self.hash_file.last_buffer_pos() as u64,
+	// 		last_data_file_pos: self.data_file.last_buffer_pos() as u64,
+	// 	}
+	// }
 
 	/// Checks the length of the remove log to see if it should get compacted.
 	/// If so, the remove log is flushed into the pruned list, which itself gets
@@ -417,14 +373,14 @@ where
 			tmp_prune_file_hash.clone(),
 			format!("{}/{}", self.data_dir, PMMR_HASH_FILE),
 		)?;
-		self.hash_file = AppendOnlyFile::open(format!("{}/{}", self.data_dir, PMMR_HASH_FILE), 0)?;
+		self.hash_file = AppendOnlyFile::open(format!("{}/{}", self.data_dir, PMMR_HASH_FILE))?;
 
 		// 5. Rename the compact copy of the data file and reopen it.
 		fs::rename(
 			tmp_prune_file_data.clone(),
 			format!("{}/{}", self.data_dir, PMMR_DATA_FILE),
 		)?;
-		self.data_file = AppendOnlyFile::open(format!("{}/{}", self.data_dir, PMMR_DATA_FILE), 0)?;
+		self.data_file = AppendOnlyFile::open(format!("{}/{}", self.data_dir, PMMR_DATA_FILE))?;
 
 		// 6. Truncate the rm log based on pos removed.
 		// Excluding roots which remain in rm log.
diff --git a/store/src/types.rs b/store/src/types.rs
index d059d9dde..303eaaa46 100644
--- a/store/src/types.rs
+++ b/store/src/types.rs
@@ -49,9 +49,8 @@ pub struct AppendOnlyFile {
 }
 
 impl AppendOnlyFile {
-	/// Open a file (existing or not) as append-only, backed by a mmap. Sets
-	/// the last written pos to to_pos if > 0, otherwise the end of the file
-	pub fn open(path: String, to_pos: u64) -> io::Result<AppendOnlyFile> {
+	/// Open a file (existing or not) as append-only, backed by a mmap.
+	pub fn open(path: String) -> io::Result<AppendOnlyFile> {
 		let file = OpenOptions::new()
 			.read(true)
 			.append(true)
@@ -65,13 +64,10 @@ impl AppendOnlyFile {
 			buffer: vec![],
 			buffer_start_bak: 0,
 		};
+		// if we have a non-empty file then mmap it.
 		if let Ok(sz) = aof.size() {
-			let mut buf_start = sz;
-			if to_pos > 0 && to_pos <= buf_start {
-				buf_start = to_pos;
-			}
-			if buf_start > 0 {
-				aof.buffer_start = buf_start as usize;
+			if sz > 0 {
+				aof.buffer_start = sz as usize;
 				aof.mmap = Some(unsafe { memmap::Mmap::map(&aof.file)? });
 			}
 		}
@@ -149,6 +145,11 @@ impl AppendOnlyFile {
 			return vec![];
 		}
 		let mmap = self.mmap.as_ref().unwrap();
+
+		if mmap.len() < (offset + length) {
+			return vec![];
+		}
+
 		(&mmap[offset..(offset + length)]).to_vec()
 	}
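The added length check in `read` guards a sharp edge: slicing a Rust buffer past its end panics rather than returning a short read, so a stale offset (say, a marker pointing past a truncated file) would previously have taken the node down. A minimal illustration of the guarded pattern:

```rust
/// Mirror of the guarded read: return an empty Vec instead of panicking
/// when the requested range extends past the mapped buffer.
fn read_bytes(buf: &[u8], offset: usize, length: usize) -> Vec<u8> {
    if buf.len() < offset + length {
        return vec![];
    }
    buf[offset..(offset + length)].to_vec()
}

fn main() {
    let mmap = [1u8, 2, 3, 4];
    assert_eq!(read_bytes(&mmap, 1, 2), vec![2, 3]);
    // out of range: an unchecked `&mmap[3..5]` would panic here
    assert!(read_bytes(&mmap, 3, 2).is_empty());
}
```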
@@ -245,21 +246,16 @@ pub struct RemoveLog {
 }
 
 impl RemoveLog {
-	/// Open the remove log file. The content of the file will be read in memory
-	/// for fast checking.
-	pub fn open(path: String, rewind_to_index: u32) -> io::Result<RemoveLog> {
+	/// Open the remove log file.
+	/// The content of the file will be read in memory for fast checking.
+	pub fn open(path: String) -> io::Result<RemoveLog> {
 		let removed = read_ordered_vec(path.clone(), 12)?;
-		let mut rl = RemoveLog {
+		Ok(RemoveLog {
 			path: path,
 			removed: removed,
 			removed_tmp: vec![],
 			removed_bak: vec![],
-		};
-		if rewind_to_index > 0 {
-			rl.rewind(rewind_to_index)?;
-			rl.flush()?;
-		}
-		Ok(rl)
+		})
 	}
 
 	/// Rewinds the remove log back to the provided index.
diff --git a/store/tests/pmmr.rs b/store/tests/pmmr.rs
index 24fdfe752..5e470444c 100644
--- a/store/tests/pmmr.rs
+++ b/store/tests/pmmr.rs
@@ -26,7 +26,7 @@ use store::types::prune_noop;
 #[test]
 fn pmmr_append() {
 	let (data_dir, elems) = setup("append");
-	let mut backend = store::pmmr::PMMRBackend::new(data_dir.to_string(), None).unwrap();
+	let mut backend = store::pmmr::PMMRBackend::new(data_dir.to_string()).unwrap();
 
 	// adding first set of 4 elements and sync
 	let mut mmr_size = load(0, &elems[0..4], &mut backend);
@@ -76,7 +76,7 @@ fn pmmr_compact_leaf_sibling() {
 	let (data_dir, elems) = setup("compact_leaf_sibling");
 
 	// setup the mmr store with all elements
-	let mut backend = store::pmmr::PMMRBackend::new(data_dir.to_string(), None).unwrap();
+	let mut backend = store::pmmr::PMMRBackend::new(data_dir.to_string()).unwrap();
 	let mmr_size = load(0, &elems[..], &mut backend);
 	backend.sync().unwrap();
 
@@ -146,7 +146,7 @@ fn pmmr_prune_compact() {
 	let (data_dir, elems) = setup("prune_compact");
 
 	// setup the mmr store with all elements
-	let mut backend = store::pmmr::PMMRBackend::new(data_dir.to_string(), None).unwrap();
+	let mut backend = store::pmmr::PMMRBackend::new(data_dir.to_string()).unwrap();
 	let mmr_size = load(0, &elems[..], &mut backend);
 	backend.sync().unwrap();
 
@@ -194,7 +194,7 @@ fn pmmr_reload() {
 	let (data_dir, elems) = setup("reload");
 
 	// set everything up with an initial backend
-	let mut backend = store::pmmr::PMMRBackend::new(data_dir.to_string(), None).unwrap();
+	let mut backend = store::pmmr::PMMRBackend::new(data_dir.to_string()).unwrap();
 
 	let mmr_size = load(0, &elems[..], &mut backend);
 
@@ -248,7 +248,7 @@ fn pmmr_reload() {
 	// create a new backend referencing the data files
 	// and check everything still works as expected
 	{
-		let mut backend = store::pmmr::PMMRBackend::new(data_dir.to_string(), None).unwrap();
+		let mut backend = store::pmmr::PMMRBackend::new(data_dir.to_string()).unwrap();
 		assert_eq!(backend.unpruned_size().unwrap(), mmr_size);
 		{
 			let pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
@@ -286,7 +286,7 @@ fn pmmr_reload() {
 #[test]
 fn pmmr_rewind() {
 	let (data_dir, elems) = setup("rewind");
-	let mut backend = store::pmmr::PMMRBackend::new(data_dir.clone(), None).unwrap();
+	let mut backend = store::pmmr::PMMRBackend::new(data_dir.clone()).unwrap();
 
 	// adding elements and keeping the corresponding root
 	let mut mmr_size = load(0, &elems[0..4], &mut backend);
@@ -371,7 +371,7 @@ fn pmmr_rewind() {
 #[test]
 fn pmmr_compact_single_leaves() {
 	let (data_dir, elems) = setup("compact_single_leaves");
-	let mut backend = store::pmmr::PMMRBackend::new(data_dir.clone(), None).unwrap();
+	let mut backend = store::pmmr::PMMRBackend::new(data_dir.clone()).unwrap();
 	let mmr_size = load(0, &elems[0..5], &mut backend);
 	backend.sync().unwrap();
 
@@ -403,7 +403,7 @@ fn pmmr_compact_single_leaves() {
 #[test]
 fn pmmr_compact_entire_peak() {
 	let (data_dir, elems) = setup("compact_entire_peak");
-	let mut backend = store::pmmr::PMMRBackend::new(data_dir.clone(), None).unwrap();
+	let mut backend = store::pmmr::PMMRBackend::new(data_dir.clone()).unwrap();
 	let mmr_size = load(0, &elems[0..5], &mut backend);
 	backend.sync().unwrap();
 
@@ -442,7 +442,7 @@ fn pmmr_compact_entire_peak() {
 #[test]
 fn pmmr_compact_horizon() {
 	let (data_dir, elems) = setup("compact_horizon");
-	let mut backend = store::pmmr::PMMRBackend::new(data_dir.clone(), None).unwrap();
+	let mut backend = store::pmmr::PMMRBackend::new(data_dir.clone()).unwrap();
 	let mmr_size = load(0, &elems[..], &mut backend);
 	backend.sync().unwrap();
 
@@ -517,8 +517,7 @@ fn pmmr_compact_horizon() {
 	// recheck stored data
 	{
 		// recreate backend
-		let backend =
-			store::pmmr::PMMRBackend::<TestElem>::new(data_dir.to_string(), None).unwrap();
+		let backend = store::pmmr::PMMRBackend::<TestElem>::new(data_dir.to_string()).unwrap();
 		assert_eq!(backend.data_size().unwrap(), 17);
 		assert_eq!(backend.hash_size().unwrap(), 33);
 
@@ -532,8 +531,7 @@ fn pmmr_compact_horizon() {
 	}
 
 	{
-		let mut backend =
-			store::pmmr::PMMRBackend::<TestElem>::new(data_dir.to_string(), None).unwrap();
+		let mut backend = store::pmmr::PMMRBackend::<TestElem>::new(data_dir.to_string()).unwrap();
 
 		{
 			let mut pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
@@ -549,8 +547,7 @@ fn pmmr_compact_horizon() {
 	// recheck stored data
 	{
 		// recreate backend
-		let backend =
-			store::pmmr::PMMRBackend::<TestElem>::new(data_dir.to_string(), None).unwrap();
+		let backend = store::pmmr::PMMRBackend::<TestElem>::new(data_dir.to_string()).unwrap();
 		// 0010012001001230