From 53414ae1059fd402bee1e1d5926ec0159262d848 Mon Sep 17 00:00:00 2001 From: John Tromp Date: Fri, 26 Nov 2021 12:25:10 +0100 Subject: [PATCH] Fixmmr part2 (#3666) * use 0-based positions in methods pmmr_leaf_to_insertion_index and bintree_postorder_height; add round_up_to_leaf_pos method * use 0-based positions in method insertion_to_pmmr_index * use 0-based positions in method is_leaf * use 0-based positions in method family() * use 0-based positions in method is_left_sibling * use 0-based positions in method family_branch * use 0-based positions in methods bintree_{left,right}most * use 0-based positions in method bintree_pos_iter * use 0-based positions in method bintree_range * use 0-based positions in method bintree_leaf_pos_iter * rename last_pos in MMR related structs to size * use 0-based positions in method prune * use 0-based positions in method push and apply_output return value * use 0-based position argument of method merkle_proof * use 0-based outputs in method pmmr::peaks * fix peaks() code comments * refix peaks() code comments * use 0-based positions in method get_peak_from_file * use 0-based positions in methods get_data_from_file * use 0-based positions in methods get_from_file * use 0-based positions in methods get_data * use 0-based positions in methods get_hash * use 0-based positions in method peak_path * use 0-based positions in method bag_the_rhs * use 0-based positions in method Backend::remove * use 0-based positions in method leaf_pos_iter * use 0-based positions in method self.LeafSet::includes * use 0-based positions in methods self.LeafSet::{add,remove} * use 0-based positions in methods is_pruned,is_pruned_root,is_compacted * use 0-based positions in methods PruneList::append * use 0-based positions in methods append_pruned_subtree * use 0-based positions in method calculate_next_leaf_shift * use 0-based positions in method append_single * use 0-based positions in method calculate_next_shift * use 0-based positions in method segment_pos_range * use 0-based positions in method reconstruct_root * use 0-based positions in method validate_with * use 0-based positions in method validate * rename size (formerly last_pos) to mmr_size * use 0-based positions in Segment's hash_pos and leaf_pos * minimize use of saturating_sub(1) and rename some pos/idx to size * use 0-based positions in methods get_output_pos * use 0-based positions in method get_unspent_output_at * use 0-based positions in method get_header_hash * use 0-based positions in methods MerkleProof::verify{,_consume} * use 0-based positions in method cleanup_subtree * don't allow 0 in prunelist bitmap * use 0-based positions in methods get_{,leaf_}shift * rename some 1-based pos to pos1; identify TODO * Address yeastplume's PR review comments --- api/src/handlers/utils.rs | 2 +- chain/src/chain.rs | 23 +- chain/src/store.rs | 4 +- chain/src/txhashset/bitmap_accumulator.rs | 28 +- chain/src/txhashset/txhashset.rs | 219 +++++++-------- chain/src/txhashset/utxo_view.rs | 26 +- chain/tests/process_block_cut_through.rs | 3 + core/src/core/merkle_proof.rs | 24 +- core/src/core/pmmr/backend.rs | 14 +- core/src/core/pmmr/pmmr.rs | 310 +++++++++------------- core/src/core/pmmr/readonly_pmmr.rs | 76 +++--- core/src/core/pmmr/rewindable_pmmr.rs | 8 +- core/src/core/pmmr/segment.rs | 234 ++++++++-------- core/src/core/pmmr/vec_backend.rs | 38 +-- core/tests/merkle_proof.rs | 84 +++--- core/tests/pmmr.rs | 257 +++++++++--------- core/tests/segment.rs | 2 +- core/tests/vec_backend.rs | 4 +- store/src/leaf_set.rs | 14 
+- store/src/pmmr.rs | 103 +++---- store/src/prune_list.rs | 177 ++++++------ store/src/types.rs | 5 +- store/tests/pmmr.rs | 270 +++++++++---------- store/tests/prune_list.rs | 210 +++++++-------- store/tests/segment.rs | 21 +- store/tests/utxo_set_perf.rs | 6 +- 26 files changed, 1044 insertions(+), 1118 deletions(-) diff --git a/api/src/handlers/utils.rs b/api/src/handlers/utils.rs index 35df9ba13..3986a0aa9 100644 --- a/api/src/handlers/utils.rs +++ b/api/src/handlers/utils.rs @@ -71,7 +71,7 @@ pub fn get_output_v2( None => return Ok(None), }; - let output = chain.get_unspent_output_at(pos.pos)?; + let output = chain.get_unspent_output_at(pos.pos - 1)?; let header = if include_merkle_proof && output.is_coinbase() { chain.get_header_by_height(pos.height).ok() } else { diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 34835efd9..a3f2cee5b 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -557,11 +557,11 @@ impl Chain { } /// Retrieves an unspent output using its PMMR position - pub fn get_unspent_output_at(&self, pos: u64) -> Result { + pub fn get_unspent_output_at(&self, pos0: u64) -> Result { let header_pmmr = self.header_pmmr.read(); let txhashset = self.txhashset.read(); txhashset::utxo_view(&header_pmmr, &txhashset, |utxo, _| { - utxo.get_unspent_output_at(pos) + utxo.get_unspent_output_at(pos0) }) } @@ -1267,7 +1267,7 @@ impl Chain { let txhashset = self.txhashset.read(); let last_index = match max_pmmr_index { Some(i) => i, - None => txhashset.highest_output_insertion_index(), + None => txhashset.output_mmr_size(), }; let outputs = txhashset.outputs_by_pmmr_index(start_index, max_count, max_pmmr_index); let rangeproofs = @@ -1296,13 +1296,14 @@ impl Chain { None => self.head_header()?.height, }; // Return headers at the given heights - let prev_to_start_header = - self.get_header_by_height(start_block_height.saturating_sub(1))?; - let end_header = self.get_header_by_height(end_block_height)?; - Ok(( - prev_to_start_header.output_mmr_size + 1, - end_header.output_mmr_size, - )) + let start_mmr_size = if start_block_height == 0 { + 0 + } else { + self.get_header_by_height(start_block_height - 1)? + .output_mmr_size + 1 + }; + let end_mmr_size = self.get_header_by_height(end_block_height)?.output_mmr_size; + Ok((start_mmr_size, end_mmr_size)) } /// Orphans pool size @@ -1545,7 +1546,7 @@ fn setup_head( batch.save_block_header(&genesis.header)?; } - if header_pmmr.last_pos == 0 { + if header_pmmr.size == 0 { txhashset::header_extending(header_pmmr, &mut batch, |ext, _| { ext.apply_header(&genesis.header) })?; diff --git a/chain/src/store.rs b/chain/src/store.rs index d89112ca3..31ebb7ef7 100644 --- a/chain/src/store.rs +++ b/chain/src/store.rs @@ -114,7 +114,7 @@ impl ChainStore { /// Get PMMR pos for the given output commitment. pub fn get_output_pos(&self, commit: &Commitment) -> Result { match self.get_output_pos_height(commit)? { - Some(pos) => Ok(pos.pos), + Some(pos) => Ok(pos.pos - 1), None => Err(Error::NotFoundErr(format!( "Output position for: {:?}", commit @@ -278,7 +278,7 @@ impl<'a> Batch<'a> { /// Get output_pos from index. pub fn get_output_pos(&self, commit: &Commitment) -> Result { match self.get_output_pos_height(commit)? 
{ - Some(pos) => Ok(pos.pos), + Some(pos) => Ok(pos.pos - 1), None => Err(Error::NotFoundErr(format!( "Output position for: {:?}", commit diff --git a/chain/src/txhashset/bitmap_accumulator.rs b/chain/src/txhashset/bitmap_accumulator.rs index efcf5a812..20619bd0d 100644 --- a/chain/src/txhashset/bitmap_accumulator.rs +++ b/chain/src/txhashset/bitmap_accumulator.rs @@ -50,6 +50,8 @@ pub struct BitmapAccumulator { } impl BitmapAccumulator { + const NBITS: u64 = BitmapChunk::LEN_BITS as u64; + /// Crate a new empty bitmap accumulator. pub fn new() -> BitmapAccumulator { BitmapAccumulator { @@ -65,12 +67,12 @@ impl BitmapAccumulator { /// Find the start of the first "chunk" of 1024 bits from the provided idx. /// Zero the last 10 bits to round down to multiple of 1024. pub fn chunk_start_idx(idx: u64) -> u64 { - idx & !0x3ff + idx & !(Self::NBITS - 1) } /// The first 1024 belong to chunk 0, the next 1024 to chunk 1 etc. fn chunk_idx(idx: u64) -> u64 { - idx / 1024 + idx / Self::NBITS } /// Apply the provided idx iterator to our bitmap accumulator. @@ -91,12 +93,13 @@ impl BitmapAccumulator { let mut idx_iter = idx.into_iter().filter(|&x| x < size).peekable(); while let Some(x) = idx_iter.peek() { - if *x < chunk_idx * 1024 { + if *x < chunk_idx * Self::NBITS { + // NOTE we never get here if idx starts from from_idx // skip until we reach our first chunk idx_iter.next(); - } else if *x < (chunk_idx + 1) * 1024 { + } else if *x < (chunk_idx + 1) * Self::NBITS { let idx = idx_iter.next().expect("next after peek"); - chunk.set(idx % 1024, true); + chunk.set(idx % Self::NBITS, true); } else { self.append_chunk(chunk)?; chunk_idx += 1; @@ -124,6 +127,8 @@ impl BitmapAccumulator { /// If size is 1 then we will have a single chunk. /// If size is 1023 then we will have a single chunk (bits 0 to 1023 inclusive). /// If the size is 1024 then we will have two chunks. 
+ /// TODO: first argument is an iterator for no good reason; + /// might as well pass from_idx as first argument pub fn apply(&mut self, invalidated_idx: T, idx: U, size: u64) -> Result<(), Error> where T: IntoIterator, @@ -149,8 +154,7 @@ impl BitmapAccumulator { let chunk_idx = BitmapAccumulator::chunk_idx(from_idx); let last_pos = self.backend.size(); let mut pmmr = PMMR::at(&mut self.backend, last_pos); - let chunk_pos = pmmr::insertion_to_pmmr_index(chunk_idx + 1); - let rewind_pos = chunk_pos.saturating_sub(1); + let rewind_pos = pmmr::insertion_to_pmmr_index(chunk_idx); pmmr.rewind(rewind_pos, &Bitmap::create()) .map_err(ErrorKind::Other)?; Ok(()) @@ -339,11 +343,11 @@ impl From for Segment { } = segment; // Count the number of chunks taking into account that the final block might be smaller - let n_chunks = blocks.len().saturating_sub(1) * BitmapBlock::NCHUNKS + let n_chunks = (blocks.len() - 1) * BitmapBlock::NCHUNKS + blocks.last().map(|b| b.n_chunks()).unwrap_or(0); let mut leaf_pos = Vec::with_capacity(n_chunks); let mut chunks = Vec::with_capacity(n_chunks); - let offset = (1 << identifier.height) * identifier.idx + 1; + let offset = (1 << identifier.height) * identifier.idx; for i in 0..(n_chunks as u64) { leaf_pos.push(pmmr::insertion_to_pmmr_index(offset + i)); chunks.push(BitmapChunk::new()); @@ -546,13 +550,15 @@ mod tests { #[test] fn sparse_block_ser_roundtrip() { - let entries = thread_rng().gen_range(1024, BitmapBlock::NBITS as usize / 16); + let entries = + thread_rng().gen_range(BitmapChunk::LEN_BITS, BitmapBlock::NBITS as usize / 16); test_roundtrip(entries, false, 1, 4 + 2 * entries); } #[test] fn abdundant_block_ser_roundtrip() { - let entries = thread_rng().gen_range(1024, BitmapBlock::NBITS as usize / 16); + let entries = + thread_rng().gen_range(BitmapChunk::LEN_BITS, BitmapBlock::NBITS as usize / 16); test_roundtrip(entries, true, 2, 4 + 2 * entries); } } diff --git a/chain/src/txhashset/txhashset.rs b/chain/src/txhashset/txhashset.rs index 9104cddfc..c002b9222 100644 --- a/chain/src/txhashset/txhashset.rs +++ b/chain/src/txhashset/txhashset.rs @@ -52,8 +52,8 @@ const TXHASHSET_ZIP: &str = "txhashset_snapshot"; pub struct PMMRHandle { /// The backend storage for the MMR. pub backend: PMMRBackend, - /// The last position accessible via this MMR handle (backend may continue out beyond this). - pub last_pos: u64, + /// The MMR size accessible via this handle (backend may continue out beyond this). + pub size: u64, } impl PMMRHandle { @@ -67,8 +67,8 @@ impl PMMRHandle { ) -> Result, Error> { fs::create_dir_all(&path)?; let backend = PMMRBackend::new(&path, prunable, version, header)?; - let last_pos = backend.unpruned_size(); - Ok(PMMRHandle { backend, last_pos }) + let size = backend.unpruned_size(); + Ok(PMMRHandle { backend, size }) } } @@ -87,31 +87,29 @@ impl PMMRHandle { return Err(ErrorKind::Other("header PMMR inconsistent".to_string()).into()); } - // 1-indexed pos and we want to account for subsequent parent hash pos. - // so use next header pos to find our last_pos. + // use next header pos to find our size. 
let next_height = head.height + 1; - let next_pos = pmmr::insertion_to_pmmr_index(next_height + 1); - let pos = next_pos.saturating_sub(1); + let size = pmmr::insertion_to_pmmr_index(next_height); debug!( "init_head: header PMMR: current head {} at pos {}", - head_hash, self.last_pos + head_hash, self.size ); debug!( "init_head: header PMMR: resetting to {} at pos {} (height {})", head.hash(), - pos, + size, head.height ); - self.last_pos = pos; + self.size = size; Ok(()) } /// Get the header hash at the specified height based on the current header MMR state. pub fn get_header_hash_by_height(&self, height: u64) -> Result { - let pos = pmmr::insertion_to_pmmr_index(height + 1); - let header_pmmr = ReadonlyPMMR::at(&self.backend, self.last_pos); + let pos = pmmr::insertion_to_pmmr_index(height); + let header_pmmr = ReadonlyPMMR::at(&self.backend, self.size); if let Some(entry) = header_pmmr.get_data(pos) { Ok(entry.hash()) } else { @@ -122,11 +120,11 @@ impl PMMRHandle { /// Get the header hash for the head of the header chain based on current MMR state. /// Find the last leaf pos based on MMR size and return its header hash. pub fn head_hash(&self) -> Result { - if self.last_pos == 0 { + if self.size == 0 { return Err(ErrorKind::Other("MMR empty, no head".to_string()).into()); } - let header_pmmr = ReadonlyPMMR::at(&self.backend, self.last_pos); - let leaf_pos = pmmr::bintree_rightmost(self.last_pos); + let header_pmmr = ReadonlyPMMR::at(&self.backend, self.size); + let leaf_pos = pmmr::bintree_rightmost(self.size - 1); if let Some(entry) = header_pmmr.get_data(leaf_pos) { Ok(entry.hash()) } else { @@ -194,7 +192,7 @@ impl TxHashSet { version, None, )?; - if handle.last_pos == 0 { + if handle.size == 0 { debug!( "attempting to open (empty) kernel PMMR using {:?} - SUCCESS", version @@ -202,7 +200,7 @@ impl TxHashSet { maybe_kernel_handle = Some(handle); break; } - let kernel: Option = ReadonlyPMMR::at(&handle.backend, 1).get_data(1); + let kernel: Option = ReadonlyPMMR::at(&handle.backend, 1).get_data(0); if let Some(kernel) = kernel { if kernel.verify().is_ok() { debug!( @@ -241,10 +239,10 @@ impl TxHashSet { fn bitmap_accumulator( pmmr_h: &PMMRHandle, ) -> Result { - let pmmr = ReadonlyPMMR::at(&pmmr_h.backend, pmmr_h.last_pos); - let size = pmmr::n_leaves(pmmr_h.last_pos); + let pmmr = ReadonlyPMMR::at(&pmmr_h.backend, pmmr_h.size); + let nbits = pmmr::n_leaves(pmmr_h.size); let mut bitmap_accumulator = BitmapAccumulator::new(); - bitmap_accumulator.init(&mut pmmr.leaf_idx_iter(0), size)?; + bitmap_accumulator.init(&mut pmmr.leaf_idx_iter(0), nbits)?; Ok(bitmap_accumulator) } @@ -263,12 +261,12 @@ impl TxHashSet { commit: Commitment, ) -> Result, Error> { match self.commit_index.get_output_pos_height(&commit) { - Ok(Some(pos)) => { + Ok(Some(pos1)) => { let output_pmmr: ReadonlyPMMR<'_, OutputIdentifier, _> = - ReadonlyPMMR::at(&self.output_pmmr_h.backend, self.output_pmmr_h.last_pos); - if let Some(out) = output_pmmr.get_data(pos.pos) { + ReadonlyPMMR::at(&self.output_pmmr_h.backend, self.output_pmmr_h.size); + if let Some(out) = output_pmmr.get_data(pos1.pos - 1) { if out.commitment() == commit { - Ok(Some((out, pos))) + Ok(Some((out, pos1))) } else { Ok(None) } @@ -286,19 +284,19 @@ impl TxHashSet { /// TODO: These need to return the actual data from the flat-files instead /// of hashes now pub fn last_n_output(&self, distance: u64) -> Vec<(Hash, OutputIdentifier)> { - ReadonlyPMMR::at(&self.output_pmmr_h.backend, self.output_pmmr_h.last_pos) + 
ReadonlyPMMR::at(&self.output_pmmr_h.backend, self.output_pmmr_h.size) .get_last_n_insertions(distance) } /// as above, for range proofs pub fn last_n_rangeproof(&self, distance: u64) -> Vec<(Hash, RangeProof)> { - ReadonlyPMMR::at(&self.rproof_pmmr_h.backend, self.rproof_pmmr_h.last_pos) + ReadonlyPMMR::at(&self.rproof_pmmr_h.backend, self.rproof_pmmr_h.size) .get_last_n_insertions(distance) } /// as above, for kernels pub fn last_n_kernel(&self, distance: u64) -> Vec<(Hash, TxKernel)> { - ReadonlyPMMR::at(&self.kernel_pmmr_h.backend, self.kernel_pmmr_h.last_pos) + ReadonlyPMMR::at(&self.kernel_pmmr_h.backend, self.kernel_pmmr_h.size) .get_last_n_insertions(distance) } @@ -340,13 +338,13 @@ impl TxHashSet { max_count: u64, max_index: Option, ) -> (u64, Vec) { - ReadonlyPMMR::at(&self.output_pmmr_h.backend, self.output_pmmr_h.last_pos) + ReadonlyPMMR::at(&self.output_pmmr_h.backend, self.output_pmmr_h.size) .elements_from_pmmr_index(start_index, max_count, max_index) } - /// highest output insertion index available - pub fn highest_output_insertion_index(&self) -> u64 { - self.output_pmmr_h.last_pos + /// number of outputs + pub fn output_mmr_size(&self) -> u64 { + self.output_pmmr_h.size } /// As above, for rangeproofs @@ -356,11 +354,13 @@ impl TxHashSet { max_count: u64, max_index: Option, ) -> (u64, Vec) { - ReadonlyPMMR::at(&self.rproof_pmmr_h.backend, self.rproof_pmmr_h.last_pos) + ReadonlyPMMR::at(&self.rproof_pmmr_h.backend, self.rproof_pmmr_h.size) .elements_from_pmmr_index(start_index, max_count, max_index) } /// Find a kernel with a given excess. Work backwards from `max_index` to `min_index` + /// NOTE: this linear search over all kernel history can be VERY expensive + /// public API access to this method should be limited pub fn find_kernel( &self, excess: &Commitment, @@ -368,13 +368,13 @@ impl TxHashSet { max_index: Option, ) -> Option<(TxKernel, u64)> { let min_index = min_index.unwrap_or(1); - let max_index = max_index.unwrap_or(self.kernel_pmmr_h.last_pos); + let max_index = max_index.unwrap_or(self.kernel_pmmr_h.size); - let pmmr = ReadonlyPMMR::at(&self.kernel_pmmr_h.backend, self.kernel_pmmr_h.last_pos); + let pmmr = ReadonlyPMMR::at(&self.kernel_pmmr_h.backend, self.kernel_pmmr_h.size); let mut index = max_index + 1; while index > min_index { index -= 1; - if let Some(kernel) = pmmr.get_data(index) { + if let Some(kernel) = pmmr.get_data(index - 1) { if &kernel.excess == excess { return Some((kernel, index)); } @@ -385,12 +385,9 @@ impl TxHashSet { /// Get MMR roots. pub fn roots(&self) -> TxHashSetRoots { - let output_pmmr = - ReadonlyPMMR::at(&self.output_pmmr_h.backend, self.output_pmmr_h.last_pos); - let rproof_pmmr = - ReadonlyPMMR::at(&self.rproof_pmmr_h.backend, self.rproof_pmmr_h.last_pos); - let kernel_pmmr = - ReadonlyPMMR::at(&self.kernel_pmmr_h.backend, self.kernel_pmmr_h.last_pos); + let output_pmmr = ReadonlyPMMR::at(&self.output_pmmr_h.backend, self.output_pmmr_h.size); + let rproof_pmmr = ReadonlyPMMR::at(&self.rproof_pmmr_h.backend, self.rproof_pmmr_h.size); + let kernel_pmmr = ReadonlyPMMR::at(&self.kernel_pmmr_h.backend, self.kernel_pmmr_h.size); TxHashSetRoots { output_roots: OutputRoots { @@ -407,11 +404,11 @@ impl TxHashSet { Ok(self.commit_index.get_output_pos(&commit)?) } - /// build a new merkle proof for the given position. 
+ /// build a new merkle proof for the given output commitment pub fn merkle_proof(&mut self, commit: Commitment) -> Result { - let pos = self.commit_index.get_output_pos(&commit)?; - PMMR::at(&mut self.output_pmmr_h.backend, self.output_pmmr_h.last_pos) - .merkle_proof(pos) + let pos0 = self.commit_index.get_output_pos(&commit)?; + PMMR::at(&mut self.output_pmmr_h.backend, self.output_pmmr_h.size) + .merkle_proof(pos0) .map_err(|_| ErrorKind::MerkleProof.into()) } @@ -484,15 +481,14 @@ impl TxHashSet { prev_size, ); - let kernel_pmmr = - ReadonlyPMMR::at(&self.kernel_pmmr_h.backend, self.kernel_pmmr_h.last_pos); + let kernel_pmmr = ReadonlyPMMR::at(&self.kernel_pmmr_h.backend, self.kernel_pmmr_h.size); let mut current_pos = prev_size + 1; let mut current_header = from_header.clone(); let mut count = 0; - while current_pos <= self.kernel_pmmr_h.last_pos { - if pmmr::is_leaf(current_pos) { - if let Some(kernel) = kernel_pmmr.get_data(current_pos) { + while current_pos <= self.kernel_pmmr_h.size { + if pmmr::is_leaf(current_pos - 1) { + if let Some(kernel) = kernel_pmmr.get_data(current_pos - 1) { match kernel.features { KernelFeatures::NoRecentDuplicate { .. } => { while current_pos > current_header.kernel_mmr_size { @@ -532,18 +528,18 @@ impl TxHashSet { ) -> Result<(), Error> { let now = Instant::now(); - let output_pmmr = - ReadonlyPMMR::at(&self.output_pmmr_h.backend, self.output_pmmr_h.last_pos); + let output_pmmr = ReadonlyPMMR::at(&self.output_pmmr_h.backend, self.output_pmmr_h.size); // Iterate over the current output_pos index, removing any entries that // do not point to to the expected output. let mut removed_count = 0; - for (key, pos) in batch.output_pos_iter()? { - if let Some(out) = output_pmmr.get_data(pos.pos) { - if let Ok(pos_via_mmr) = batch.get_output_pos(&out.commitment()) { + for (key, pos1) in batch.output_pos_iter()? { + let pos0 = pos1.pos - 1; + if let Some(out) = output_pmmr.get_data(pos0) { + if let Ok(pos0_via_mmr) = batch.get_output_pos(&out.commitment()) { // If the pos matches and the index key matches the commitment // then keep the entry, other we want to clean it up. 
- if pos.pos == pos_via_mmr + if pos0 == pos0_via_mmr && batch.is_match_output_pos_key(&key, &out.commitment()) { continue; @@ -559,9 +555,9 @@ impl TxHashSet { ); let mut outputs_pos: Vec<(Commitment, u64)> = vec![]; - for pos in output_pmmr.leaf_pos_iter() { - if let Some(out) = output_pmmr.get_data(pos) { - outputs_pos.push((out.commit, pos)); + for pos0 in output_pmmr.leaf_pos_iter() { + if let Some(out) = output_pmmr.get_data(pos0) { + outputs_pos.push((out.commit, 1 + pos0)); } } @@ -591,15 +587,14 @@ impl TxHashSet { let hash = header_pmmr.get_header_hash_by_height(search_height + 1)?; let h = batch.get_block_header(&hash)?; while i < total_outputs { - let (commit, pos) = outputs_pos[i]; - if pos > h.output_mmr_size { - // Note: MMR position is 1-based and not 0-based, so here must be '>' instead of '>=' + let (commit, pos1) = outputs_pos[i]; + if pos1 > h.output_mmr_size { break; } batch.save_output_pos_height( &commit, CommitPos { - pos, + pos: pos1, height: h.height, }, )?; @@ -638,7 +633,7 @@ where let header_head = batch.header_head()?; let res = { - let header_pmmr = PMMR::at(&mut handle.backend, handle.last_pos); + let header_pmmr = PMMR::at(&mut handle.backend, handle.size); let mut header_extension = HeaderExtension::new(header_pmmr, header_head); let mut extension = Extension::new(trees, head); let mut extension_pair = ExtensionPair { @@ -673,11 +668,9 @@ where { let res: Result; { - let header_pmmr = ReadonlyPMMR::at(&handle.backend, handle.last_pos); - let output_pmmr = - ReadonlyPMMR::at(&trees.output_pmmr_h.backend, trees.output_pmmr_h.last_pos); - let rproof_pmmr = - ReadonlyPMMR::at(&trees.rproof_pmmr_h.backend, trees.rproof_pmmr_h.last_pos); + let header_pmmr = ReadonlyPMMR::at(&handle.backend, handle.size); + let output_pmmr = ReadonlyPMMR::at(&trees.output_pmmr_h.backend, trees.output_pmmr_h.size); + let rproof_pmmr = ReadonlyPMMR::at(&trees.rproof_pmmr_h.backend, trees.rproof_pmmr_h.size); // Create a new batch here to pass into the utxo_view. // Discard it (rollback) after we finish with the utxo_view. @@ -690,7 +683,7 @@ where /// Rewindable (but still readonly) view on the kernel MMR. /// The underlying backend is readonly. But we permit the PMMR to be "rewound" -/// via last_pos. +/// via size. /// We create a new db batch for this view and discard it (rollback) /// when we are done with the view. pub fn rewindable_kernel_view(trees: &TxHashSet, inner: F) -> Result @@ -700,7 +693,7 @@ where let res: Result; { let kernel_pmmr = - RewindablePMMR::at(&trees.kernel_pmmr_h.backend, trees.kernel_pmmr_h.last_pos); + RewindablePMMR::at(&trees.kernel_pmmr_h.backend, trees.kernel_pmmr_h.size); // Create a new batch here to pass into the kernel_view. // Discard it (rollback) after we finish with the kernel_view. 
@@ -742,7 +735,7 @@ where { trace!("Starting new txhashset extension."); - let header_pmmr = PMMR::at(&mut header_pmmr.backend, header_pmmr.last_pos); + let header_pmmr = PMMR::at(&mut header_pmmr.backend, header_pmmr.size); let mut header_extension = HeaderExtension::new(header_pmmr, header_head); let mut extension = Extension::new(trees, head); let mut extension_pair = ExtensionPair { @@ -780,9 +773,9 @@ where trees.output_pmmr_h.backend.sync()?; trees.rproof_pmmr_h.backend.sync()?; trees.kernel_pmmr_h.backend.sync()?; - trees.output_pmmr_h.last_pos = sizes.0; - trees.rproof_pmmr_h.last_pos = sizes.1; - trees.kernel_pmmr_h.last_pos = sizes.2; + trees.output_pmmr_h.size = sizes.0; + trees.rproof_pmmr_h.size = sizes.1; + trees.kernel_pmmr_h.size = sizes.2; // Update our bitmap_accumulator based on our extension trees.bitmap_accumulator = bitmap_accumulator; @@ -815,7 +808,7 @@ where Err(_) => Tip::default(), }; - let pmmr = PMMR::at(&mut handle.backend, handle.last_pos); + let pmmr = PMMR::at(&mut handle.backend, handle.size); let mut extension = HeaderExtension::new(pmmr, head); let res = inner(&mut extension, &batch); @@ -852,7 +845,7 @@ where }; { - let pmmr = PMMR::at(&mut handle.backend, handle.last_pos); + let pmmr = PMMR::at(&mut handle.backend, handle.size); let mut extension = HeaderExtension::new(pmmr, head); res = inner(&mut extension, &child_batch); @@ -871,7 +864,7 @@ where } else { child_batch.commit()?; handle.backend.sync()?; - handle.last_pos = size; + handle.size = size; } Ok(r) } @@ -902,8 +895,8 @@ impl<'a> HeaderExtension<'a> { } /// Get the header hash for the specified pos from the underlying MMR backend. - fn get_header_hash(&self, pos: u64) -> Option { - self.pmmr.get_data(pos).map(|x| x.hash()) + fn get_header_hash(&self, pos0: u64) -> Option { + self.pmmr.get_data(pos0).map(|x| x.hash()) } /// The head representing the furthest extent of the current extension. @@ -914,7 +907,7 @@ impl<'a> HeaderExtension<'a> { /// Get header hash by height. /// Based on current header MMR. 
pub fn get_header_hash_by_height(&self, height: u64) -> Option { - let pos = pmmr::insertion_to_pmmr_index(height + 1); + let pos = pmmr::insertion_to_pmmr_index(height); self.get_header_hash(pos) } @@ -973,7 +966,7 @@ impl<'a> HeaderExtension<'a> { self.head.height, ); - let header_pos = pmmr::insertion_to_pmmr_index(header.height + 1); + let header_pos = 1 + pmmr::insertion_to_pmmr_index(header.height); self.pmmr .rewind(header_pos, &Bitmap::create()) .map_err(&ErrorKind::TxHashSetErr)?; @@ -1041,8 +1034,8 @@ impl<'a> Committed for Extension<'a> { fn outputs_committed(&self) -> Vec { let mut commitments = vec![]; - for pos in self.output_pmmr.leaf_pos_iter() { - if let Some(out) = self.output_pmmr.get_data(pos) { + for pos0 in self.output_pmmr.leaf_pos_iter() { + if let Some(out) = self.output_pmmr.get_data(pos0) { commitments.push(out.commit); } } @@ -1051,7 +1044,7 @@ impl<'a> Committed for Extension<'a> { fn kernels_committed(&self) -> Vec { let mut commitments = vec![]; - for n in 1..self.kernel_pmmr.unpruned_size() + 1 { + for n in 0..self.kernel_pmmr.unpruned_size() { if pmmr::is_leaf(n) { if let Some(kernel) = self.kernel_pmmr.get_data(n) { commitments.push(kernel.excess()); @@ -1066,18 +1059,9 @@ impl<'a> Extension<'a> { fn new(trees: &'a mut TxHashSet, head: Tip) -> Extension<'a> { Extension { head, - output_pmmr: PMMR::at( - &mut trees.output_pmmr_h.backend, - trees.output_pmmr_h.last_pos, - ), - rproof_pmmr: PMMR::at( - &mut trees.rproof_pmmr_h.backend, - trees.rproof_pmmr_h.last_pos, - ), - kernel_pmmr: PMMR::at( - &mut trees.kernel_pmmr_h.backend, - trees.kernel_pmmr_h.last_pos, - ), + output_pmmr: PMMR::at(&mut trees.output_pmmr_h.backend, trees.output_pmmr_h.size), + rproof_pmmr: PMMR::at(&mut trees.rproof_pmmr_h.backend, trees.rproof_pmmr_h.size), + kernel_pmmr: PMMR::at(&mut trees.kernel_pmmr_h.backend, trees.kernel_pmmr_h.size), bitmap_accumulator: trees.bitmap_accumulator.clone(), rollback: false, } @@ -1177,13 +1161,14 @@ impl<'a> Extension<'a> { } fn apply_to_bitmap_accumulator(&mut self, output_pos: &[u64]) -> Result<(), Error> { + // NOTE: 1-based output_pos shouldn't have 0 in it (but does) let mut output_idx: Vec<_> = output_pos .iter() .map(|x| pmmr::n_leaves(*x).saturating_sub(1)) .collect(); output_idx.sort_unstable(); let min_idx = output_idx.first().cloned().unwrap_or(0); - let size = pmmr::n_leaves(self.output_pmmr.last_pos); + let size = pmmr::n_leaves(self.output_pmmr.size); self.bitmap_accumulator.apply( output_idx, self.output_pmmr @@ -1195,10 +1180,10 @@ impl<'a> Extension<'a> { // Prune output and rangeproof PMMRs based on provided pos. // Input is not valid if we cannot prune successfully. 
fn apply_input(&mut self, commit: Commitment, pos: CommitPos) -> Result<(), Error> { - match self.output_pmmr.prune(pos.pos) { + match self.output_pmmr.prune(pos.pos - 1) { Ok(true) => { self.rproof_pmmr - .prune(pos.pos) + .prune(pos.pos - 1) .map_err(ErrorKind::TxHashSetErr)?; Ok(()) } @@ -1210,8 +1195,8 @@ impl<'a> Extension<'a> { fn apply_output(&mut self, out: &Output, batch: &Batch<'_>) -> Result { let commit = out.commitment(); - if let Ok(pos) = batch.get_output_pos(&commit) { - if let Some(out_mmr) = self.output_pmmr.get_data(pos) { + if let Ok(pos0) = batch.get_output_pos(&commit) { + if let Some(out_mmr) = self.output_pmmr.get_data(pos0) { if out_mmr.commitment() == commit { return Err(ErrorKind::DuplicateCommitment(commit).into()); } @@ -1244,7 +1229,7 @@ impl<'a> Extension<'a> { ); } } - Ok(output_pos) + Ok(1 + output_pos) } /// Apply kernels to the kernel MMR. @@ -1272,7 +1257,7 @@ impl<'a> Extension<'a> { .kernel_pmmr .push(kernel) .map_err(&ErrorKind::TxHashSetErr)?; - Ok(pos) + Ok(1 + pos) } /// Build a Merkle proof for the given output and the block @@ -1288,10 +1273,10 @@ impl<'a> Extension<'a> { let out_id = out_id.as_ref(); debug!("txhashset: merkle_proof: output: {:?}", out_id.commit); // then calculate the Merkle Proof based on the known pos - let pos = batch.get_output_pos(&out_id.commit)?; + let pos0 = batch.get_output_pos(&out_id.commit)?; let merkle_proof = self .output_pmmr - .merkle_proof(pos) + .merkle_proof(pos0) .map_err(&ErrorKind::TxHashSetErr)?; Ok(merkle_proof) @@ -1386,9 +1371,9 @@ impl<'a> Extension<'a> { // Update our BitmapAccumulator based on affected outputs. // We want to "unspend" every rewound spent output. - // Treat last_pos as an affected output to ensure we rebuild far enough back. + // Treat size as an affected output to ensure we rebuild far enough back. let mut affected_pos = spent_pos; - affected_pos.push(self.output_pmmr.last_pos); + affected_pos.push(self.output_pmmr.size); // Remove any entries from the output_pos created by the block being rewound. let mut missing_count = 0; @@ -1422,9 +1407,9 @@ impl<'a> Extension<'a> { // reused output commitment. For example an output at pos 1, spent, reused at pos 2. // The output_pos index should be updated to reflect the old pos 1 when unspent. 
if let Ok(spent) = spent { - for pos in spent { - if let Some(out) = self.output_pmmr.get_data(pos.pos) { - batch.save_output_pos_height(&out.commitment(), pos)?; + for pos1 in spent { + if let Some(out) = self.output_pmmr.get_data(pos1.pos - 1) { + batch.save_output_pos_height(&out.commitment(), pos1)?; } } } @@ -1627,7 +1612,7 @@ impl<'a> Extension<'a> { let mut kern_count = 0; let total_kernels = pmmr::n_leaves(self.kernel_pmmr.unpruned_size()); let mut tx_kernels: Vec = Vec::with_capacity(KERNEL_BATCH_SIZE); - for n in 1..self.kernel_pmmr.unpruned_size() + 1 { + for n in 0..self.kernel_pmmr.unpruned_size() { if pmmr::is_leaf(n) { let kernel = self .kernel_pmmr @@ -1636,7 +1621,7 @@ impl<'a> Extension<'a> { tx_kernels.push(kernel); } - if tx_kernels.len() >= KERNEL_BATCH_SIZE || n >= self.kernel_pmmr.unpruned_size() { + if tx_kernels.len() >= KERNEL_BATCH_SIZE || n + 1 >= self.kernel_pmmr.unpruned_size() { TxKernel::batch_sig_verify(&tx_kernels)?; kern_count += tx_kernels.len() as u64; tx_kernels.clear(); @@ -1667,9 +1652,9 @@ impl<'a> Extension<'a> { let mut proof_count = 0; let total_rproofs = self.output_pmmr.n_unpruned_leaves(); - for pos in self.output_pmmr.leaf_pos_iter() { - let output = self.output_pmmr.get_data(pos); - let proof = self.rproof_pmmr.get_data(pos); + for pos0 in self.output_pmmr.leaf_pos_iter() { + let output = self.output_pmmr.get_data(pos0); + let proof = self.rproof_pmmr.get_data(pos0); // Output and corresponding rangeproof *must* exist. // It is invalid for either to be missing and we fail immediately in this case. diff --git a/chain/src/txhashset/utxo_view.rs b/chain/src/txhashset/utxo_view.rs index 4d2bfe88a..279f681e1 100644 --- a/chain/src/txhashset/utxo_view.rs +++ b/chain/src/txhashset/utxo_view.rs @@ -123,12 +123,12 @@ impl<'a> UTXOView<'a> { batch: &Batch<'_>, ) -> Result<(OutputIdentifier, CommitPos), Error> { let pos = batch.get_output_pos_height(&input)?; - if let Some(pos) = pos { - if let Some(out) = self.output_pmmr.get_data(pos.pos) { + if let Some(pos1) = pos { + if let Some(out) = self.output_pmmr.get_data(pos1.pos - 1) { if out.commitment() == input { - return Ok((out, pos)); + return Ok((out, pos1)); } else { - error!("input mismatch: {:?}, {:?}, {:?}", out, pos, input); + error!("input mismatch: {:?}, {:?}, {:?}", out, pos1, input); return Err(ErrorKind::Other( "input mismatch (output_pos index mismatch?)".into(), ) @@ -141,8 +141,8 @@ impl<'a> UTXOView<'a> { // Output is valid if it would not result in a duplicate commitment in the output MMR. 
fn validate_output(&self, output: &Output, batch: &Batch<'_>) -> Result<(), Error> { - if let Ok(pos) = batch.get_output_pos(&output.commitment()) { - if let Some(out_mmr) = self.output_pmmr.get_data(pos) { + if let Ok(pos0) = batch.get_output_pos(&output.commitment()) { + if let Some(out_mmr) = self.output_pmmr.get_data(pos0) { if out_mmr.commitment() == output.commitment() { return Err(ErrorKind::DuplicateCommitment(output.commitment()).into()); } @@ -152,9 +152,9 @@ impl<'a> UTXOView<'a> { } /// Retrieves an unspent output using its PMMR position - pub fn get_unspent_output_at(&self, pos: u64) -> Result { - match self.output_pmmr.get_data(pos) { - Some(output_id) => match self.rproof_pmmr.get_data(pos) { + pub fn get_unspent_output_at(&self, pos0: u64) -> Result { + match self.output_pmmr.get_data(pos0) { + Some(output_id) => match self.rproof_pmmr.get_data(pos0) { Some(rproof) => Ok(output_id.into_output(rproof)), None => Err(ErrorKind::RangeproofNotFound.into()), }, @@ -214,8 +214,8 @@ impl<'a> UTXOView<'a> { } /// Get the header hash for the specified pos from the underlying MMR backend. - fn get_header_hash(&self, pos: u64) -> Option { - self.header_pmmr.get_data(pos).map(|x| x.hash()) + fn get_header_hash(&self, pos1: u64) -> Option { + self.header_pmmr.get_data(pos1 - 1).map(|x| x.hash()) } /// Get the header at the specified height based on the current state of the extension. @@ -226,8 +226,8 @@ impl<'a> UTXOView<'a> { height: u64, batch: &Batch<'_>, ) -> Result { - let pos = pmmr::insertion_to_pmmr_index(height + 1); - if let Some(hash) = self.get_header_hash(pos) { + let pos1 = 1 + pmmr::insertion_to_pmmr_index(height); + if let Some(hash) = self.get_header_hash(pos1) { let header = batch.get_block_header(&hash)?; Ok(header) } else { diff --git a/chain/tests/process_block_cut_through.rs b/chain/tests/process_block_cut_through.rs index c7d38a0d4..d2c121a78 100644 --- a/chain/tests/process_block_cut_through.rs +++ b/chain/tests/process_block_cut_through.rs @@ -56,6 +56,9 @@ where chain.set_prev_root_only(&mut block.header)?; // Manually set the mmr sizes for a "valid" block (increment prev output and kernel counts). + // The 2 lines below were bogus before when using 1-based positions. + // They worked only for even output_mmr_count()s + // But it was actually correct for 0-based position! 
block.header.output_mmr_size = pmmr::insertion_to_pmmr_index(prev.output_mmr_count() + 1); block.header.kernel_mmr_size = pmmr::insertion_to_pmmr_index(prev.kernel_mmr_count() + 1); } else { diff --git a/core/src/core/merkle_proof.rs b/core/src/core/merkle_proof.rs index 3b4a1a722..f867fd6b2 100644 --- a/core/src/core/merkle_proof.rs +++ b/core/src/core/merkle_proof.rs @@ -112,13 +112,13 @@ impl MerkleProof { &mut self, root: Hash, element: &dyn PMMRIndexHashable, - node_pos: u64, - peaks_pos: &[u64], + node_pos0: u64, + peaks_pos0: &[u64], ) -> Result<(), MerkleProofError> { - let node_hash = if node_pos > self.mmr_size { + let node_hash = if node_pos0 >= self.mmr_size { element.hash_with_index(self.mmr_size) } else { - element.hash_with_index(node_pos - 1) + element.hash_with_index(node_pos0) }; // handle special case of only a single entry in the MMR @@ -132,25 +132,25 @@ impl MerkleProof { } let sibling = self.path.remove(0); - let (parent_pos, sibling_pos) = pmmr::family(node_pos); + let (parent_pos0, sibling_pos0) = pmmr::family(node_pos0); - if let Ok(x) = peaks_pos.binary_search(&node_pos) { - let parent = if x == peaks_pos.len() - 1 { + if let Ok(x) = peaks_pos0.binary_search(&(node_pos0)) { + let parent = if x == peaks_pos0.len() - 1 { (sibling, node_hash) } else { (node_hash, sibling) }; - self.verify(root, &parent, parent_pos) - } else if parent_pos > self.mmr_size { + self.verify(root, &parent, parent_pos0) + } else if parent_pos0 >= self.mmr_size { let parent = (sibling, node_hash); - self.verify(root, &parent, parent_pos) + self.verify(root, &parent, parent_pos0) } else { - let parent = if pmmr::is_left_sibling(sibling_pos) { + let parent = if pmmr::is_left_sibling(sibling_pos0) { (sibling, node_hash) } else { (node_hash, sibling) }; - self.verify(root, &parent, parent_pos) + self.verify(root, &parent, parent_pos0) } } } diff --git a/core/src/core/pmmr/backend.rs b/core/src/core/pmmr/backend.rs index c7f0de72a..8c53835ea 100644 --- a/core/src/core/pmmr/backend.rs +++ b/core/src/core/pmmr/backend.rs @@ -31,33 +31,33 @@ pub trait Backend { /// Rebuilding a PMMR locally from PIBD segments requires pruned subtree support. /// This allows us to append an existing pruned subtree directly without the underlying leaf nodes. - fn append_pruned_subtree(&mut self, hash: Hash, pos: u64) -> Result<(), String>; + fn append_pruned_subtree(&mut self, hash: Hash, pos0: u64) -> Result<(), String>; /// Rewind the backend state to a previous position, as if all append /// operations after that had been canceled. Expects a position in the PMMR /// to rewind to as well as bitmaps representing the positions added and /// removed since the rewind position. These are what we will "undo" /// during the rewind. - fn rewind(&mut self, position: u64, rewind_rm_pos: &Bitmap) -> Result<(), String>; + fn rewind(&mut self, pos1: u64, rewind_rm_pos: &Bitmap) -> Result<(), String>; /// Get a Hash by insertion position. - fn get_hash(&self, position: u64) -> Option; + fn get_hash(&self, pos0: u64) -> Option; /// Get underlying data by insertion position. - fn get_data(&self, position: u64) -> Option; + fn get_data(&self, pos0: u64) -> Option; /// Get a Hash by original insertion position /// (ignoring the remove log). - fn get_from_file(&self, position: u64) -> Option; + fn get_from_file(&self, pos0: u64) -> Option; /// Get hash for peak pos. /// Optimized for reading peak hashes rather than arbitrary pos hashes. /// Peaks can be assumed to not be compacted. 
- fn get_peak_from_file(&self, position: u64) -> Option; + fn get_peak_from_file(&self, pos0: u64) -> Option; /// Get a Data Element by original insertion position /// (ignoring the remove log). - fn get_data_from_file(&self, position: u64) -> Option; + fn get_data_from_file(&self, pos0: u64) -> Option; /// Iterator over current (unpruned, unremoved) leaf positions. fn leaf_pos_iter(&self) -> Box + '_>; diff --git a/core/src/core/pmmr/pmmr.rs b/core/src/core/pmmr/pmmr.rs index cce8765a0..a0c039d56 100644 --- a/core/src/core/pmmr/pmmr.rs +++ b/core/src/core/pmmr/pmmr.rs @@ -28,6 +28,8 @@ pub trait ReadablePMMR { type Item; /// Get the hash at provided position in the MMR. + /// NOTE all positions are 0-based, so a size n MMR has nodes in positions 0 through n-1 + /// just like a Rust Range 0..n fn get_hash(&self, pos: u64) -> Option; /// Get the data element at provided position in the MMR. @@ -65,18 +67,18 @@ pub trait ReadablePMMR { /// all the peaks to the right of this peak (if any). /// If this return a hash then this is our peaks sibling. /// If none then the sibling of our peak is the peak to the left. - fn bag_the_rhs(&self, peak_pos: u64) -> Option { - let last_pos = self.unpruned_size(); - let rhs = peaks(last_pos) + fn bag_the_rhs(&self, peak_pos0: u64) -> Option { + let size = self.unpruned_size(); + let rhs = peaks(size) .into_iter() - .filter(|&x| x > peak_pos) + .filter(|&x| x > peak_pos0) .filter_map(|x| self.get_from_file(x)); let mut res = None; for peak in rhs.rev() { res = match res { None => Some(peak), - Some(rhash) => Some((peak, rhash).hash_with_index(last_pos)), + Some(rhash) => Some((peak, rhash).hash_with_index(size)), } } res @@ -86,16 +88,16 @@ pub trait ReadablePMMR { fn peaks(&self) -> Vec { peaks(self.unpruned_size()) .into_iter() - .filter_map(move |pi| self.get_peak_from_file(pi)) + .filter_map(move |pi0| self.get_peak_from_file(pi0)) .collect() } /// Hashes of the peaks excluding `peak_pos`, where the rhs is bagged together - fn peak_path(&self, peak_pos: u64) -> Vec { - let rhs = self.bag_the_rhs(peak_pos); + fn peak_path(&self, peak_pos0: u64) -> Vec { + let rhs = self.bag_the_rhs(peak_pos0); let mut res = peaks(self.unpruned_size()) .into_iter() - .filter(|&x| x < peak_pos) + .filter(|&x| x < peak_pos0) .filter_map(|x| self.get_peak_from_file(x)) .collect::>(); if let Some(rhs) = rhs { @@ -114,30 +116,31 @@ pub trait ReadablePMMR { } let mut res = None; let peaks = self.peaks(); + let mmr_size = self.unpruned_size(); for peak in peaks.into_iter().rev() { res = match res { None => Some(peak), - Some(rhash) => Some((peak, rhash).hash_with_index(self.unpruned_size())), + Some(rhash) => Some((peak, rhash).hash_with_index(mmr_size)), } } res.ok_or_else(|| "no root, invalid tree".to_owned()) } /// Build a Merkle proof for the element at the given position. 
- fn merkle_proof(&self, pos: u64) -> Result { - let last_pos = self.unpruned_size(); - debug!("merkle_proof {}, last_pos {}", pos, last_pos); + fn merkle_proof(&self, pos0: u64) -> Result { + let size = self.unpruned_size(); + debug!("merkle_proof {}, size {}", pos0, size); // check this pos is actually a leaf in the MMR - if !is_leaf(pos) { - return Err(format!("not a leaf at pos {}", pos)); + if !is_leaf(pos0) { + return Err(format!("not a leaf at pos {}", pos0)); } // check we actually have a hash in the MMR at this pos - self.get_hash(pos) - .ok_or_else(|| format!("no element at pos {}", pos))?; + self.get_hash(pos0) + .ok_or_else(|| format!("no element at pos {}", pos0))?; - let family_branch = family_branch(pos, last_pos); + let family_branch = family_branch(pos0, size); let mut path = family_branch .iter() @@ -146,21 +149,20 @@ pub trait ReadablePMMR { let peak_pos = match family_branch.last() { Some(&(x, _)) => x, - None => pos, + None => pos0, }; path.append(&mut self.peak_path(peak_pos)); Ok(MerkleProof { - mmr_size: last_pos, + mmr_size: size, path, }) } } /// Prunable Merkle Mountain Range implementation. All positions within the tree -/// start at 1 as they're postorder tree traversal positions rather than array -/// indices. +/// start at 0 just like array indices. /// /// Heavily relies on navigation operations within a binary tree. In particular, /// all the implementation needs to keep track of the MMR structure is how far @@ -170,8 +172,8 @@ where T: PMMRable, B: Backend, { - /// The last position in the PMMR - pub last_pos: u64, + /// Number of nodes in the PMMR + pub size: u64, backend: &'a mut B, // only needed to parameterise Backend _marker: marker::PhantomData, @@ -186,38 +188,38 @@ where pub fn new(backend: &'a mut B) -> PMMR<'_, T, B> { PMMR { backend, - last_pos: 0, + size: 0, _marker: marker::PhantomData, } } /// Build a new prunable Merkle Mountain Range pre-initialized until - /// last_pos with the provided backend. - pub fn at(backend: &'a mut B, last_pos: u64) -> PMMR<'_, T, B> { + /// size with the provided backend. + pub fn at(backend: &'a mut B, size: u64) -> PMMR<'_, T, B> { PMMR { backend, - last_pos, + size, _marker: marker::PhantomData, } } /// Build a "readonly" view of this PMMR. pub fn readonly_pmmr(&self) -> ReadonlyPMMR<'_, T, B> { - ReadonlyPMMR::at(&self.backend, self.last_pos) + ReadonlyPMMR::at(&self.backend, self.size) } /// Push a new element into the MMR. Computes new related peaks at /// the same time if applicable. 
- pub fn push(&mut self, elmt: &T) -> Result { - let elmt_pos = self.last_pos + 1; - let mut current_hash = elmt.hash_with_index(elmt_pos - 1); + pub fn push(&mut self, leaf: &T) -> Result { + let leaf_pos = self.size; + let mut current_hash = leaf.hash_with_index(leaf_pos); let mut hashes = vec![current_hash]; - let mut pos = elmt_pos; + let mut pos = leaf_pos; - let (peak_map, height) = peak_map_height(pos - 1); + let (peak_map, height) = peak_map_height(pos); if height != 0 { - return Err(format!("bad mmr size {}", pos - 1)); + return Err(format!("bad mmr size {}", pos)); } // hash with all immediately preceding peaks, as indicated by peak map let mut peak = 1; @@ -229,14 +231,14 @@ where .ok_or("missing left sibling in tree, should not have been pruned")?; peak *= 2; pos += 1; - current_hash = (left_hash, current_hash).hash_with_index(pos - 1); + current_hash = (left_hash, current_hash).hash_with_index(pos); hashes.push(current_hash); } // append all the new nodes and update the MMR index - self.backend.append(elmt, &hashes)?; - self.last_pos = pos; - Ok(elmt_pos) + self.backend.append(leaf, &hashes)?; + self.size = pos + 1; + Ok(leaf_pos) } /// Saves a snapshot of the MMR tagged with the block hash. @@ -255,13 +257,9 @@ where // Identify which actual position we should rewind to as the provided // position is a leaf. We traverse the MMR to include any parent(s) that // need to be included for the MMR to be valid. - let mut pos = position; - while bintree_postorder_height(pos + 1) > 0 { - pos += 1; - } - - self.backend.rewind(pos, rewind_rm_pos)?; - self.last_pos = pos; + let leaf_pos = round_up_to_leaf_pos(position); + self.backend.rewind(leaf_pos, rewind_rm_pos)?; + self.size = leaf_pos; Ok(()) } @@ -269,23 +267,23 @@ where /// Returns an error if prune is called on a non-leaf position. /// Returns false if the leaf node has already been pruned. /// Returns true if pruning is successful. 
- pub fn prune(&mut self, position: u64) -> Result { - if !is_leaf(position) { - return Err(format!("Node at {} is not a leaf, can't prune.", position)); + pub fn prune(&mut self, pos0: u64) -> Result { + if !is_leaf(pos0) { + return Err(format!("Node at {} is not a leaf, can't prune.", pos0)); } - if self.backend.get_hash(position).is_none() { + if self.backend.get_hash(pos0).is_none() { return Ok(false); } - self.backend.remove(position)?; + self.backend.remove(pos0)?; Ok(true) } /// Walks all unpruned nodes in the MMR and revalidate all parent hashes pub fn validate(&self) -> Result<(), String> { // iterate on all parent nodes - for n in 1..(self.last_pos + 1) { + for n in 0..self.size { let height = bintree_postorder_height(n); if height > 0 { if let Some(hash) = self.get_hash(n) { @@ -295,11 +293,11 @@ where if let Some(left_child_hs) = self.get_from_file(left_pos) { if let Some(right_child_hs) = self.get_from_file(right_pos) { // hash the two child nodes together with parent_pos and compare - if (left_child_hs, right_child_hs).hash_with_index(n - 1) != hash { + if (left_child_hs, right_child_hs).hash_with_index(n) != hash { return Err(format!( "Invalid MMR, hash of parent at {} does \ not match children.", - n + n + 1 )); } } @@ -317,7 +315,7 @@ where if sz > 2000 && !short { return; } - let start = if short && sz > 7 { sz / 8 - 1 } else { 0 }; + let start = if short { sz / 8 } else { 0 }; for n in start..(sz / 8 + 1) { let mut idx = "".to_owned(); let mut hashes = "".to_owned(); @@ -326,7 +324,7 @@ where break; } idx.push_str(&format!("{:>8} ", m + 1)); - let ohs = self.get_hash(m + 1); + let ohs = self.get_hash(m); match ohs { Some(hs) => hashes.push_str(&format!("{} ", hs)), None => hashes.push_str(&format!("{:>8} ", "??")), @@ -351,7 +349,7 @@ where if sz > 2000 && !short { return; } - let start = if short && sz > 7 { sz / 8 - 1 } else { 0 }; + let start = if short { sz / 8 } else { 0 }; for n in start..(sz / 8 + 1) { let mut idx = "".to_owned(); let mut hashes = "".to_owned(); @@ -360,7 +358,7 @@ where break; } idx.push_str(&format!("{:>8} ", m + 1)); - let ohs = self.get_from_file(m + 1); + let ohs = self.get_from_file(m); match ohs { Some(hs) => hashes.push_str(&format!("{} ", hs)), None => hashes.push_str(&format!("{:>8} ", " .")), @@ -379,57 +377,57 @@ where { type Item = T::E; - fn get_hash(&self, pos: u64) -> Option { - if pos > self.last_pos { + fn get_hash(&self, pos0: u64) -> Option { + if pos0 >= self.size { None - } else if is_leaf(pos) { + } else if is_leaf(pos0) { // If we are a leaf then get hash from the backend. - self.backend.get_hash(pos) + self.backend.get_hash(pos0) } else { // If we are not a leaf get hash ignoring the remove log. - self.backend.get_from_file(pos) + self.backend.get_from_file(pos0) } } - fn get_data(&self, pos: u64) -> Option { - if pos > self.last_pos { + fn get_data(&self, pos0: u64) -> Option { + if pos0 >= self.size { // If we are beyond the rhs of the MMR return None. None - } else if is_leaf(pos) { + } else if is_leaf(pos0) { // If we are a leaf then get data from the backend. - self.backend.get_data(pos) + self.backend.get_data(pos0) } else { // If we are not a leaf then return None as only leaves have data. 
None } } - fn get_from_file(&self, pos: u64) -> Option { - if pos > self.last_pos { + fn get_from_file(&self, pos0: u64) -> Option { + if pos0 >= self.size { None } else { - self.backend.get_from_file(pos) + self.backend.get_from_file(pos0) } } - fn get_peak_from_file(&self, pos: u64) -> Option { - if pos > self.last_pos { + fn get_peak_from_file(&self, pos0: u64) -> Option { + if pos0 >= self.size { None } else { - self.backend.get_peak_from_file(pos) + self.backend.get_peak_from_file(pos0) } } - fn get_data_from_file(&self, pos: u64) -> Option { - if pos > self.last_pos { + fn get_data_from_file(&self, pos0: u64) -> Option { + if pos0 >= self.size { None } else { - self.backend.get_data_from_file(pos) + self.backend.get_data_from_file(pos0) } } fn unpruned_size(&self) -> u64 { - self.last_pos + self.size } fn leaf_pos_iter(&self) -> Box + '_> { @@ -500,7 +498,7 @@ pub fn peak_sizes_height(mut size: u64) -> (Vec, u64) { (peak_sizes, size) } -/// Gets the postorder traversal index of all peaks in a MMR given its size. +/// Gets the postorder traversal 0-based index of all peaks in a MMR given its size. /// Starts with the top peak, which is always on the left /// side of the range, and navigates toward lower siblings toward the right /// of the range. @@ -514,6 +512,7 @@ pub fn peaks(size: u64) -> Vec { *acc += &x; Some(*acc) }) + .map(|x| x - 1) // rust doesn't allow starting scan with -1 as u64 .collect() } else { vec![] @@ -529,20 +528,25 @@ pub fn n_leaves(size: u64) -> u64 { } } -/// Returns the 1-based pmmr index of 1-based leaf n -pub fn insertion_to_pmmr_index(nleaf1: u64) -> u64 { - if nleaf1 == 0 { - panic!("insertion_to_pmmr_index called with nleaf1 == 0"); - } - 2 * (nleaf1 - 1) + 1 - (nleaf1 - 1).count_ones() as u64 +/// returns least position >= pos0 with height 0 +pub fn round_up_to_leaf_pos(pos0: u64) -> u64 { + let (insert_idx, height) = peak_map_height(pos0); + let leaf_idx = if height == 0 { + insert_idx + } else { + insert_idx + 1 + }; + return insertion_to_pmmr_index(leaf_idx); +} + +/// Returns the 0-based pmmr index of 0-based leaf index n +pub fn insertion_to_pmmr_index(nleaf0: u64) -> u64 { + 2 * nleaf0 - nleaf0.count_ones() as u64 } /// Returns the insertion index of the given leaf index -pub fn pmmr_leaf_to_insertion_index(pos1: u64) -> Option { - if pos1 == 0 { - panic!("pmmr_leaf_to_insertion_index called with pos1 == 0"); - } - let (insert_idx, height) = peak_map_height(pos1 - 1); +pub fn pmmr_leaf_to_insertion_index(pos0: u64) -> Option { + let (insert_idx, height) = peak_map_height(pos0); if height == 0 { Some(insert_idx) } else { @@ -552,113 +556,49 @@ pub fn pmmr_leaf_to_insertion_index(pos1: u64) -> Option { /// The height of a node in a full binary tree from its postorder traversal /// index. -pub fn bintree_postorder_height(pos1: u64) -> u64 { - if pos1 == 0 { - panic!("bintree_postorder_height called with pos1 == 0"); - } - peak_map_height(pos1 - 1).1 +pub fn bintree_postorder_height(pos0: u64) -> u64 { + peak_map_height(pos0).1 } /// Is this position a leaf in the MMR? /// We know the positions of all leaves based on the postorder height of an MMR /// of any size (somewhat unintuitively but this is how the PMMR is "append /// only"). -pub fn is_leaf(pos1: u64) -> bool { - if pos1 == 0 { - panic!("is_leaf called with pos1 == 0"); - } - bintree_postorder_height(pos1) == 0 +pub fn is_leaf(pos0: u64) -> bool { + bintree_postorder_height(pos0) == 0 } /// Calculates the positions of the parent and sibling of the node at the /// provided position. 
-pub fn family(pos1: u64) -> (u64, u64) { - if pos1 == 0 { - panic!("family called with pos1 == 0"); - } - let (peak_map, height) = peak_map_height(pos1 - 1); +pub fn family(pos0: u64) -> (u64, u64) { + let (peak_map, height) = peak_map_height(pos0); let peak = 1 << height; if (peak_map & peak) != 0 { - (pos1 + 1, pos1 + 1 - 2 * peak) + (pos0 + 1, pos0 + 1 - 2 * peak) } else { - (pos1 + 2 * peak, pos1 + 2 * peak - 1) + (pos0 + 2 * peak, pos0 + 2 * peak - 1) } } /// Is the node at this pos the "left" sibling of its parent? -pub fn is_left_sibling(pos1: u64) -> bool { - if pos1 == 0 { - panic!("is_left_sibling called with pos1 == 0"); - } - let (peak_map, height) = peak_map_height(pos1 - 1); +pub fn is_left_sibling(pos0: u64) -> bool { + let (peak_map, height) = peak_map_height(pos0); let peak = 1 << height; (peak_map & peak) == 0 } -/// Returns the path from the specified position up to its -/// corresponding peak in the MMR. -/// The size (and therefore the set of peaks) of the MMR -/// is defined by last_pos. -pub fn path(pos1: u64, last_pos: u64) -> impl Iterator { - Path::new(pos1, last_pos) -} - -struct Path { - current: u64, - last_pos: u64, - peak: u64, - peak_map: u64, -} - -impl Path { - fn new(pos1: u64, last_pos: u64) -> Self { - if pos1 == 0 { - panic!("Path::new called with pos1 == 0"); - } - let (peak_map, height) = peak_map_height(pos1 - 1); - Path { - current: pos1, - peak: 1 << height, - peak_map, - last_pos, - } - } -} - -impl Iterator for Path { - type Item = u64; - - fn next(&mut self) -> Option { - if self.current > self.last_pos { - return None; - } - - let next = Some(self.current); - self.current += if (self.peak_map & self.peak) != 0 { - 1 - } else { - 2 * self.peak - }; - self.peak <<= 1; - next - } -} - /// For a given starting position calculate the parent and sibling positions /// for the branch/path from that position to the peak of the tree. /// We will use the sibling positions to generate the "path" of a Merkle proof. -pub fn family_branch(pos1: u64, last_pos: u64) -> Vec<(u64, u64)> { - if pos1 == 0 { - panic!("family_branch called with pos1 == 0"); - } +pub fn family_branch(pos0: u64, size: u64) -> Vec<(u64, u64)> { // loop going up the tree, from node to parent, as long as we stay inside - // the tree (as defined by last_pos). - let (peak_map, height) = peak_map_height(pos1 - 1); + // the tree (as defined by size). + let (peak_map, height) = peak_map_height(pos0); let mut peak = 1 << height; let mut branch = vec![]; - let mut current = pos1; + let mut current = pos0; let mut sibling; - while current < last_pos { + while current + 1 < size { if (peak_map & peak) != 0 { current += 1; sibling = current - 2 * peak; @@ -666,7 +606,7 @@ pub fn family_branch(pos1: u64, last_pos: u64) -> Vec<(u64, u64)> { current += 2 * peak; sibling = current - 1; }; - if current > last_pos { + if current >= size { break; } branch.push((current, sibling)); @@ -676,20 +616,20 @@ pub fn family_branch(pos1: u64, last_pos: u64) -> Vec<(u64, u64)> { } /// Gets the position of the rightmost node (i.e. leaf) beneath the provided subtree root. -pub fn bintree_rightmost(pos1: u64) -> u64 { - pos1 - bintree_postorder_height(pos1) +pub fn bintree_rightmost(pos0: u64) -> u64 { + pos0 - bintree_postorder_height(pos0) } /// Gets the position of the leftmost node (i.e. leaf) beneath the provided subtree root. 
-pub fn bintree_leftmost(pos1: u64) -> u64 { - let height = bintree_postorder_height(pos1); - pos1 + 2 - (2 << height) +pub fn bintree_leftmost(pos0: u64) -> u64 { + let height = bintree_postorder_height(pos0); + pos0 + 2 - (2 << height) } /// Iterator over all leaf pos beneath the provided subtree root (including the root itself). -pub fn bintree_leaf_pos_iter(pos1: u64) -> Box> { - let leaf_start = pmmr_leaf_to_insertion_index(bintree_leftmost(pos1)); - let leaf_end = pmmr_leaf_to_insertion_index(bintree_rightmost(pos1)); +pub fn bintree_leaf_pos_iter(pos0: u64) -> Box> { + let leaf_start = pmmr_leaf_to_insertion_index(bintree_leftmost(pos0)); + let leaf_end = pmmr_leaf_to_insertion_index(bintree_rightmost(pos0)); let leaf_start = match leaf_start { Some(l) => l, None => return Box::new(iter::empty::()), @@ -698,18 +638,18 @@ pub fn bintree_leaf_pos_iter(pos1: u64) -> Box> { Some(l) => l, None => return Box::new(iter::empty::()), }; - Box::new((leaf_start..=leaf_end).map(|n| insertion_to_pmmr_index(n + 1))) + Box::new((leaf_start..=leaf_end).map(|n| insertion_to_pmmr_index(n))) } /// Iterator over all pos beneath the provided subtree root (including the root itself). -pub fn bintree_pos_iter(pos1: u64) -> impl Iterator { - let leaf_start = bintree_leftmost(pos1 as u64); - (leaf_start..=pos1).into_iter() +pub fn bintree_pos_iter(pos0: u64) -> impl Iterator { + let leaf_start = bintree_leftmost(pos0); + (leaf_start..=pos0).into_iter() } /// All pos in the subtree beneath the provided root, including root itself. -pub fn bintree_range(pos1: u64) -> Range { - let height = bintree_postorder_height(pos1); - let leftmost = pos1 + 2 - (2 << height); - leftmost..(pos1 + 1) +pub fn bintree_range(pos0: u64) -> Range { + let height = bintree_postorder_height(pos0); + let leftmost = pos0 + 2 - (2 << height); + leftmost..(pos0 + 1) } diff --git a/core/src/core/pmmr/readonly_pmmr.rs b/core/src/core/pmmr/readonly_pmmr.rs index 6b71cb930..9ecc92186 100644 --- a/core/src/core/pmmr/readonly_pmmr.rs +++ b/core/src/core/pmmr/readonly_pmmr.rs @@ -28,7 +28,7 @@ where B: Backend, { /// The last position in the PMMR - last_pos: u64, + size: u64, /// The backend for this readonly PMMR backend: &'a B, // only needed to parameterise Backend @@ -44,17 +44,17 @@ where pub fn new(backend: &'a B) -> ReadonlyPMMR<'_, T, B> { ReadonlyPMMR { backend, - last_pos: 0, + size: 0, _marker: marker::PhantomData, } } /// Build a new readonly PMMR pre-initialized to - /// last_pos with the provided backend. - pub fn at(backend: &'a B, last_pos: u64) -> ReadonlyPMMR<'_, T, B> { + /// size with the provided backend. 
+ pub fn at(backend: &'a B, size: u64) -> ReadonlyPMMR<'_, T, B> { ReadonlyPMMR { backend, - last_pos, + size, _marker: marker::PhantomData, } } @@ -64,45 +64,41 @@ where /// returns last pmmr index returned along with data pub fn elements_from_pmmr_index( &self, - mut pmmr_index: u64, + pmmr_index1: u64, max_count: u64, - max_pmmr_pos: Option, + max_pmmr_pos1: Option, ) -> (u64, Vec) { let mut return_vec = vec![]; - let last_pos = match max_pmmr_pos { + let size = match max_pmmr_pos1 { Some(p) => p, - None => self.last_pos, + None => self.size, }; - if pmmr_index == 0 { - pmmr_index = 1; - } - while return_vec.len() < max_count as usize && pmmr_index <= last_pos { + let mut pmmr_index = pmmr_index1 - 1; + while return_vec.len() < max_count as usize && pmmr_index < size { if let Some(t) = self.get_data(pmmr_index) { return_vec.push(t); } pmmr_index += 1; } - (pmmr_index.saturating_sub(1), return_vec) + (pmmr_index, return_vec) } /// Helper function to get the last N nodes inserted, i.e. the last /// n nodes along the bottom of the tree. /// May return less than n items if the MMR has been pruned/compacted. + /// NOTE This should just iterate over insertion indices + /// to avoid the repeated calls to bintree_rightmost! pub fn get_last_n_insertions(&self, n: u64) -> Vec<(Hash, T::E)> { let mut return_vec = vec![]; - let mut last_leaf = self.last_pos; - for _ in 0..n as u64 { - if last_leaf == 0 { - break; - } - last_leaf = bintree_rightmost(last_leaf); + let mut last_leaf = self.size; + while return_vec.len() < n as usize && last_leaf > 0 { + last_leaf = bintree_rightmost(last_leaf - 1); if let Some(hash) = self.backend.get_hash(last_leaf) { if let Some(data) = self.backend.get_data(last_leaf) { return_vec.push((hash, data)); } } - last_leaf -= 1; } return_vec } @@ -115,57 +111,57 @@ where { type Item = T::E; - fn get_hash(&self, pos: u64) -> Option { - if pos > self.last_pos { + fn get_hash(&self, pos0: u64) -> Option { + if pos0 >= self.size { None - } else if is_leaf(pos) { + } else if is_leaf(pos0) { // If we are a leaf then get hash from the backend. - self.backend.get_hash(pos) + self.backend.get_hash(pos0) } else { // If we are not a leaf get hash ignoring the remove log. - self.backend.get_from_file(pos) + self.backend.get_from_file(pos0) } } - fn get_data(&self, pos: u64) -> Option { - if pos > self.last_pos { + fn get_data(&self, pos0: u64) -> Option { + if pos0 >= self.size { // If we are beyond the rhs of the MMR return None. None - } else if is_leaf(pos) { + } else if is_leaf(pos0) { // If we are a leaf then get data from the backend. - self.backend.get_data(pos) + self.backend.get_data(pos0) } else { // If we are not a leaf then return None as only leaves have data. 
None } } - fn get_from_file(&self, pos: u64) -> Option { - if pos > self.last_pos { + fn get_from_file(&self, pos0: u64) -> Option { + if pos0 >= self.size { None } else { - self.backend.get_from_file(pos) + self.backend.get_from_file(pos0) } } - fn get_peak_from_file(&self, pos: u64) -> Option { - if pos > self.last_pos { + fn get_peak_from_file(&self, pos0: u64) -> Option { + if pos0 >= self.size { None } else { - self.backend.get_peak_from_file(pos) + self.backend.get_peak_from_file(pos0) } } - fn get_data_from_file(&self, pos: u64) -> Option { - if pos > self.last_pos { + fn get_data_from_file(&self, pos0: u64) -> Option { + if pos0 >= self.size { None } else { - self.backend.get_data_from_file(pos) + self.backend.get_data_from_file(pos0) } } fn unpruned_size(&self) -> u64 { - self.last_pos + self.size } fn leaf_pos_iter(&self) -> Box + '_> { diff --git a/core/src/core/pmmr/rewindable_pmmr.rs b/core/src/core/pmmr/rewindable_pmmr.rs index fee03394b..3610d082c 100644 --- a/core/src/core/pmmr/rewindable_pmmr.rs +++ b/core/src/core/pmmr/rewindable_pmmr.rs @@ -17,7 +17,7 @@ use std::marker; -use crate::core::pmmr::{bintree_postorder_height, Backend, ReadonlyPMMR}; +use crate::core::pmmr::{round_up_to_leaf_pos, Backend, ReadonlyPMMR}; use crate::ser::PMMRable; /// Rewindable (but still readonly) view of a PMMR. @@ -64,11 +64,7 @@ where // Identify which actual position we should rewind to as the provided // position is a leaf. We traverse the MMR to include any parent(s) that // need to be included for the MMR to be valid. - let mut pos = position; - while bintree_postorder_height(pos + 1) > 0 { - pos += 1; - } - self.last_pos = pos; + self.last_pos = round_up_to_leaf_pos(position); Ok(()) } diff --git a/core/src/core/pmmr/segment.rs b/core/src/core/pmmr/segment.rs index ecaa0bc15..b4d1286e3 100644 --- a/core/src/core/pmmr/segment.rs +++ b/core/src/core/pmmr/segment.rs @@ -134,34 +134,33 @@ impl Segment { ) } - /// Whether the segment is full (size == capacity) + /// Whether the segment is full (segment size == capacity) fn full_segment(&self, last_pos: u64) -> bool { self.segment_unpruned_size(last_pos) == self.segment_capacity() } /// Inclusive range of MMR positions for this segment - pub fn segment_pos_range(&self, last_pos: u64) -> (u64, u64) { - let segment_size = self.segment_unpruned_size(last_pos); + pub fn segment_pos_range(&self, mmr_size: u64) -> (u64, u64) { + let segment_size = self.segment_unpruned_size(mmr_size); let leaf_offset = self.leaf_offset(); - let first = pmmr::insertion_to_pmmr_index(leaf_offset + 1); - let last = if self.full_segment(last_pos) { - pmmr::insertion_to_pmmr_index(leaf_offset + segment_size) + let first = pmmr::insertion_to_pmmr_index(leaf_offset); + let last = if self.full_segment(mmr_size) { + pmmr::insertion_to_pmmr_index(leaf_offset + segment_size - 1) + (self.identifier.height as u64) } else { - last_pos + mmr_size - 1 }; - (first, last) } /// TODO - binary_search_by_key() here (can we assume these are sorted by pos?) 
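(As an aside on the rewind change above: round_up_to_leaf_pos() rounds a 0-based position up to the nearest leaf position, replacing the old manual loop that walked past any parent nodes. A rough sketch, with values taken from the new test_round_up_to_leaf_pos test added later in this patch:)

    assert_eq!(pmmr::round_up_to_leaf_pos(2), 3);  // 2 is an internal node, the next leaf is 3
    assert_eq!(pmmr::round_up_to_leaf_pos(3), 3);  // already a leaf, unchanged
    assert_eq!(pmmr::round_up_to_leaf_pos(5), 7);  // 5 and 6 are internal, the next leaf is 7
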
- fn get_hash(&self, pos: u64) -> Result { + fn get_hash(&self, pos0: u64) -> Result { self.hash_pos .iter() .zip(&self.hashes) - .find(|&(&p, _)| p == pos) + .find(|&(&p, _)| p == pos0) .map(|(_, &h)| h) - .ok_or_else(|| SegmentError::MissingHash(pos)) + .ok_or_else(|| SegmentError::MissingHash(pos0)) } /// Get the identifier associated with this segment @@ -200,16 +199,16 @@ impl Segment { proof: SegmentProof, ) -> Self { assert_eq!(hash_pos.len(), hashes.len()); - let mut last_pos = 0; + let mut last = 0; for &pos in &hash_pos { - assert!(pos > last_pos); - last_pos = pos; + assert!(last == 0 || pos > last); + last = pos; } assert_eq!(leaf_pos.len(), leaf_data.len()); - last_pos = 0; + last = 0; for &pos in &leaf_pos { - assert!(pos > last_pos); - last_pos = pos; + assert!(last == 0 || pos > last); + last = pos; } Self { @@ -262,28 +261,28 @@ where { let mut segment = Segment::empty(segment_id); - let last_pos = pmmr.unpruned_size(); - if segment.segment_unpruned_size(last_pos) == 0 { + let mmr_size = pmmr.unpruned_size(); + if segment.segment_unpruned_size(mmr_size) == 0 { return Err(SegmentError::NonExistent); } // Fill leaf data and hashes - let (segment_first_pos, segment_last_pos) = segment.segment_pos_range(last_pos); - for pos in segment_first_pos..=segment_last_pos { - if pmmr::is_leaf(pos) { - if let Some(data) = pmmr.get_data_from_file(pos) { + let (segment_first_pos, segment_last_pos) = segment.segment_pos_range(mmr_size); + for pos0 in segment_first_pos..=segment_last_pos { + if pmmr::is_leaf(pos0) { + if let Some(data) = pmmr.get_data_from_file(pos0) { segment.leaf_data.push(data); - segment.leaf_pos.push(pos); + segment.leaf_pos.push(pos0); continue; } else if !prunable { - return Err(SegmentError::MissingLeaf(pos)); + return Err(SegmentError::MissingLeaf(pos0)); } } // TODO: optimize, no need to send every intermediary hash if prunable { - if let Some(hash) = pmmr.get_from_file(pos) { + if let Some(hash) = pmmr.get_from_file(pos0) { segment.hashes.push(hash); - segment.hash_pos.push(pos); + segment.hash_pos.push(pos0); } } } @@ -291,12 +290,12 @@ where let mut start_pos = None; // Fully pruned segment: only include a single hash, the first unpruned parent if segment.leaf_data.is_empty() && segment.hashes.is_empty() { - let family_branch = pmmr::family_branch(segment_last_pos, last_pos); - for (pos, _) in family_branch { - if let Some(hash) = pmmr.get_from_file(pos) { + let family_branch = pmmr::family_branch(segment_last_pos, mmr_size); + for (pos0, _) in family_branch { + if let Some(hash) = pmmr.get_from_file(pos0) { segment.hashes.push(hash); - segment.hash_pos.push(pos); - start_pos = Some(pos); + segment.hash_pos.push(pos0); + start_pos = Some(1 + pos0); break; } } @@ -305,9 +304,9 @@ where // Segment merkle proof segment.proof = SegmentProof::generate( pmmr, - last_pos, - segment_first_pos, - segment_last_pos, + mmr_size, + 1 + segment_first_pos, + 1 + segment_last_pos, start_pos, )?; @@ -323,25 +322,25 @@ where /// Returns `None` iff the segment is full and completely pruned pub fn root( &self, - last_pos: u64, + mmr_size: u64, bitmap: Option<&Bitmap>, ) -> Result, SegmentError> { - let (segment_first_pos, segment_last_pos) = self.segment_pos_range(last_pos); + let (segment_first_pos, segment_last_pos) = self.segment_pos_range(mmr_size); let mut hashes = Vec::>::with_capacity(2 * (self.identifier.height as usize)); - let mut leaves = self.leaf_pos.iter().zip(&self.leaf_data); - for pos in segment_first_pos..=segment_last_pos { - let height = 
pmmr::bintree_postorder_height(pos); + let mut leaves0 = self.leaf_pos.iter().zip(&self.leaf_data); + for pos0 in segment_first_pos..=segment_last_pos { + let height = pmmr::bintree_postorder_height(pos0); let hash = if height == 0 { // Leaf if bitmap .map(|b| { - let idx_1 = pmmr::n_leaves(pos) - 1; - let idx_2 = if pmmr::is_left_sibling(pos) { + let idx_1 = pmmr::n_leaves(pos0 + 1) - 1; + let idx_2 = if pmmr::is_left_sibling(pos0) { idx_1 + 1 } else { idx_1 - 1 }; - b.contains(idx_1 as u32) || b.contains(idx_2 as u32) || pos == last_pos + b.contains(idx_1 as u32) || b.contains(idx_2 as u32) || pos0 == mmr_size - 1 }) .unwrap_or(true) { @@ -351,17 +350,17 @@ where // require the last leaf to be present regardless of the status in the bitmap. // TODO: possibly remove requirement on the sibling when we no longer support // syncing through the txhashset.zip method. - let data = leaves - .find(|&(&p, _)| p == pos) + let data = leaves0 + .find(|&(&p, _)| p == pos0) .map(|(_, l)| l) - .ok_or_else(|| SegmentError::MissingLeaf(pos))?; - Some(data.hash_with_index(pos - 1)) + .ok_or_else(|| SegmentError::MissingLeaf(pos0))?; + Some(data.hash_with_index(pos0)) } else { None } } else { - let left_child_pos = pos - (1 << height); - let right_child_pos = pos - 1; + let left_child_pos = 1 + pos0 - (1 << height); + let right_child_pos = pos0; let right_child = hashes.pop().unwrap(); let left_child = hashes.pop().unwrap(); @@ -370,14 +369,14 @@ where // Prunable MMR match (left_child, right_child) { (None, None) => None, - (Some(l), Some(r)) => Some((l, r).hash_with_index(pos - 1)), + (Some(l), Some(r)) => Some((l, r).hash_with_index(pos0)), (None, Some(r)) => { - let l = self.get_hash(left_child_pos)?; - Some((l, r).hash_with_index(pos - 1)) + let l = self.get_hash(left_child_pos - 1)?; + Some((l, r).hash_with_index(pos0)) } (Some(l), None) => { - let r = self.get_hash(right_child_pos)?; - Some((l, r).hash_with_index(pos - 1)) + let r = self.get_hash(right_child_pos - 1)?; + Some((l, r).hash_with_index(pos0)) } } } else { @@ -388,70 +387,72 @@ where right_child .ok_or_else(|| SegmentError::MissingHash(right_child_pos))?, ) - .hash_with_index(pos - 1), + .hash_with_index(pos0), ) } }; hashes.push(hash); } - if self.full_segment(last_pos) { + if self.full_segment(mmr_size) { // Full segment: last position of segment is subtree root Ok(hashes.pop().unwrap()) } else { // Not full (only final segment): peaks in segment, bag them together - let peaks = pmmr::peaks(last_pos) + let peaks = pmmr::peaks(mmr_size) .into_iter() - .filter(|&pos| pos >= segment_first_pos && pos <= segment_last_pos) + .filter(|&pos0| pos0 >= segment_first_pos && pos0 <= segment_last_pos) .rev(); let mut hash = None; - for pos in peaks { - let mut lhash = hashes.pop().ok_or_else(|| SegmentError::MissingHash(pos))?; + for pos0 in peaks { + let mut lhash = hashes + .pop() + .ok_or_else(|| SegmentError::MissingHash(1 + pos0))?; if lhash.is_none() && bitmap.is_some() { // If this entire peak is pruned, load it from the segment hashes - lhash = Some(self.get_hash(pos)?); + lhash = Some(self.get_hash(pos0)?); } - let lhash = lhash.ok_or_else(|| SegmentError::MissingHash(pos))?; + let lhash = lhash.ok_or_else(|| SegmentError::MissingHash(1 + pos0))?; hash = match hash { None => Some(lhash), - Some(rhash) => Some((lhash, rhash).hash_with_index(last_pos)), + Some(rhash) => Some((lhash, rhash).hash_with_index(mmr_size)), }; } Ok(Some(hash.unwrap())) } } - /// Get the first unpruned parent hash of this segment + /// Get the first 1-based (sucks) 
unpruned parent hash of this segment pub fn first_unpruned_parent( &self, - last_pos: u64, + mmr_size: u64, bitmap: Option<&Bitmap>, ) -> Result<(Hash, u64), SegmentError> { - let root = self.root(last_pos, bitmap)?; - let (_, last) = self.segment_pos_range(last_pos); + let root = self.root(mmr_size, bitmap)?; + let (_, last) = self.segment_pos_range(mmr_size); if let Some(root) = root { - return Ok((root, last)); + return Ok((root, 1 + last)); } let bitmap = bitmap.unwrap(); - let n_leaves = pmmr::n_leaves(last_pos); + let n_leaves = pmmr::n_leaves(mmr_size); let mut cardinality = 0; - let mut pos = last; + let mut pos0 = last; let mut hash = Err(SegmentError::MissingHash(last)); - let mut family_branch = pmmr::family_branch(last, last_pos).into_iter(); + let mut family_branch = pmmr::family_branch(last, mmr_size).into_iter(); while cardinality == 0 { - hash = self.get_hash(pos).map(|h| (h, pos)); + hash = self.get_hash(pos0).map(|h| (h, 1 + pos0)); if hash.is_ok() { // Return early in case a lower level hash is already present // This can occur if both child trees are pruned but compaction hasn't run yet return hash; } - if let Some((p, _)) = family_branch.next() { - pos = p; - let range = (pmmr::n_leaves(pmmr::bintree_leftmost(p)) - 1) - ..min(pmmr::n_leaves(pmmr::bintree_rightmost(p)), n_leaves); + if let Some((p0, _)) = family_branch.next() { + pos0 = p0; + let range = (pmmr::n_leaves(1 + pmmr::bintree_leftmost(p0)) - 1) + ..min(pmmr::n_leaves(1 + pmmr::bintree_rightmost(p0)), n_leaves); cardinality = bitmap.range_cardinality(range); } else { break; @@ -463,14 +464,14 @@ where /// Check validity of the segment by calculating its root and validating the merkle proof pub fn validate( &self, - last_pos: u64, + mmr_size: u64, bitmap: Option<&Bitmap>, mmr_root: Hash, ) -> Result<(), SegmentError> { - let (first, last) = self.segment_pos_range(last_pos); - let (segment_root, segment_unpruned_pos) = self.first_unpruned_parent(last_pos, bitmap)?; + let (first, last) = self.segment_pos_range(mmr_size); + let (segment_root, segment_unpruned_pos) = self.first_unpruned_parent(mmr_size, bitmap)?; self.proof.validate( - last_pos, + mmr_size, mmr_root, first, last, @@ -483,17 +484,17 @@ where /// This function assumes a final hashing step together with `other_root` pub fn validate_with( &self, - last_pos: u64, + mmr_size: u64, bitmap: Option<&Bitmap>, mmr_root: Hash, hash_last_pos: u64, other_root: Hash, other_is_left: bool, ) -> Result<(), SegmentError> { - let (first, last) = self.segment_pos_range(last_pos); - let (segment_root, segment_unpruned_pos) = self.first_unpruned_parent(last_pos, bitmap)?; + let (first, last) = self.segment_pos_range(mmr_size); + let (segment_root, segment_unpruned_pos) = self.first_unpruned_parent(mmr_size, bitmap)?; self.proof.validate_with( - last_pos, + mmr_size, mmr_root, first, last, @@ -510,16 +511,16 @@ impl Readable for Segment { fn read(reader: &mut R) -> Result { let identifier = Readable::read(reader)?; - let mut last_pos = 0; let n_hashes = reader.read_u64()? 
as usize; let mut hash_pos = Vec::with_capacity(n_hashes); + let mut last_pos = 0; for _ in 0..n_hashes { let pos = reader.read_u64()?; if pos <= last_pos { return Err(Error::SortError); } last_pos = pos; - hash_pos.push(pos); + hash_pos.push(pos - 1); } let mut hashes = Vec::::with_capacity(n_hashes); @@ -536,7 +537,7 @@ impl Readable for Segment { return Err(Error::SortError); } last_pos = pos; - leaf_pos.push(pos); + leaf_pos.push(pos - 1); } let mut leaf_data = Vec::::with_capacity(n_leaves); @@ -562,14 +563,14 @@ impl Writeable for Segment { Writeable::write(&self.identifier, writer)?; writer.write_u64(self.hashes.len() as u64)?; for &pos in &self.hash_pos { - writer.write_u64(pos)?; + writer.write_u64(1 + pos)?; } for hash in &self.hashes { Writeable::write(hash, writer)?; } writer.write_u64(self.leaf_data.len() as u64)?; for &pos in &self.leaf_pos { - writer.write_u64(pos)?; + writer.write_u64(1 + pos)?; } for data in &self.leaf_data { Writeable::write(data, writer)?; @@ -601,21 +602,24 @@ impl SegmentProof { U: PMMRable, B: Backend, { - let family_branch = pmmr::family_branch(segment_last_pos, last_pos); + let family_branch = pmmr::family_branch(segment_last_pos - 1, last_pos); // 1. siblings along the path from the subtree root to the peak let hashes: Result, _> = family_branch .iter() - .filter(|&&(p, _)| start_pos.map(|s| p > s).unwrap_or(true)) - .map(|&(_, s)| pmmr.get_hash(s).ok_or_else(|| SegmentError::MissingHash(s))) + .filter(|&&(p0, _)| start_pos.map(|s| p0 >= s).unwrap_or(true)) + .map(|&(_, s0)| { + pmmr.get_hash(s0) + .ok_or_else(|| SegmentError::MissingHash(s0)) + }) .collect(); let mut proof = Self { hashes: hashes? }; // 2. bagged peaks to the right let peak_pos = family_branch .last() - .map(|&(p, _)| p) - .unwrap_or(segment_last_pos); + .map(|&(p0, _)| p0) + .unwrap_or(segment_last_pos - 1); if let Some(h) = pmmr.bag_the_rhs(peak_pos) { proof.hashes.push(h); } @@ -623,7 +627,7 @@ impl SegmentProof { // 3. peaks to the left let peaks: Result, _> = pmmr::peaks(last_pos) .into_iter() - .filter(|&x| x < segment_first_pos) + .filter(|&x| 1 + x < segment_first_pos) .rev() .map(|p| pmmr.get_hash(p).ok_or_else(|| SegmentError::MissingHash(p))) .collect(); @@ -641,43 +645,46 @@ impl SegmentProof { pub fn reconstruct_root( &self, last_pos: u64, - segment_first_pos: u64, - segment_last_pos: u64, + segment_first_pos0: u64, + segment_last_pos0: u64, segment_root: Hash, segment_unpruned_pos: u64, ) -> Result { let mut iter = self.hashes.iter(); - let family_branch = pmmr::family_branch(segment_last_pos, last_pos); + let family_branch = pmmr::family_branch(segment_last_pos0, last_pos); // 1. siblings along the path from the subtree root to the peak let mut root = segment_root; - for &(p, s) in family_branch + for &(p0, s0) in family_branch .iter() - .filter(|&&(p, _)| p > segment_unpruned_pos) + .filter(|&&(p0, _)| p0 >= segment_unpruned_pos) { - let sibling_hash = iter.next().ok_or_else(|| SegmentError::MissingHash(s))?; - root = if pmmr::is_left_sibling(s) { - (sibling_hash, root).hash_with_index(p - 1) + let sibling_hash = iter + .next() + .ok_or_else(|| SegmentError::MissingHash(1 + s0))?; + root = if pmmr::is_left_sibling(s0) { + (sibling_hash, root).hash_with_index(p0) } else { - (root, sibling_hash).hash_with_index(p - 1) + (root, sibling_hash).hash_with_index(p0) }; } // 2. 
bagged peaks to the right - let peak_pos = family_branch + let peak_pos0 = family_branch .last() - .map(|&(p, _)| p) - .unwrap_or(segment_last_pos); + .map(|&(p0, _)| p0) + .unwrap_or(segment_last_pos0); let rhs = pmmr::peaks(last_pos) .into_iter() - .filter(|&x| x > peak_pos) + .filter(|&x| x > peak_pos0) .next(); - if let Some(pos) = rhs { + if let Some(pos0) = rhs { root = ( root, - iter.next().ok_or_else(|| SegmentError::MissingHash(pos))?, + iter.next() + .ok_or_else(|| SegmentError::MissingHash(1 + pos0))?, ) .hash_with_index(last_pos) } @@ -685,11 +692,12 @@ impl SegmentProof { // 3. peaks to the left let peaks = pmmr::peaks(last_pos) .into_iter() - .filter(|&x| x < segment_first_pos) + .filter(|&x| x < segment_first_pos0) .rev(); - for pos in peaks { + for pos0 in peaks { root = ( - iter.next().ok_or_else(|| SegmentError::MissingHash(pos))?, + iter.next() + .ok_or_else(|| SegmentError::MissingHash(1 + pos0))?, root, ) .hash_with_index(last_pos); diff --git a/core/src/core/pmmr/vec_backend.rs b/core/src/core/pmmr/vec_backend.rs index ea7f8b70a..4650b3764 100644 --- a/core/src/core/pmmr/vec_backend.rs +++ b/core/src/core/pmmr/vec_backend.rs @@ -43,39 +43,38 @@ impl Backend for VecBackend { Ok(()) } - fn append_pruned_subtree(&mut self, _hash: Hash, _pos: u64) -> Result<(), String> { + fn append_pruned_subtree(&mut self, _hash: Hash, _pos0: u64) -> Result<(), String> { unimplemented!() } - fn get_hash(&self, position: u64) -> Option { - if self.removed.contains(&position) { + fn get_hash(&self, pos0: u64) -> Option { + if self.removed.contains(&pos0) { None } else { - self.get_from_file(position) + self.get_from_file(pos0) } } - fn get_data(&self, position: u64) -> Option { - if self.removed.contains(&position) { + fn get_data(&self, pos0: u64) -> Option { + if self.removed.contains(&pos0) { None } else { - self.get_data_from_file(position) + self.get_data_from_file(pos0) } } - fn get_from_file(&self, position: u64) -> Option { - let idx = usize::try_from(position.saturating_sub(1)).expect("usize from u64"); + fn get_from_file(&self, pos0: u64) -> Option { + let idx = usize::try_from(pos0).expect("usize from u64"); self.hashes.get(idx).cloned() } - fn get_peak_from_file(&self, position: u64) -> Option { - self.get_from_file(position) + fn get_peak_from_file(&self, pos0: u64) -> Option { + self.get_from_file(pos0) } - fn get_data_from_file(&self, position: u64) -> Option { + fn get_data_from_file(&self, pos0: u64) -> Option { if let Some(data) = &self.data { - let idx = usize::try_from(pmmr::n_leaves(position).saturating_sub(1)) - .expect("usize from u64"); + let idx = usize::try_from(pmmr::n_leaves(1 + pos0) - 1).expect("usize from u64"); data.get(idx).map(|x| x.as_elmt()) } else { None @@ -92,22 +91,23 @@ impl Backend for VecBackend { self.hashes .iter() .enumerate() - .map(|(x, _)| (x + 1) as u64) + .map(|(x, _)| x as u64) .filter(move |x| pmmr::is_leaf(*x) && !self.removed.contains(x)), ) } + /// NOTE this function is needlessly inefficient with repeated calls to n_leaves() fn leaf_idx_iter(&self, from_idx: u64) -> Box + '_> { - let from_pos = pmmr::insertion_to_pmmr_index(from_idx + 1); + let from_pos = pmmr::insertion_to_pmmr_index(from_idx); Box::new( self.leaf_pos_iter() .skip_while(move |x| *x < from_pos) - .map(|x| pmmr::n_leaves(x).saturating_sub(1)), + .map(|x| pmmr::n_leaves(x + 1) - 1), ) } - fn remove(&mut self, position: u64) -> Result<(), String> { - self.removed.insert(position); + fn remove(&mut self, pos0: u64) -> Result<(), String> { + self.removed.insert(pos0); 
Ok(()) } diff --git a/core/tests/merkle_proof.rs b/core/tests/merkle_proof.rs index f45cc9636..09a57d80f 100644 --- a/core/tests/merkle_proof.rs +++ b/core/tests/merkle_proof.rs @@ -34,7 +34,7 @@ fn merkle_proof_ser_deser() { for x in 0..15 { pmmr.push(&TestElem([0, 0, 0, x])).unwrap(); } - let proof = pmmr.merkle_proof(9).unwrap(); + let proof = pmmr.merkle_proof(8).unwrap(); let mut vec = Vec::new(); ser::serialize_default(&mut vec, &proof).expect("serialization failed"); @@ -49,12 +49,12 @@ fn pmmr_merkle_proof_prune_and_rewind() { let mut pmmr = PMMR::new(&mut ba); pmmr.push(&TestElem([0, 0, 0, 1])).unwrap(); pmmr.push(&TestElem([0, 0, 0, 2])).unwrap(); - let proof = pmmr.merkle_proof(2).unwrap(); + let proof = pmmr.merkle_proof(1).unwrap(); // now prune an element and check we can still generate // the correct Merkle proof for the other element (after sibling pruned) - pmmr.prune(1).unwrap(); - let proof_2 = pmmr.merkle_proof(2).unwrap(); + pmmr.prune(0).unwrap(); + let proof_2 = pmmr.merkle_proof(1).unwrap(); assert_eq!(proof, proof_2); } @@ -77,113 +77,113 @@ fn pmmr_merkle_proof() { pmmr.push(&elems[0]).unwrap(); let pos_0 = elems[0].hash_with_index(0); - assert_eq!(pmmr.get_hash(1).unwrap(), pos_0); + assert_eq!(pmmr.get_hash(0).unwrap(), pos_0); - let proof = pmmr.merkle_proof(1).unwrap(); + let proof = pmmr.merkle_proof(0).unwrap(); assert_eq!(proof.path, vec![]); - assert!(proof.verify(pmmr.root().unwrap(), &elems[0], 1).is_ok()); + assert!(proof.verify(pmmr.root().unwrap(), &elems[0], 0).is_ok()); pmmr.push(&elems[1]).unwrap(); let pos_1 = elems[1].hash_with_index(1); - assert_eq!(pmmr.get_hash(2).unwrap(), pos_1); + assert_eq!(pmmr.get_hash(1).unwrap(), pos_1); let pos_2 = (pos_0, pos_1).hash_with_index(2); - assert_eq!(pmmr.get_hash(3).unwrap(), pos_2); + assert_eq!(pmmr.get_hash(2).unwrap(), pos_2); assert_eq!(pmmr.root().unwrap(), pos_2); assert_eq!(pmmr.peaks(), vec![pos_2]); // single peak, path with single sibling - let proof = pmmr.merkle_proof(1).unwrap(); + let proof = pmmr.merkle_proof(0).unwrap(); assert_eq!(proof.path, vec![pos_1]); - assert!(proof.verify(pmmr.root().unwrap(), &elems[0], 1).is_ok()); + assert!(proof.verify(pmmr.root().unwrap(), &elems[0], 0).is_ok()); - let proof = pmmr.merkle_proof(2).unwrap(); + let proof = pmmr.merkle_proof(1).unwrap(); assert_eq!(proof.path, vec![pos_0]); - assert!(proof.verify(pmmr.root().unwrap(), &elems[1], 2).is_ok()); + assert!(proof.verify(pmmr.root().unwrap(), &elems[1], 1).is_ok()); // three leaves, two peaks (one also the right-most leaf) pmmr.push(&elems[2]).unwrap(); let pos_3 = elems[2].hash_with_index(3); - assert_eq!(pmmr.get_hash(4).unwrap(), pos_3); + assert_eq!(pmmr.get_hash(3).unwrap(), pos_3); assert_eq!(pmmr.root().unwrap(), (pos_2, pos_3).hash_with_index(4)); assert_eq!(pmmr.peaks(), vec![pos_2, pos_3]); - let proof = pmmr.merkle_proof(1).unwrap(); + let proof = pmmr.merkle_proof(0).unwrap(); assert_eq!(proof.path, vec![pos_1, pos_3]); - assert!(proof.verify(pmmr.root().unwrap(), &elems[0], 1).is_ok()); + assert!(proof.verify(pmmr.root().unwrap(), &elems[0], 0).is_ok()); - let proof = pmmr.merkle_proof(2).unwrap(); + let proof = pmmr.merkle_proof(1).unwrap(); assert_eq!(proof.path, vec![pos_0, pos_3]); - assert!(proof.verify(pmmr.root().unwrap(), &elems[1], 2).is_ok()); + assert!(proof.verify(pmmr.root().unwrap(), &elems[1], 1).is_ok()); - let proof = pmmr.merkle_proof(4).unwrap(); + let proof = pmmr.merkle_proof(3).unwrap(); assert_eq!(proof.path, vec![pos_2]); - assert!(proof.verify(pmmr.root().unwrap(), 
&elems[2], 4).is_ok()); + assert!(proof.verify(pmmr.root().unwrap(), &elems[2], 3).is_ok()); // 7 leaves, 3 peaks, 11 pos in total pmmr.push(&elems[3]).unwrap(); let pos_4 = elems[3].hash_with_index(4); - assert_eq!(pmmr.get_hash(5).unwrap(), pos_4); + assert_eq!(pmmr.get_hash(4).unwrap(), pos_4); let pos_5 = (pos_3, pos_4).hash_with_index(5); - assert_eq!(pmmr.get_hash(6).unwrap(), pos_5); + assert_eq!(pmmr.get_hash(5).unwrap(), pos_5); let pos_6 = (pos_2, pos_5).hash_with_index(6); - assert_eq!(pmmr.get_hash(7).unwrap(), pos_6); + assert_eq!(pmmr.get_hash(6).unwrap(), pos_6); pmmr.push(&elems[4]).unwrap(); let pos_7 = elems[4].hash_with_index(7); - assert_eq!(pmmr.get_hash(8).unwrap(), pos_7); + assert_eq!(pmmr.get_hash(7).unwrap(), pos_7); pmmr.push(&elems[5]).unwrap(); let pos_8 = elems[5].hash_with_index(8); - assert_eq!(pmmr.get_hash(9).unwrap(), pos_8); + assert_eq!(pmmr.get_hash(8).unwrap(), pos_8); let pos_9 = (pos_7, pos_8).hash_with_index(9); - assert_eq!(pmmr.get_hash(10).unwrap(), pos_9); + assert_eq!(pmmr.get_hash(9).unwrap(), pos_9); pmmr.push(&elems[6]).unwrap(); let pos_10 = elems[6].hash_with_index(10); - assert_eq!(pmmr.get_hash(11).unwrap(), pos_10); + assert_eq!(pmmr.get_hash(10).unwrap(), pos_10); assert_eq!(pmmr.unpruned_size(), 11); - let proof = pmmr.merkle_proof(1).unwrap(); + let proof = pmmr.merkle_proof(0).unwrap(); assert_eq!( proof.path, vec![pos_1, pos_5, (pos_9, pos_10).hash_with_index(11)] ); - assert!(proof.verify(pmmr.root().unwrap(), &elems[0], 1).is_ok()); + assert!(proof.verify(pmmr.root().unwrap(), &elems[0], 0).is_ok()); - let proof = pmmr.merkle_proof(2).unwrap(); + let proof = pmmr.merkle_proof(1).unwrap(); assert_eq!( proof.path, vec![pos_0, pos_5, (pos_9, pos_10).hash_with_index(11)] ); - assert!(proof.verify(pmmr.root().unwrap(), &elems[1], 2).is_ok()); + assert!(proof.verify(pmmr.root().unwrap(), &elems[1], 1).is_ok()); - let proof = pmmr.merkle_proof(4).unwrap(); + let proof = pmmr.merkle_proof(3).unwrap(); assert_eq!( proof.path, vec![pos_4, pos_2, (pos_9, pos_10).hash_with_index(11)] ); - assert!(proof.verify(pmmr.root().unwrap(), &elems[2], 4).is_ok()); + assert!(proof.verify(pmmr.root().unwrap(), &elems[2], 3).is_ok()); - let proof = pmmr.merkle_proof(5).unwrap(); + let proof = pmmr.merkle_proof(4).unwrap(); assert_eq!( proof.path, vec![pos_3, pos_2, (pos_9, pos_10).hash_with_index(11)] ); - assert!(proof.verify(pmmr.root().unwrap(), &elems[3], 5).is_ok()); + assert!(proof.verify(pmmr.root().unwrap(), &elems[3], 4).is_ok()); + + let proof = pmmr.merkle_proof(7).unwrap(); + assert_eq!(proof.path, vec![pos_8, pos_10, pos_6]); + assert!(proof.verify(pmmr.root().unwrap(), &elems[4], 7).is_ok()); let proof = pmmr.merkle_proof(8).unwrap(); - assert_eq!(proof.path, vec![pos_8, pos_10, pos_6]); - assert!(proof.verify(pmmr.root().unwrap(), &elems[4], 8).is_ok()); - - let proof = pmmr.merkle_proof(9).unwrap(); assert_eq!(proof.path, vec![pos_7, pos_10, pos_6]); - assert!(proof.verify(pmmr.root().unwrap(), &elems[5], 9).is_ok()); + assert!(proof.verify(pmmr.root().unwrap(), &elems[5], 8).is_ok()); - let proof = pmmr.merkle_proof(11).unwrap(); + let proof = pmmr.merkle_proof(10).unwrap(); assert_eq!(proof.path, vec![pos_9, pos_6]); - assert!(proof.verify(pmmr.root().unwrap(), &elems[6], 11).is_ok()); + assert!(proof.verify(pmmr.root().unwrap(), &elems[6], 10).is_ok()); } diff --git a/core/tests/pmmr.rs b/core/tests/pmmr.rs index 5235a28d2..7229b40eb 100644 --- a/core/tests/pmmr.rs +++ b/core/tests/pmmr.rs @@ -76,7 +76,7 @@ fn first_100_mmr_heights() 
{ 0 0 1 0 0 1 2 0 0 1 0 0 1 2 3 0 0 1 0 0 1 2 0 0 1 0 0 1 2 3 4 5 \ 0 0 1 0 0 1 2 0 0 1 0 0 1 2 3 0 0 1 0 0 1 2 0 0 1 0 0 1 2 3 4 0 0 1 0 0"; let first_100 = first_100_str.split(' ').map(|n| n.parse::().unwrap()); - let mut count = 1; + let mut count = 0; for n in first_100 { assert_eq!( n, @@ -91,104 +91,104 @@ fn first_100_mmr_heights() { #[test] fn test_bintree_range() { + assert_eq!(pmmr::bintree_range(0), 0..1); assert_eq!(pmmr::bintree_range(1), 1..2); - assert_eq!(pmmr::bintree_range(2), 2..3); - assert_eq!(pmmr::bintree_range(3), 1..4); + assert_eq!(pmmr::bintree_range(2), 0..3); + assert_eq!(pmmr::bintree_range(3), 3..4); assert_eq!(pmmr::bintree_range(4), 4..5); - assert_eq!(pmmr::bintree_range(5), 5..6); - assert_eq!(pmmr::bintree_range(6), 4..7); - assert_eq!(pmmr::bintree_range(7), 1..8); + assert_eq!(pmmr::bintree_range(5), 3..6); + assert_eq!(pmmr::bintree_range(6), 0..7); } // The pos of the rightmost leaf for the provided MMR size (last leaf in subtree). #[test] fn test_bintree_rightmost() { + assert_eq!(pmmr::bintree_rightmost(0), 0); assert_eq!(pmmr::bintree_rightmost(1), 1); - assert_eq!(pmmr::bintree_rightmost(2), 2); - assert_eq!(pmmr::bintree_rightmost(3), 2); + assert_eq!(pmmr::bintree_rightmost(2), 1); + assert_eq!(pmmr::bintree_rightmost(3), 3); assert_eq!(pmmr::bintree_rightmost(4), 4); - assert_eq!(pmmr::bintree_rightmost(5), 5); - assert_eq!(pmmr::bintree_rightmost(6), 5); - assert_eq!(pmmr::bintree_rightmost(7), 5); + assert_eq!(pmmr::bintree_rightmost(5), 4); + assert_eq!(pmmr::bintree_rightmost(6), 4); } // The pos of the leftmost leaf for the provided MMR size (first leaf in subtree). #[test] fn test_bintree_leftmost() { + assert_eq!(pmmr::bintree_leftmost(0), 0); assert_eq!(pmmr::bintree_leftmost(1), 1); - assert_eq!(pmmr::bintree_leftmost(2), 2); - assert_eq!(pmmr::bintree_leftmost(3), 1); + assert_eq!(pmmr::bintree_leftmost(2), 0); + assert_eq!(pmmr::bintree_leftmost(3), 3); assert_eq!(pmmr::bintree_leftmost(4), 4); - assert_eq!(pmmr::bintree_leftmost(5), 5); - assert_eq!(pmmr::bintree_leftmost(6), 4); - assert_eq!(pmmr::bintree_leftmost(7), 1); + assert_eq!(pmmr::bintree_leftmost(5), 3); + assert_eq!(pmmr::bintree_leftmost(6), 0); } #[test] fn test_bintree_leaf_pos_iter() { + assert_eq!(pmmr::bintree_leaf_pos_iter(0).collect::>(), [0]); assert_eq!(pmmr::bintree_leaf_pos_iter(1).collect::>(), [1]); - assert_eq!(pmmr::bintree_leaf_pos_iter(2).collect::>(), [2]); - assert_eq!(pmmr::bintree_leaf_pos_iter(3).collect::>(), [1, 2]); + assert_eq!(pmmr::bintree_leaf_pos_iter(2).collect::>(), [0, 1]); + assert_eq!(pmmr::bintree_leaf_pos_iter(3).collect::>(), [3]); assert_eq!(pmmr::bintree_leaf_pos_iter(4).collect::>(), [4]); - assert_eq!(pmmr::bintree_leaf_pos_iter(5).collect::>(), [5]); - assert_eq!(pmmr::bintree_leaf_pos_iter(6).collect::>(), [4, 5]); + assert_eq!(pmmr::bintree_leaf_pos_iter(5).collect::>(), [3, 4]); assert_eq!( - pmmr::bintree_leaf_pos_iter(7).collect::>(), - [1, 2, 4, 5] + pmmr::bintree_leaf_pos_iter(6).collect::>(), + [0, 1, 3, 4] ); } #[test] fn test_bintree_pos_iter() { + assert_eq!(pmmr::bintree_pos_iter(0).collect::>(), [0]); assert_eq!(pmmr::bintree_pos_iter(1).collect::>(), [1]); - assert_eq!(pmmr::bintree_pos_iter(2).collect::>(), [2]); - assert_eq!(pmmr::bintree_pos_iter(3).collect::>(), [1, 2, 3]); + assert_eq!(pmmr::bintree_pos_iter(2).collect::>(), [0, 1, 2]); + assert_eq!(pmmr::bintree_pos_iter(3).collect::>(), [3]); assert_eq!(pmmr::bintree_pos_iter(4).collect::>(), [4]); - assert_eq!(pmmr::bintree_pos_iter(5).collect::>(), 
[5]); - assert_eq!(pmmr::bintree_pos_iter(6).collect::>(), [4, 5, 6]); + assert_eq!(pmmr::bintree_pos_iter(5).collect::>(), [3, 4, 5]); assert_eq!( - pmmr::bintree_pos_iter(7).collect::>(), - [1, 2, 3, 4, 5, 6, 7] + pmmr::bintree_pos_iter(6).collect::>(), + [0, 1, 2, 3, 4, 5, 6] ); } #[test] fn test_is_leaf() { + assert_eq!(pmmr::is_leaf(0), true); assert_eq!(pmmr::is_leaf(1), true); - assert_eq!(pmmr::is_leaf(2), true); - assert_eq!(pmmr::is_leaf(3), false); + assert_eq!(pmmr::is_leaf(2), false); + assert_eq!(pmmr::is_leaf(3), true); assert_eq!(pmmr::is_leaf(4), true); - assert_eq!(pmmr::is_leaf(5), true); + assert_eq!(pmmr::is_leaf(5), false); assert_eq!(pmmr::is_leaf(6), false); - assert_eq!(pmmr::is_leaf(7), false); } #[test] fn test_pmmr_leaf_to_insertion_index() { - assert_eq!(pmmr::pmmr_leaf_to_insertion_index(1), Some(0)); - assert_eq!(pmmr::pmmr_leaf_to_insertion_index(2), Some(1)); - assert_eq!(pmmr::pmmr_leaf_to_insertion_index(4), Some(2)); - assert_eq!(pmmr::pmmr_leaf_to_insertion_index(5), Some(3)); - assert_eq!(pmmr::pmmr_leaf_to_insertion_index(8), Some(4)); - assert_eq!(pmmr::pmmr_leaf_to_insertion_index(9), Some(5)); - assert_eq!(pmmr::pmmr_leaf_to_insertion_index(11), Some(6)); - assert_eq!(pmmr::pmmr_leaf_to_insertion_index(12), Some(7)); - assert_eq!(pmmr::pmmr_leaf_to_insertion_index(16), Some(8)); - assert_eq!(pmmr::pmmr_leaf_to_insertion_index(17), Some(9)); - assert_eq!(pmmr::pmmr_leaf_to_insertion_index(19), Some(10)); - assert_eq!(pmmr::pmmr_leaf_to_insertion_index(20), Some(11)); - assert_eq!(pmmr::pmmr_leaf_to_insertion_index(23), Some(12)); - assert_eq!(pmmr::pmmr_leaf_to_insertion_index(24), Some(13)); - assert_eq!(pmmr::pmmr_leaf_to_insertion_index(26), Some(14)); - assert_eq!(pmmr::pmmr_leaf_to_insertion_index(27), Some(15)); - assert_eq!(pmmr::pmmr_leaf_to_insertion_index(32), Some(16)); + assert_eq!(pmmr::pmmr_leaf_to_insertion_index(0), Some(0)); + assert_eq!(pmmr::pmmr_leaf_to_insertion_index(1), Some(1)); + assert_eq!(pmmr::pmmr_leaf_to_insertion_index(3), Some(2)); + assert_eq!(pmmr::pmmr_leaf_to_insertion_index(4), Some(3)); + assert_eq!(pmmr::pmmr_leaf_to_insertion_index(7), Some(4)); + assert_eq!(pmmr::pmmr_leaf_to_insertion_index(8), Some(5)); + assert_eq!(pmmr::pmmr_leaf_to_insertion_index(10), Some(6)); + assert_eq!(pmmr::pmmr_leaf_to_insertion_index(11), Some(7)); + assert_eq!(pmmr::pmmr_leaf_to_insertion_index(15), Some(8)); + assert_eq!(pmmr::pmmr_leaf_to_insertion_index(16), Some(9)); + assert_eq!(pmmr::pmmr_leaf_to_insertion_index(18), Some(10)); + assert_eq!(pmmr::pmmr_leaf_to_insertion_index(19), Some(11)); + assert_eq!(pmmr::pmmr_leaf_to_insertion_index(22), Some(12)); + assert_eq!(pmmr::pmmr_leaf_to_insertion_index(23), Some(13)); + assert_eq!(pmmr::pmmr_leaf_to_insertion_index(25), Some(14)); + assert_eq!(pmmr::pmmr_leaf_to_insertion_index(26), Some(15)); + assert_eq!(pmmr::pmmr_leaf_to_insertion_index(31), Some(16)); // Not a leaf node - assert_eq!(pmmr::pmmr_leaf_to_insertion_index(31), None); + assert_eq!(pmmr::pmmr_leaf_to_insertion_index(30), None); // Sanity check to make sure we don't get an explosion around the u64 max // number of leaves - let n_leaves_max_u64 = pmmr::n_leaves(u64::MAX - 256); + let n_leaves_max_u64 = pmmr::n_leaves(u64::MAX - 257); assert_eq!( pmmr::pmmr_leaf_to_insertion_index(n_leaves_max_u64), Some(4611686018427387884) @@ -199,7 +199,6 @@ fn test_pmmr_leaf_to_insertion_index() { fn test_n_leaves() { // make sure we handle an empty MMR correctly assert_eq!(pmmr::n_leaves(0), 0); - // and various sizes on 
non-empty MMRs assert_eq!(pmmr::n_leaves(1), 1); assert_eq!(pmmr::n_leaves(2), 2); @@ -213,57 +212,65 @@ fn test_n_leaves() { assert_eq!(pmmr::n_leaves(10), 6); } +#[test] +fn test_round_up_to_leaf_pos() { + assert_eq!(pmmr::round_up_to_leaf_pos(0), 0); + assert_eq!(pmmr::round_up_to_leaf_pos(1), 1); + assert_eq!(pmmr::round_up_to_leaf_pos(2), 3); + assert_eq!(pmmr::round_up_to_leaf_pos(3), 3); + assert_eq!(pmmr::round_up_to_leaf_pos(4), 4); + assert_eq!(pmmr::round_up_to_leaf_pos(5), 7); + assert_eq!(pmmr::round_up_to_leaf_pos(6), 7); + assert_eq!(pmmr::round_up_to_leaf_pos(7), 7); + assert_eq!(pmmr::round_up_to_leaf_pos(8), 8); + assert_eq!(pmmr::round_up_to_leaf_pos(9), 10); + assert_eq!(pmmr::round_up_to_leaf_pos(10), 10); +} + /// Find parent and sibling positions for various node positions. #[test] fn various_families() { // 0 0 1 0 0 1 2 0 0 1 0 0 1 2 3 - assert_eq!(pmmr::family(1), (3, 2)); - assert_eq!(pmmr::family(2), (3, 1)); - assert_eq!(pmmr::family(3), (7, 6)); - assert_eq!(pmmr::family(4), (6, 5)); - assert_eq!(pmmr::family(5), (6, 4)); - assert_eq!(pmmr::family(6), (7, 3)); - assert_eq!(pmmr::family(7), (15, 14)); - assert_eq!(pmmr::family(1_000), (1_001, 997)); -} - -#[test] -fn test_paths() { - assert_eq!(pmmr::path(1, 3).collect::>(), [1, 3]); - assert_eq!(pmmr::path(2, 3).collect::>(), [2, 3]); - assert_eq!(pmmr::path(4, 16).collect::>(), [4, 6, 7, 15]); + assert_eq!(pmmr::family(0), (2, 1)); + assert_eq!(pmmr::family(1), (2, 0)); + assert_eq!(pmmr::family(2), (6, 5)); + assert_eq!(pmmr::family(3), (5, 4)); + assert_eq!(pmmr::family(4), (5, 3)); + assert_eq!(pmmr::family(5), (6, 2)); + assert_eq!(pmmr::family(6), (14, 13)); + assert_eq!(pmmr::family(999), (1_000, 996)); } #[test] fn test_is_left_sibling() { - assert_eq!(pmmr::is_left_sibling(1), true); - assert_eq!(pmmr::is_left_sibling(2), false); - assert_eq!(pmmr::is_left_sibling(3), true); + assert_eq!(pmmr::is_left_sibling(0), true); + assert_eq!(pmmr::is_left_sibling(1), false); + assert_eq!(pmmr::is_left_sibling(2), true); } #[test] fn various_branches() { // the two leaf nodes in a 3 node tree (height 1) - assert_eq!(pmmr::family_branch(1, 3), [(3, 2)]); - assert_eq!(pmmr::family_branch(2, 3), [(3, 1)]); + assert_eq!(pmmr::family_branch(0, 3), [(2, 1)]); + assert_eq!(pmmr::family_branch(1, 3), [(2, 0)]); // the root node in a 3 node tree - assert_eq!(pmmr::family_branch(3, 3), []); + assert_eq!(pmmr::family_branch(2, 3), []); // leaf node in a larger tree of 7 nodes (height 2) - assert_eq!(pmmr::family_branch(1, 7), [(3, 2), (7, 6)]); + assert_eq!(pmmr::family_branch(0, 7), [(2, 1), (6, 5)]); // note these only go as far up as the local peak, not necessarily the single // root - assert_eq!(pmmr::family_branch(1, 4), [(3, 2)]); + assert_eq!(pmmr::family_branch(0, 4), [(2, 1)]); // pos 4 in a tree of size 4 is a local peak - assert_eq!(pmmr::family_branch(4, 4), []); + assert_eq!(pmmr::family_branch(3, 4), []); // pos 4 in a tree of size 5 is also still a local peak - assert_eq!(pmmr::family_branch(4, 5), []); + assert_eq!(pmmr::family_branch(3, 5), []); // pos 4 in a tree of size 6 has a parent and a sibling - assert_eq!(pmmr::family_branch(4, 6), [(6, 5)]); + assert_eq!(pmmr::family_branch(3, 6), [(5, 4)]); // a tree of size 7 is all under a single root - assert_eq!(pmmr::family_branch(4, 7), [(6, 5), (7, 3)]); + assert_eq!(pmmr::family_branch(3, 7), [(5, 4), (6, 2)]); // ok now for a more realistic one, a tree with over a million nodes in it // find the "family path" back up the tree from a leaf node at 0 @@ 
-272,27 +279,27 @@ fn various_branches() { // largest possible list of peaks before we start combining them into larger // peaks. assert_eq!( - pmmr::family_branch(1, 1_049_000), + pmmr::family_branch(0, 1_049_000), [ - (3, 2), - (7, 6), - (15, 14), - (31, 30), - (63, 62), - (127, 126), - (255, 254), - (511, 510), - (1023, 1022), - (2047, 2046), - (4095, 4094), - (8191, 8190), - (16383, 16382), - (32767, 32766), - (65535, 65534), - (131071, 131070), - (262143, 262142), - (524287, 524286), - (1048575, 1048574), + (2, 1), + (6, 5), + (14, 13), + (30, 29), + (62, 61), + (126, 125), + (254, 253), + (510, 509), + (1022, 1021), + (2046, 2045), + (4094, 4093), + (8190, 8189), + (16382, 16381), + (32766, 32765), + (65534, 65533), + (131070, 131069), + (262142, 262141), + (524286, 524285), + (1048574, 1048573), ] ); } @@ -307,21 +314,21 @@ fn some_peaks() { assert_eq!(pmmr::peaks(0), empty); // and various non-empty MMRs - assert_eq!(pmmr::peaks(1), [1]); + assert_eq!(pmmr::peaks(1), [0]); assert_eq!(pmmr::peaks(2), empty); - assert_eq!(pmmr::peaks(3), [3]); - assert_eq!(pmmr::peaks(4), [3, 4]); + assert_eq!(pmmr::peaks(3), [2]); + assert_eq!(pmmr::peaks(4), [2, 3]); assert_eq!(pmmr::peaks(5), empty); assert_eq!(pmmr::peaks(6), empty); - assert_eq!(pmmr::peaks(7), [7]); - assert_eq!(pmmr::peaks(8), [7, 8]); + assert_eq!(pmmr::peaks(7), [6]); + assert_eq!(pmmr::peaks(8), [6, 7]); assert_eq!(pmmr::peaks(9), empty); - assert_eq!(pmmr::peaks(10), [7, 10]); - assert_eq!(pmmr::peaks(11), [7, 10, 11]); - assert_eq!(pmmr::peaks(22), [15, 22]); - assert_eq!(pmmr::peaks(32), [31, 32]); - assert_eq!(pmmr::peaks(35), [31, 34, 35]); - assert_eq!(pmmr::peaks(42), [31, 38, 41, 42]); + assert_eq!(pmmr::peaks(10), [6, 9]); + assert_eq!(pmmr::peaks(11), [6, 9, 10]); + assert_eq!(pmmr::peaks(22), [14, 21]); + assert_eq!(pmmr::peaks(32), [30, 31]); + assert_eq!(pmmr::peaks(35), [30, 33, 34]); + assert_eq!(pmmr::peaks(42), [30, 37, 40, 41]); // large realistic example with almost 1.5 million nodes // note the distance between peaks decreases toward the right (trees get @@ -329,8 +336,8 @@ fn some_peaks() { assert_eq!( pmmr::peaks(1048555), [ - 524287, 786430, 917501, 983036, 1015803, 1032186, 1040377, 1044472, 1046519, 1047542, - 1048053, 1048308, 1048435, 1048498, 1048529, 1048544, 1048551, 1048554, 1048555, + 524286, 786429, 917500, 983035, 1015802, 1032185, 1040376, 1044471, 1046518, 1047541, + 1048052, 1048307, 1048434, 1048497, 1048528, 1048543, 1048550, 1048553, 1048554, ], ); } @@ -516,7 +523,7 @@ fn pmmr_prune() { // pruning a leaf with no parent should do nothing { let mut pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut ba, sz); - pmmr.prune(16).unwrap(); + pmmr.prune(15).unwrap(); assert_eq!(orig_root, pmmr.root().unwrap()); } assert_eq!(ba.hashes.len(), 16); @@ -525,7 +532,7 @@ fn pmmr_prune() { // pruning leaves with no shared parent just removes 1 element { let mut pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut ba, sz); - pmmr.prune(2).unwrap(); + pmmr.prune(1).unwrap(); assert_eq!(orig_root, pmmr.root().unwrap()); } assert_eq!(ba.hashes.len(), 16); @@ -533,7 +540,7 @@ fn pmmr_prune() { { let mut pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut ba, sz); - pmmr.prune(4).unwrap(); + pmmr.prune(3).unwrap(); assert_eq!(orig_root, pmmr.root().unwrap()); } assert_eq!(ba.hashes.len(), 16); @@ -542,7 +549,7 @@ fn pmmr_prune() { // pruning a non-leaf node has no effect { let mut pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut ba, sz); - pmmr.prune(3).unwrap_err(); + pmmr.prune(2).unwrap_err(); assert_eq!(orig_root, 
pmmr.root().unwrap()); } assert_eq!(ba.hashes.len(), 16); @@ -551,7 +558,7 @@ fn pmmr_prune() { // TODO - no longer true (leaves only now) - pruning sibling removes subtree { let mut pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut ba, sz); - pmmr.prune(5).unwrap(); + pmmr.prune(4).unwrap(); assert_eq!(orig_root, pmmr.root().unwrap()); } assert_eq!(ba.hashes.len(), 16); @@ -561,7 +568,7 @@ fn pmmr_prune() { // removes all subtree { let mut pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut ba, sz); - pmmr.prune(1).unwrap(); + pmmr.prune(0).unwrap(); assert_eq!(orig_root, pmmr.root().unwrap()); } assert_eq!(ba.hashes.len(), 16); @@ -570,7 +577,7 @@ fn pmmr_prune() { // pruning everything should only leave us with a single peak { let mut pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut ba, sz); - for n in 1..16 { + for n in 0..15 { let _ = pmmr.prune(n); } assert_eq!(orig_root, pmmr.root().unwrap()); @@ -581,14 +588,14 @@ fn pmmr_prune() { #[test] fn check_insertion_to_pmmr_index() { + assert_eq!(pmmr::insertion_to_pmmr_index(0), 0); assert_eq!(pmmr::insertion_to_pmmr_index(1), 1); - assert_eq!(pmmr::insertion_to_pmmr_index(2), 2); + assert_eq!(pmmr::insertion_to_pmmr_index(2), 3); assert_eq!(pmmr::insertion_to_pmmr_index(3), 4); - assert_eq!(pmmr::insertion_to_pmmr_index(4), 5); + assert_eq!(pmmr::insertion_to_pmmr_index(4), 7); assert_eq!(pmmr::insertion_to_pmmr_index(5), 8); - assert_eq!(pmmr::insertion_to_pmmr_index(6), 9); + assert_eq!(pmmr::insertion_to_pmmr_index(6), 10); assert_eq!(pmmr::insertion_to_pmmr_index(7), 11); - assert_eq!(pmmr::insertion_to_pmmr_index(8), 12); } #[test] @@ -626,8 +633,8 @@ fn check_elements_from_pmmr_index() { assert_eq!(res.1[6].0[3], 11); // pruning a few nodes should get consistent results - pmmr.prune(pmmr::insertion_to_pmmr_index(5)).unwrap(); - pmmr.prune(pmmr::insertion_to_pmmr_index(20)).unwrap(); + pmmr.prune(pmmr::insertion_to_pmmr_index(4)).unwrap(); + pmmr.prune(pmmr::insertion_to_pmmr_index(19)).unwrap(); let res = pmmr .readonly_pmmr() diff --git a/core/tests/segment.rs b/core/tests/segment.rs index faf58d39d..cb81402fa 100644 --- a/core/tests/segment.rs +++ b/core/tests/segment.rs @@ -44,7 +44,7 @@ fn test_unprunable_size(height: u8, n_leaves: u32) { if idx < n_segments - 1 || (n_leaves as u64) % size == 0 { // Check if the reconstructed subtree root matches with the hash stored in the mmr let subtree_root = segment.root(last_pos, None).unwrap().unwrap(); - let last = pmmr::insertion_to_pmmr_index((idx + 1) * size) + (height as u64); + let last = pmmr::insertion_to_pmmr_index((idx + 1) * size - 1) + (height as u64); assert_eq!(subtree_root, mmr.get_hash(last).unwrap()); println!(" ROOT OK"); } diff --git a/core/tests/vec_backend.rs b/core/tests/vec_backend.rs index 8193548e6..e9b5808d0 100644 --- a/core/tests/vec_backend.rs +++ b/core/tests/vec_backend.rs @@ -37,7 +37,7 @@ fn leaf_pos_and_idx_iter_test() { pmmr.leaf_idx_iter(0).collect::>() ); assert_eq!( - vec![1, 2, 4, 5, 8], + vec![0, 1, 3, 4, 7], pmmr.leaf_pos_iter().collect::>() ); } @@ -61,7 +61,7 @@ fn leaf_pos_and_idx_iter_hash_only_test() { pmmr.leaf_idx_iter(0).collect::>() ); assert_eq!( - vec![1, 2, 4, 5, 8], + vec![0, 1, 3, 4, 7], pmmr.leaf_pos_iter().collect::>() ); } diff --git a/store/src/leaf_set.rs b/store/src/leaf_set.rs index 44bf91c9c..77c140c03 100644 --- a/store/src/leaf_set.rs +++ b/store/src/leaf_set.rs @@ -96,7 +96,7 @@ impl LeafSet { /// Only applicable for the output MMR. 
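(Two index spaces show up in the tests above and in the leaf_set code around here, so a brief unofficial recap: the insertion index counts leaves only, the pmmr position counts every node, and both are now 0-based at the API level, while the croaring bitmaps inside LeafSet and PruneList continue to store 1-based values internally. In the style of the tests in this patch:)

    assert_eq!(pmmr::insertion_to_pmmr_index(4), 7);             // the 5th leaf sits at node position 7
    assert_eq!(pmmr::pmmr_leaf_to_insertion_index(7), Some(4));  // and back again
    assert_eq!(pmmr::pmmr_leaf_to_insertion_index(2), None);     // 2 is a parent, not a leaf
    // LeafSet::add(pos0) stores bit 1 + pos0, so position 7 becomes bit 8 on disk.
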
fn unpruned_pre_cutoff(&self, cutoff_pos: u64, prune_list: &PruneList) -> Bitmap { (1..=cutoff_pos) - .filter(|&x| pmmr::is_leaf(x) && !prune_list.is_pruned(x)) + .filter(|&x| pmmr::is_leaf(x - 1) && !prune_list.is_pruned(x - 1)) .map(|x| x as u32) .collect() } @@ -142,13 +142,13 @@ impl LeafSet { } /// Append a new position to the leaf_set. - pub fn add(&mut self, pos: u64) { - self.bitmap.add(pos as u32); + pub fn add(&mut self, pos0: u64) { + self.bitmap.add(1 + pos0 as u32); } /// Remove the provided position from the leaf_set. - pub fn remove(&mut self, pos: u64) { - self.bitmap.remove(pos as u32); + pub fn remove(&mut self, pos0: u64) { + self.bitmap.remove(1 + pos0 as u32); } /// Saves the utxo file tagged with block hash as filename suffix. @@ -187,8 +187,8 @@ impl LeafSet { } /// Whether the leaf_set includes the provided position. - pub fn includes(&self, pos: u64) -> bool { - self.bitmap.contains(pos as u32) + pub fn includes(&self, pos0: u64) -> bool { + self.bitmap.contains(1 + pos0 as u32) } /// Number of positions stored in the leaf_set. diff --git a/store/src/pmmr.rs b/store/src/pmmr.rs index f62118b0d..44f32d141 100644 --- a/store/src/pmmr.rs +++ b/store/src/pmmr.rs @@ -78,7 +78,8 @@ impl Backend for PMMRBackend { if self.prunable { // (Re)calculate the latest pos given updated size of data file // and the total leaf_shift, and add to our leaf_set. - let pos = pmmr::insertion_to_pmmr_index(size + self.prune_list.get_total_leaf_shift()); + let pos = + pmmr::insertion_to_pmmr_index(size + self.prune_list.get_total_leaf_shift() - 1); self.leaf_set.add(pos); } @@ -87,7 +88,7 @@ impl Backend for PMMRBackend { // Supports appending a pruned subtree (single root hash) to an existing hash file. // Update the prune_list "shift cache" to reflect the new pruned leaf pos in the subtree. - fn append_pruned_subtree(&mut self, hash: Hash, pos: u64) -> Result<(), String> { + fn append_pruned_subtree(&mut self, hash: Hash, pos0: u64) -> Result<(), String> { if !self.prunable { return Err("Not prunable, cannot append pruned subtree.".into()); } @@ -96,56 +97,56 @@ impl Backend for PMMRBackend { .append(&hash) .map_err(|e| format!("Failed to append subtree hash to file. {}", e))?; - self.prune_list.append(pos); + self.prune_list.append(pos0); Ok(()) } - fn get_from_file(&self, position: u64) -> Option { - if self.is_compacted(position) { + fn get_from_file(&self, pos0: u64) -> Option { + if self.is_compacted(pos0) { return None; } - let shift = self.prune_list.get_shift(position); - self.hash_file.read(position - shift) + let shift = self.prune_list.get_shift(pos0); + self.hash_file.read(1 + pos0 - shift) } - fn get_peak_from_file(&self, position: u64) -> Option { - let shift = self.prune_list.get_shift(position); - self.hash_file.read(position - shift) + fn get_peak_from_file(&self, pos0: u64) -> Option { + let shift = self.prune_list.get_shift(pos0); + self.hash_file.read(1 + pos0 - shift) } - fn get_data_from_file(&self, position: u64) -> Option { - if !pmmr::is_leaf(position) { + fn get_data_from_file(&self, pos0: u64) -> Option { + if !pmmr::is_leaf(pos0) { return None; } - if self.is_compacted(position) { + if self.is_compacted(pos0) { return None; } - let flatfile_pos = pmmr::n_leaves(position); - let shift = self.prune_list.get_leaf_shift(position); + let flatfile_pos = pmmr::n_leaves(pos0 + 1); + let shift = self.prune_list.get_leaf_shift(1 + pos0); self.data_file.read(flatfile_pos - shift) } /// Get the hash at pos. 
/// Return None if pos is a leaf and it has been removed (or pruned or /// compacted). - fn get_hash(&self, pos: u64) -> Option { - if self.prunable && pmmr::is_leaf(pos) && !self.leaf_set.includes(pos) { + fn get_hash(&self, pos0: u64) -> Option { + if self.prunable && pmmr::is_leaf(pos0) && !self.leaf_set.includes(pos0) { return None; } - self.get_from_file(pos) + self.get_from_file(pos0) } /// Get the data at pos. /// Return None if it has been removed or if pos is not a leaf node. - fn get_data(&self, pos: u64) -> Option { - if !pmmr::is_leaf(pos) { + fn get_data(&self, pos0: u64) -> Option { + if !pmmr::is_leaf(pos0) { return None; } - if self.prunable && !self.leaf_set.includes(pos) { + if self.prunable && !self.leaf_set.includes(pos0) { return None; } - self.get_data_from_file(pos) + self.get_data_from_file(pos0) } /// Returns an iterator over all the leaf positions. @@ -153,7 +154,7 @@ impl Backend for PMMRBackend { /// For a non-prunable PMMR this is *all* leaves (this is not yet implemented). fn leaf_pos_iter(&self) -> Box + '_> { if self.prunable { - Box::new(self.leaf_set.iter()) + Box::new(self.leaf_set.iter().map(|x| x - 1)) } else { panic!("leaf_pos_iter not implemented for non-prunable PMMR") } @@ -175,7 +176,7 @@ impl Backend for PMMRBackend { // iterate, skipping everything prior to this // pass in from_idx=0 then we want to convert to pos=1 - let from_pos = pmmr::insertion_to_pmmr_index(from_idx + 1); + let from_pos = 1 + pmmr::insertion_to_pmmr_index(from_idx); if self.prunable { Box::new( @@ -197,21 +198,29 @@ impl Backend for PMMRBackend { } // Rewind the hash file accounting for pruned/compacted pos - let shift = self.prune_list.get_shift(position); + let shift = if position == 0 { + 0 + } else { + self.prune_list.get_shift(position - 1) + }; self.hash_file.rewind(position - shift); // Rewind the data file accounting for pruned/compacted pos let flatfile_pos = pmmr::n_leaves(position); - let leaf_shift = self.prune_list.get_leaf_shift(position); + let leaf_shift = if position == 0 { + 0 + } else { + self.prune_list.get_leaf_shift(position) + }; self.data_file.rewind(flatfile_pos - leaf_shift); Ok(()) } /// Remove by insertion position. - fn remove(&mut self, pos: u64) -> Result<(), String> { + fn remove(&mut self, pos0: u64) -> Result<(), String> { assert!(self.prunable, "Remove on non-prunable MMR"); - self.leaf_set.remove(pos); + self.leaf_set.remove(pos0); Ok(()) } @@ -296,23 +305,23 @@ impl PMMRBackend { }) } - fn is_pruned(&self, pos: u64) -> bool { - self.prune_list.is_pruned(pos) + fn is_pruned(&self, pos0: u64) -> bool { + self.prune_list.is_pruned(pos0) } - fn is_pruned_root(&self, pos: u64) -> bool { - self.prune_list.is_pruned_root(pos) + fn is_pruned_root(&self, pos0: u64) -> bool { + self.prune_list.is_pruned_root(pos0) } // Check if pos is pruned but not a pruned root itself. // Checking for pruned root is faster so we do this check first. // We can do a fast initial check as well - // if its in the current leaf_set then we know it is not compacted. - fn is_compacted(&self, pos: u64) -> bool { - if self.leaf_set.includes(pos) { + fn is_compacted(&self, pos0: u64) -> bool { + if self.leaf_set.includes(pos0) { return false; } - !self.is_pruned_root(pos) && self.is_pruned(pos) + !self.is_pruned_root(pos0) && self.is_pruned(pos0) } /// Number of hashes in the PMMR stored by this backend. Only produces the @@ -381,9 +390,9 @@ impl PMMRBackend { // Save compact copy of the hash file, skipping removed data. 
{ - let pos_to_rm = map_vec!(pos_to_rm, |pos| { - let shift = self.prune_list.get_shift(pos.into()); - pos as u64 - shift + let pos_to_rm = map_vec!(pos_to_rm, |pos1| { + let shift = self.prune_list.get_shift(pos1 as u64 - 1); + pos1 as u64 - shift }); self.hash_file.write_tmp_pruned(&pos_to_rm)?; @@ -393,8 +402,8 @@ impl PMMRBackend { { let leaf_pos_to_rm = pos_to_rm .iter() - .filter(|&x| pmmr::is_leaf(x.into())) .map(|x| x as u64) + .filter(|x| pmmr::is_leaf(x - 1)) .collect::>(); let pos_to_rm = map_vec!(leaf_pos_to_rm, |&pos| { @@ -449,19 +458,19 @@ impl PMMRBackend { expanded.add(x); let mut current = x as u64; loop { - let (parent, sibling) = family(current); - let sibling_pruned = self.is_pruned_root(sibling); + let (parent0, sibling0) = family(current - 1); + let sibling_pruned = self.is_pruned_root(sibling0); // if sibling previously pruned // push it back onto list of pos to remove // so we can remove it and traverse up to parent if sibling_pruned { - expanded.add(sibling as u32); + expanded.add(1 + sibling0 as u32); } - if sibling_pruned || expanded.contains(sibling as u32) { - expanded.add(parent as u32); - current = parent; + if sibling_pruned || expanded.contains(1 + sibling0 as u32) { + expanded.add(1 + parent0 as u32); + current = 1 + parent0; } else { break; } @@ -477,8 +486,8 @@ fn removed_excl_roots(removed: &Bitmap) -> Bitmap { removed .iter() .filter(|pos| { - let (parent_pos, _) = family(*pos as u64); - removed.contains(parent_pos as u32) + let (parent_pos0, _) = family(*pos as u64 - 1); + removed.contains(1 + parent_pos0 as u32) }) .collect() } diff --git a/store/src/prune_list.rs b/store/src/prune_list.rs index 8f488e594..77c261e0e 100644 --- a/store/src/prune_list.rs +++ b/store/src/prune_list.rs @@ -32,6 +32,7 @@ use grin_core::core::pmmr; use crate::core::core::pmmr::{bintree_leftmost, bintree_postorder_height, family}; use crate::{read_bitmap, save_via_temp_file}; +use std::cmp::min; /// Maintains a list of previously pruned nodes in PMMR, compacting the list as /// parents get pruned and allowing checking whether a leaf is pruned. Given @@ -54,9 +55,10 @@ pub struct PruneList { } impl PruneList { - /// Instantiate a new prune list from the provided path and bitmap. + /// Instantiate a new prune list from the provided path and 1-based bitmap. /// Note: Does not flush the bitmap to disk. Caller is responsible for doing this. pub fn new(path: Option, bitmap: Bitmap) -> PruneList { + assert!(!bitmap.contains(0)); let mut prune_list = PruneList { path, bitmap: Bitmap::create(), @@ -64,8 +66,8 @@ impl PruneList { leaf_shift_cache: vec![], }; - for pos in bitmap.iter().filter(|x| *x > 0) { - prune_list.append(pos as u64) + for pos1 in bitmap.iter() { + prune_list.append(pos1 as u64 - 1) } prune_list.bitmap.run_optimize(); @@ -86,6 +88,7 @@ impl PruneList { } else { Bitmap::create() }; + assert!(!bitmap.contains(0)); let mut prune_list = PruneList::new(Some(file_path), bitmap); @@ -129,48 +132,39 @@ impl PruneList { /// Return the total shift from all entries in the prune_list. /// This is the shift we need to account for when adding new entries to our PMMR. pub fn get_total_shift(&self) -> u64 { - self.get_shift(self.bitmap.maximum().unwrap_or(0) as u64) + self.get_shift(self.bitmap.maximum().unwrap_or(1) as u64 - 1) } /// Return the total leaf_shift from all entries in the prune_list. /// This is the leaf_shift we need to account for when adding new entries to our PMMR. 
pub fn get_total_leaf_shift(&self) -> u64 { - self.get_leaf_shift(self.bitmap.maximum().unwrap_or(0) as u64) + self.get_leaf_shift(self.bitmap.maximum().unwrap_or(1) as u64 - 1) } /// Computes by how many positions a node at pos should be shifted given the /// number of nodes that have already been pruned before it. /// Note: the node at pos may be pruned and may be compacted away itself and /// the caller needs to be aware of this. - pub fn get_shift(&self, pos: u64) -> u64 { - if self.bitmap.is_empty() { - return 0; - } - - let idx = self.bitmap.rank(pos as u32); + pub fn get_shift(&self, pos0: u64) -> u64 { + let idx = self.bitmap.rank(1 + pos0 as u32); if idx == 0 { return 0; } - - if idx > self.shift_cache.len() as u64 { - self.shift_cache[self.shift_cache.len().saturating_sub(1)] - } else { - self.shift_cache[(idx as usize).saturating_sub(1)] - } + self.shift_cache[min(idx as usize, self.shift_cache.len()) - 1] } fn build_shift_cache(&mut self) { - if self.bitmap.is_empty() { - return; - } - self.shift_cache.clear(); - for pos in self.bitmap.iter().filter(|x| *x > 0) { - let pos = pos as u64; - let prev_shift = self.get_shift(pos.saturating_sub(1)); + for pos1 in self.bitmap.iter() { + let pos0 = pos1 as u64 - 1; + let prev_shift = if pos0 == 0 { + 0 + } else { + self.get_shift(pos0 - 1) + }; - let curr_shift = if self.is_pruned_root(pos) { - let height = bintree_postorder_height(pos); + let curr_shift = if self.is_pruned_root(pos0) { + let height = bintree_postorder_height(pos0); 2 * ((1 << height) - 1) } else { 0 @@ -181,10 +175,14 @@ impl PruneList { } // Calculate the next shift based on provided pos and the previous shift. - fn calculate_next_shift(&self, pos: u64) -> u64 { - let prev_shift = self.get_shift(pos.saturating_sub(1)); - let shift = if self.is_pruned_root(pos) { - let height = bintree_postorder_height(pos); + fn calculate_next_shift(&self, pos0: u64) -> u64 { + let prev_shift = if pos0 == 0 { + 0 + } else { + self.get_shift(pos0 - 1) + }; + let shift = if self.is_pruned_root(pos0) { + let height = bintree_postorder_height(pos0); 2 * ((1 << height) - 1) } else { 0 @@ -195,36 +193,26 @@ impl PruneList { /// As above, but only returning the number of leaf nodes to skip for a /// given leaf. Helpful if, for instance, data for each leaf is being stored /// separately in a continuous flat-file. 
- pub fn get_leaf_shift(&self, pos: u64) -> u64 { - if self.bitmap.is_empty() { - return 0; - } - - let idx = self.bitmap.rank(pos as u32); + pub fn get_leaf_shift(&self, pos0: u64) -> u64 { + let idx = self.bitmap.rank(1 + pos0 as u32); if idx == 0 { return 0; } - - if idx > self.leaf_shift_cache.len() as u64 { - self.leaf_shift_cache[self.leaf_shift_cache.len().saturating_sub(1)] - } else { - self.leaf_shift_cache[(idx as usize).saturating_sub(1)] - } + self.leaf_shift_cache[min(idx as usize, self.leaf_shift_cache.len()) - 1] } fn build_leaf_shift_cache(&mut self) { - if self.bitmap.is_empty() { - return; - } - self.leaf_shift_cache.clear(); + for pos1 in self.bitmap.iter() { + let pos0 = pos1 as u64 - 1; + let prev_shift = if pos0 == 0 { + 0 + } else { + self.get_leaf_shift(pos0 - 1) + }; - for pos in self.bitmap.iter().filter(|x| *x > 0) { - let pos = pos as u64; - let prev_shift = self.get_leaf_shift(pos.saturating_sub(1)); - - let curr_shift = if self.is_pruned_root(pos) { - let height = bintree_postorder_height(pos); + let curr_shift = if self.is_pruned_root(pos0) { + let height = bintree_postorder_height(pos0); if height == 0 { 0 } else { @@ -239,10 +227,14 @@ impl PruneList { } // Calculate the next leaf shift based on provided pos and the previous leaf shift. - fn calculate_next_leaf_shift(&self, pos: u64) -> u64 { - let prev_shift = self.get_leaf_shift(pos.saturating_sub(1) as u64); - let shift = if self.is_pruned_root(pos) { - let height = bintree_postorder_height(pos); + fn calculate_next_leaf_shift(&self, pos0: u64) -> u64 { + let prev_shift = if pos0 == 0 { + 0 + } else { + self.get_leaf_shift(pos0 - 1) + }; + let shift = if self.is_pruned_root(pos0) { + let height = bintree_postorder_height(pos0); if height == 0 { 0 } else { @@ -256,68 +248,65 @@ impl PruneList { // Remove any existing entries in shift_cache and leaf_shift_cache // for any pos contained in the subtree with provided root. - fn cleanup_subtree(&mut self, pos: u64) { - assert!(pos > 0, "prune list 1-indexed, 0 not valid pos"); - - let lc = bintree_leftmost(pos) as u32; - let last_pos = self.bitmap.maximum().unwrap_or(1); + fn cleanup_subtree(&mut self, pos0: u64) { + let lc0 = bintree_leftmost(pos0) as u32; + let size = self.bitmap.maximum().unwrap_or(0); // If this subtree does not intersect with existing bitmap then nothing to cleanup. - if lc > last_pos { + if lc0 >= size { return; } // Note: We will treat this as a "closed range" below (croaring api weirdness). - let cleanup_pos = lc..last_pos; + let cleanup_pos1 = (lc0 + 1)..size; // Find point where we can truncate based on bitmap "rank" (index) of pos to the left of subtree. - let idx = self.bitmap.rank(lc - 1); + let idx = self.bitmap.rank(lc0); self.shift_cache.truncate(idx as usize); self.leaf_shift_cache.truncate(idx as usize); - self.bitmap.remove_range_closed(cleanup_pos) + self.bitmap.remove_range_closed(cleanup_pos1) } /// Push the node at the provided position in the prune list. /// Assumes rollup of siblings and children has already been handled. - fn append_single(&mut self, pos: u64) { - assert!(pos > 0, "prune list 1-indexed, 0 not valid pos"); + fn append_single(&mut self, pos0: u64) { assert!( - pos > self.bitmap.maximum().unwrap_or(0) as u64, + pos0 >= self.bitmap.maximum().unwrap_or(0) as u64, "prune list append only" ); // Add this pos to the bitmap (leaf or subtree root) - self.bitmap.add(pos as u32); + self.bitmap.add(1 + pos0 as u32); // Calculate shift and leaf_shift for this pos. 
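Both caches follow the same pattern: `bitmap.rank(1 + pos0)` counts the pruned roots at or before `pos0`, and the cached entry for the last of them holds the cumulative shift. Per root of height `h`, the hash shift grows by `2*(2^h - 1)` (everything strictly below the root), while the leaf shift grows by `2^h`, except that a height-0 root is a lone pruned leaf that still owns its data slot and contributes nothing. A standalone sketch of those per-root contributions, using the values the later tests check:

    // Hash positions removed beneath one pruned subtree root of height h.
    fn shift_for_root(height: u64) -> u64 {
        2 * ((1u64 << height) - 1)
    }

    // Data-file (leaf) entries hidden beneath one pruned subtree root of height h.
    fn leaf_shift_for_root(height: u64) -> u64 {
        if height == 0 {
            0 // a lone pruned leaf still occupies its data slot
        } else {
            1u64 << height
        }
    }

    fn main() {
        // Root at pos 2 (height 1) over leaves 0 and 1.
        assert_eq!(shift_for_root(1), 2);
        assert_eq!(leaf_shift_for_root(1), 2);
        // Root at pos 6 (height 2) over positions 0..=5, i.e. leaves 0, 1, 3, 4.
        assert_eq!(shift_for_root(2), 6);
        assert_eq!(leaf_shift_for_root(2), 4);
    }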
- self.shift_cache.push(self.calculate_next_shift(pos)); + self.shift_cache.push(self.calculate_next_shift(pos0)); self.leaf_shift_cache - .push(self.calculate_next_leaf_shift(pos)); + .push(self.calculate_next_leaf_shift(pos0)); } /// Push the node at the provided position in the prune list. /// Handles rollup of siblings and children as we go (relatively slow). /// Once we find a subtree root that can not be rolled up any further /// we cleanup everything beneath it and replace it with a single appended node. - pub fn append(&mut self, pos: u64) { - assert!(pos > 0, "prune list 1-indexed, 0 not valid pos"); + pub fn append(&mut self, pos0: u64) { + let max = self.bitmap.maximum().unwrap_or(0) as u64; assert!( - pos > self.bitmap.maximum().unwrap_or(0) as u64, + pos0 >= max, "prune list append only - pos={} bitmap.maximum={}", - pos, - self.bitmap.maximum().unwrap_or(0) + pos0, + max ); - let (parent, sibling) = family(pos); - if self.is_pruned(sibling) { + let (parent0, sibling0) = family(pos0); + if self.is_pruned(sibling0) { // Recursively append the parent (removing our sibling in the process). - self.append(parent) + self.append(parent0) } else { // Make sure we roll anything beneath this up into this higher level pruned subtree root. // We should have no nested entries in the prune_list. - self.cleanup_subtree(pos); - self.append_single(pos); + self.cleanup_subtree(pos0); + self.append_single(pos0); } } @@ -334,15 +323,14 @@ impl PruneList { /// A pos is pruned if it is a pruned root directly or if it is /// beneath the "next" pruned subtree. /// We only need to consider the "next" subtree due to the append-only MMR structure. - pub fn is_pruned(&self, pos: u64) -> bool { - assert!(pos > 0, "prune list 1-indexed, 0 not valid pos"); - if self.is_pruned_root(pos) { + pub fn is_pruned(&self, pos0: u64) -> bool { + if self.is_pruned_root(pos0) { return true; } - let rank = self.bitmap.rank(pos as u32); + let rank = self.bitmap.rank(1 + pos0 as u32); if let Some(root) = self.bitmap.select(rank as u32) { - let range = pmmr::bintree_range(root as u64); - range.contains(&pos) + let range = pmmr::bintree_range(root as u64 - 1); + range.contains(&pos0) } else { false } @@ -354,19 +342,20 @@ impl PruneList { } /// Internal shift cache as slice. + /// only used in store/tests/prune_list.rs tests pub fn shift_cache(&self) -> &[u64] { self.shift_cache.as_slice() } /// Internal leaf shift cache as slice. + /// only used in store/tests/prune_list.rs tests pub fn leaf_shift_cache(&self) -> &[u64] { self.leaf_shift_cache.as_slice() } /// Is the specified position a root of a pruned subtree? - pub fn is_pruned_root(&self, pos: u64) -> bool { - assert!(pos > 0, "prune list 1-indexed, 0 not valid pos"); - self.bitmap.contains(pos as u32) + pub fn is_pruned_root(&self, pos0: u64) -> bool { + self.bitmap.contains(1 + pos0 as u32) } /// Iterator over the entries in the prune list (pruned roots). @@ -376,7 +365,10 @@ impl PruneList { /// Iterator over the pruned "bintree range" for each pruned root. pub fn pruned_bintree_range_iter(&self) -> impl Iterator> + '_ { - self.iter().map(|x| pmmr::bintree_range(x)) + self.iter().map(|x| { + let rng = pmmr::bintree_range(x - 1); + (1 + rng.start)..(1 + rng.end) + }) } /// Iterator over all pos that are *not* pruned based on current prune_list. @@ -389,7 +381,8 @@ impl PruneList { /// Note this is not necessarily the same as the "leaf_set" as an output /// can be spent but not yet pruned. 
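The roll-up performed by `append` is easiest to follow on a tiny MMR. The sketch below traces only the sibling roll-up (the real method also calls `cleanup_subtree` to drop nested bitmap entries and cache slots); `family_of` is a hard-coded stand-in for `pmmr::family` covering just the positions used here.

    //               14
    //         6            13
    //      2     5      9      12
    //     0 1   3 4    7 8   10 11
    //
    // (parent, sibling) for the positions used below; a stand-in for pmmr::family.
    fn family_of(pos0: u64) -> (u64, u64) {
        match pos0 {
            0 => (2, 1),
            1 => (2, 0),
            3 => (5, 4),
            4 => (5, 3),
            2 => (6, 5),
            5 => (6, 2),
            6 => (14, 13),
            _ => unreachable!("not needed for this sketch"),
        }
    }

    // Append a pruned position, rolling up into the parent whenever the sibling
    // is already a pruned root (mirrors PruneList::append, minus the caches).
    fn append(roots: &mut Vec<u64>, pos0: u64) {
        let (parent0, sibling0) = family_of(pos0);
        if roots.contains(&sibling0) {
            roots.retain(|&r| r != sibling0);
            append(roots, parent0);
        } else {
            roots.push(pos0);
        }
    }

    fn main() {
        let mut roots = vec![];
        append(&mut roots, 0); // lone leaf stays a root
        assert_eq!(roots, vec![0]);
        append(&mut roots, 1); // sibling 0 already pruned: roll up to parent 2
        assert_eq!(roots, vec![2]);
        append(&mut roots, 3);
        append(&mut roots, 4); // rolls up to 5, then (with 2 pruned) up to 6
        assert_eq!(roots, vec![6]);
    }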
pub fn unpruned_leaf_iter(&self, cutoff_pos: u64) -> impl Iterator + '_ { - self.unpruned_iter(cutoff_pos).filter(|x| pmmr::is_leaf(*x)) + self.unpruned_iter(cutoff_pos) + .filter(|x| pmmr::is_leaf(*x - 1)) } /// Return a clone of our internal bitmap. diff --git a/store/src/types.rs b/store/src/types.rs index fa68379c5..3f317d011 100644 --- a/store/src/types.rs +++ b/store/src/types.rs @@ -309,7 +309,7 @@ where let offset = if next_pos == 0 { 0 } else { - let prev_entry = size_file.read_as_elmt(next_pos.saturating_sub(1))?; + let prev_entry = size_file.read_as_elmt(next_pos - 1)?; prev_entry.offset + prev_entry.size as u64 }; size_file.append_elmt(&SizeEntry { @@ -374,8 +374,7 @@ where if self.buffer_start_pos == 0 { file.set_len(0)?; } else { - let (offset, size) = - self.offset_and_size(self.buffer_start_pos.saturating_sub(1))?; + let (offset, size) = self.offset_and_size(self.buffer_start_pos - 1)?; file.set_len(offset + size as u64)?; }; } diff --git a/store/tests/pmmr.rs b/store/tests/pmmr.rs index 08381c63f..411d3bb2a 100644 --- a/store/tests/pmmr.rs +++ b/store/tests/pmmr.rs @@ -46,7 +46,7 @@ fn pmmr_leaf_idx_iter() { // The first 5 leaves [0,1,2,3,4] are at pos [1,2,4,5,8] in the MMR. assert_eq!(leaf_idx, vec![0, 1, 2, 3, 4]); - assert_eq!(leaf_pos, vec![1, 2, 4, 5, 8]); + assert_eq!(leaf_pos, vec![0, 1, 3, 4, 7]); } } teardown(data_dir); @@ -73,12 +73,12 @@ fn pmmr_append() { let pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut backend, mmr_size); assert_eq!(pmmr.n_unpruned_leaves(), 4); - assert_eq!(pmmr.get_data(1), Some(elems[0])); - assert_eq!(pmmr.get_data(2), Some(elems[1])); + assert_eq!(pmmr.get_data(0), Some(elems[0])); + assert_eq!(pmmr.get_data(1), Some(elems[1])); - assert_eq!(pmmr.get_hash(1), Some(pos_0)); - assert_eq!(pmmr.get_hash(2), Some(pos_1)); - assert_eq!(pmmr.get_hash(3), Some(pos_2)); + assert_eq!(pmmr.get_hash(0), Some(pos_0)); + assert_eq!(pmmr.get_hash(1), Some(pos_1)); + assert_eq!(pmmr.get_hash(2), Some(pos_2)); } // adding the rest and sync again @@ -111,22 +111,22 @@ fn pmmr_append() { assert_eq!(pmmr.n_unpruned_leaves(), 9); // First pair of leaves. - assert_eq!(pmmr.get_data(1), Some(elems[0])); - assert_eq!(pmmr.get_data(2), Some(elems[1])); + assert_eq!(pmmr.get_data(0), Some(elems[0])); + assert_eq!(pmmr.get_data(1), Some(elems[1])); // Second pair of leaves. - assert_eq!(pmmr.get_data(4), Some(elems[2])); - assert_eq!(pmmr.get_data(5), Some(elems[3])); + assert_eq!(pmmr.get_data(3), Some(elems[2])); + assert_eq!(pmmr.get_data(4), Some(elems[3])); // Third pair of leaves. 
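The updated `pmmr_leaf_idx_iter` assertion above encodes the 0-based leaf-index to MMR-position mapping that the rest of these tests rely on. A standalone sketch of that mapping follows; the closed form reproduces the five values the test checks, with `pmmr::insertion_to_pmmr_index` remaining the authoritative implementation.

    // 0-based: the n-th inserted leaf lands at postorder position 2*n - popcount(n).
    fn insertion_to_pmmr_index(leaf_idx: u64) -> u64 {
        2 * leaf_idx - leaf_idx.count_ones() as u64
    }

    fn main() {
        let leaf_pos: Vec<u64> = (0..5).map(insertion_to_pmmr_index).collect();
        // The old 1-based positions [1, 2, 4, 5, 8], shifted down by one.
        assert_eq!(leaf_pos, vec![0, 1, 3, 4, 7]);
    }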
- assert_eq!(pmmr.get_data(8), Some(elems[4])); - assert_eq!(pmmr.get_data(9), Some(elems[5])); - assert_eq!(pmmr.get_hash(10), Some(pos_9)); + assert_eq!(pmmr.get_data(7), Some(elems[4])); + assert_eq!(pmmr.get_data(8), Some(elems[5])); + assert_eq!(pmmr.get_hash(9), Some(pos_9)); } // check the resulting backend store and the computation of the root let node_hash = elems[0].hash_with_index(0); - assert_eq!(backend.get_hash(1).unwrap(), node_hash); + assert_eq!(backend.get_hash(0).unwrap(), node_hash); { let pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut backend, mmr_size); @@ -154,22 +154,22 @@ fn pmmr_compact_leaf_sibling() { // pos 1 and 2 are leaves (and siblings) // the parent is pos 3 - let (pos_1_hash, pos_2_hash, pos_3_hash) = { + let (pos_0_hash, pos_1_hash, pos_2_hash) = { let pmmr = PMMR::at(&mut backend, mmr_size); ( + pmmr.get_hash(0).unwrap(), pmmr.get_hash(1).unwrap(), pmmr.get_hash(2).unwrap(), - pmmr.get_hash(3).unwrap(), ) }; // prune pos 1 { let mut pmmr = PMMR::at(&mut backend, mmr_size); - pmmr.prune(1).unwrap(); + pmmr.prune(0).unwrap(); // prune pos 8 as well to push the remove list past the cutoff - pmmr.prune(8).unwrap(); + pmmr.prune(7).unwrap(); } backend.sync().unwrap(); @@ -179,36 +179,36 @@ fn pmmr_compact_leaf_sibling() { assert_eq!(pmmr.n_unpruned_leaves(), 17); - // check that pos 1 is "removed" - assert_eq!(pmmr.get_hash(1), None); + // check that pos 0 is "removed" + assert_eq!(pmmr.get_hash(0), None); - // check that pos 2 and 3 are unchanged + // check that pos 1 and 2 are unchanged + assert_eq!(pmmr.get_hash(1).unwrap(), pos_1_hash); assert_eq!(pmmr.get_hash(2).unwrap(), pos_2_hash); - assert_eq!(pmmr.get_hash(3).unwrap(), pos_3_hash); } - // check we can still retrieve the "removed" element at pos 1 + // check we can still retrieve the "removed" element at pos 0 // from the backend hash file. - assert_eq!(backend.get_from_file(1).unwrap(), pos_1_hash); + assert_eq!(backend.get_from_file(0).unwrap(), pos_0_hash); // aggressively compact the PMMR files backend.check_compact(1, &Bitmap::create()).unwrap(); - // check pos 1, 2, 3 are in the state we expect after compacting + // check pos 0, 1, 2 are in the state we expect after compacting { let pmmr = PMMR::at(&mut backend, mmr_size); - // check that pos 1 is "removed" - assert_eq!(pmmr.get_hash(1), None); + // check that pos 0 is "removed" + assert_eq!(pmmr.get_hash(0), None); - // check that pos 2 and 3 are unchanged + // check that pos 1 and 2 are unchanged + assert_eq!(pmmr.get_hash(1).unwrap(), pos_1_hash); assert_eq!(pmmr.get_hash(2).unwrap(), pos_2_hash); - assert_eq!(pmmr.get_hash(3).unwrap(), pos_3_hash); } // Check we can still retrieve the "removed" hash at pos 1 from the hash file. // It should still be available even after pruning and compacting. 
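Note that the `hash_with_index` arguments in these tests are untouched by the renumbering: leaf hashes already commit to the 0-based node index, so only the lookup positions change while the expected hashes stay the same. A toy sketch of the idea (assumed shape only; grin hashes with blake2, not `DefaultHasher`):

    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    // Hash an element together with its 0-based MMR position.
    fn hash_with_index<T: Hash>(elem: &T, idx: u64) -> u64 {
        let mut hasher = DefaultHasher::new();
        idx.hash(&mut hasher);
        elem.hash(&mut hasher);
        hasher.finish()
    }

    fn main() {
        let leaf = 42u32;
        // The same element hashes differently at different positions.
        assert_ne!(hash_with_index(&leaf, 0), hash_with_index(&leaf, 7));
    }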
- assert_eq!(backend.get_from_file(1).unwrap(), pos_1_hash); + assert_eq!(backend.get_from_file(0).unwrap(), pos_0_hash); } teardown(data_dir); @@ -235,9 +235,9 @@ fn pmmr_prune_compact() { // pruning some choice nodes { let mut pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut backend, mmr_size); - pmmr.prune(1).unwrap(); + pmmr.prune(0).unwrap(); + pmmr.prune(3).unwrap(); pmmr.prune(4).unwrap(); - pmmr.prune(5).unwrap(); } backend.sync().unwrap(); @@ -246,9 +246,9 @@ fn pmmr_prune_compact() { let pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut backend, mmr_size); assert_eq!(root, pmmr.root().unwrap()); // check we can still retrieve same element from leaf index 2 - assert_eq!(pmmr.get_data(2).unwrap(), TestElem(2)); + assert_eq!(pmmr.get_data(1).unwrap(), TestElem(2)); // and the same for leaf index 7 - assert_eq!(pmmr.get_data(11).unwrap(), TestElem(7)); + assert_eq!(pmmr.get_data(10).unwrap(), TestElem(7)); } // compact @@ -258,8 +258,8 @@ fn pmmr_prune_compact() { { let pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut backend, mmr_size); assert_eq!(root, pmmr.root().unwrap()); - assert_eq!(pmmr.get_data(2).unwrap(), TestElem(2)); - assert_eq!(pmmr.get_data(11).unwrap(), TestElem(7)); + assert_eq!(pmmr.get_data(1).unwrap(), TestElem(2)); + assert_eq!(pmmr.get_data(10).unwrap(), TestElem(7)); } } @@ -279,9 +279,9 @@ fn pmmr_reload() { let mmr_size = load(0, &elems[..], &mut backend); // retrieve entries from the hash file for comparison later + let pos_2_hash = backend.get_hash(2).unwrap(); let pos_3_hash = backend.get_hash(3).unwrap(); let pos_4_hash = backend.get_hash(4).unwrap(); - let pos_5_hash = backend.get_hash(5).unwrap(); // save the root let root = { @@ -296,7 +296,7 @@ fn pmmr_reload() { // prune a node so we have prune data { let mut pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut backend, mmr_size); - pmmr.prune(1).unwrap(); + pmmr.prune(0).unwrap(); } backend.sync().unwrap(); assert_eq!(backend.unpruned_size(), mmr_size); @@ -310,8 +310,8 @@ fn pmmr_reload() { // prune another node to force compact to actually do something { let mut pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut backend, mmr_size); - pmmr.prune(4).unwrap(); - pmmr.prune(2).unwrap(); + pmmr.prune(3).unwrap(); + pmmr.prune(1).unwrap(); } backend.sync().unwrap(); assert_eq!(backend.unpruned_size(), mmr_size); @@ -324,7 +324,7 @@ fn pmmr_reload() { // prune some more to get rm log data { let mut pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut backend, mmr_size); - pmmr.prune(5).unwrap(); + pmmr.prune(4).unwrap(); } backend.sync().unwrap(); assert_eq!(backend.unpruned_size(), mmr_size); @@ -342,29 +342,29 @@ fn pmmr_reload() { assert_eq!(root, pmmr.root().unwrap()); } - // pos 1 and pos 2 are both removed (via parent pos 3 in prune list) + // pos 0 and pos 1 are both removed (via parent pos 2 in prune list) + assert_eq!(backend.get_hash(0), None); assert_eq!(backend.get_hash(1), None); - assert_eq!(backend.get_hash(2), None); - // pos 3 is "removed" but we keep the hash around for root of pruned subtree - assert_eq!(backend.get_hash(3), Some(pos_3_hash)); + // pos 2 is "removed" but we keep the hash around for root of pruned subtree + assert_eq!(backend.get_hash(2), Some(pos_2_hash)); - // pos 4 is removed (via prune list) + // pos 3 is removed (via prune list) + assert_eq!(backend.get_hash(3), None); + // pos 4 is removed (via leaf_set) assert_eq!(backend.get_hash(4), None); - // pos 5 is removed (via leaf_set) - assert_eq!(backend.get_hash(5), None); // now check contents of the hash file - // pos 1 and pos 2 are no longer in the hash 
file + // pos 0 and pos 1 are no longer in the hash file + assert_eq!(backend.get_from_file(0), None); assert_eq!(backend.get_from_file(1), None); - assert_eq!(backend.get_from_file(2), None); - // pos 3 is still in there + // pos 2 is still in there + assert_eq!(backend.get_from_file(2), Some(pos_2_hash)); + + // pos 3 and pos 4 are also still in there assert_eq!(backend.get_from_file(3), Some(pos_3_hash)); - - // pos 4 and pos 5 are also still in there assert_eq!(backend.get_from_file(4), Some(pos_4_hash)); - assert_eq!(backend.get_from_file(5), Some(pos_5_hash)); } } @@ -406,10 +406,10 @@ fn pmmr_rewind() { // prune the first 4 elements (leaves at pos 1, 2, 4, 5) { let mut pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut backend, mmr_size); + pmmr.prune(0).unwrap(); pmmr.prune(1).unwrap(); - pmmr.prune(2).unwrap(); + pmmr.prune(3).unwrap(); pmmr.prune(4).unwrap(); - pmmr.prune(5).unwrap(); } backend.sync().unwrap(); @@ -436,26 +436,26 @@ fn pmmr_rewind() { } // Also check the data file looks correct. - // pos 1, 2, 4, 5 are all leaves but these have been pruned. - for pos in vec![1, 2, 4, 5] { + // pos 0, 1, 3, 4 are all leaves but these have been pruned. + for pos in vec![0, 1, 3, 4] { assert_eq!(backend.get_data(pos), None); } - // pos 3, 6, 7 are non-leaves so we have no data for these - for pos in vec![3, 6, 7] { + // pos 2, 5, 6 are non-leaves so we have no data for these + for pos in vec![2, 5, 6] { assert_eq!(backend.get_data(pos), None); } - // pos 8 and 9 are both leaves and should be unaffected by prior pruning + // pos 7 and 8 are both leaves and should be unaffected by prior pruning - assert_eq!(backend.get_data(8), Some(elems[4])); - assert_eq!(backend.get_hash(8), Some(elems[4].hash_with_index(7))); + assert_eq!(backend.get_data(7), Some(elems[4])); + assert_eq!(backend.get_hash(7), Some(elems[4].hash_with_index(7))); - assert_eq!(backend.get_data(9), Some(elems[5])); - assert_eq!(backend.get_hash(9), Some(elems[5].hash_with_index(8))); + assert_eq!(backend.get_data(8), Some(elems[5])); + assert_eq!(backend.get_hash(8), Some(elems[5].hash_with_index(8))); // TODO - Why is this 2 here? 
println!("***** backend size here: {}", backend.data_size()); - // assert_eq!(backend.data_size(), 2); + assert_eq!(backend.data_size(), 2); { let mut pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut backend, 10); @@ -470,9 +470,9 @@ fn pmmr_rewind() { } // also check the data file looks correct - // everything up to and including pos 7 should be pruned from the data file - // but we have rewound to pos 5 so everything after that should be None - for pos in 1..17 { + // everything up to and including pos 6 should be pruned from the data file + // but we have rewound to pos 4 so everything after that should be None + for pos in 0..16 { assert_eq!(backend.get_data(pos), None); } @@ -500,8 +500,8 @@ fn pmmr_compact_single_leaves() { { let mut pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut backend, mmr_size); - pmmr.prune(1).unwrap(); - pmmr.prune(4).unwrap(); + pmmr.prune(0).unwrap(); + pmmr.prune(3).unwrap(); } backend.sync().unwrap(); @@ -511,8 +511,8 @@ fn pmmr_compact_single_leaves() { { let mut pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut backend, mmr_size); - pmmr.prune(2).unwrap(); - pmmr.prune(5).unwrap(); + pmmr.prune(1).unwrap(); + pmmr.prune(4).unwrap(); } backend.sync().unwrap(); @@ -534,18 +534,18 @@ fn pmmr_compact_entire_peak() { let mmr_size = load(0, &elems[0..5], &mut backend); backend.sync().unwrap(); - let pos_7_hash = backend.get_hash(7).unwrap(); + let pos_6_hash = backend.get_hash(6).unwrap(); - let pos_8 = backend.get_data(8).unwrap(); - let pos_8_hash = backend.get_hash(8).unwrap(); + let pos_7 = backend.get_data(7).unwrap(); + let pos_7_hash = backend.get_hash(7).unwrap(); // prune all leaves under the peak at pos 7 { let mut pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut backend, mmr_size); + pmmr.prune(0).unwrap(); pmmr.prune(1).unwrap(); - pmmr.prune(2).unwrap(); + pmmr.prune(3).unwrap(); pmmr.prune(4).unwrap(); - pmmr.prune(5).unwrap(); } backend.sync().unwrap(); @@ -555,13 +555,13 @@ fn pmmr_compact_entire_peak() { // now check we have pruned up to and including the peak at pos 7 // hash still available in underlying hash file - assert_eq!(backend.get_hash(7), Some(pos_7_hash)); - assert_eq!(backend.get_from_file(7), Some(pos_7_hash)); + assert_eq!(backend.get_hash(6), Some(pos_6_hash)); + assert_eq!(backend.get_from_file(6), Some(pos_6_hash)); // now check we still have subsequent hash and data where we expect - assert_eq!(backend.get_data(8), Some(pos_8)); - assert_eq!(backend.get_hash(8), Some(pos_8_hash)); - assert_eq!(backend.get_from_file(8), Some(pos_8_hash)); + assert_eq!(backend.get_data(7), Some(pos_7)); + assert_eq!(backend.get_hash(7), Some(pos_7_hash)); + assert_eq!(backend.get_from_file(7), Some(pos_7_hash)); } teardown(data_dir); @@ -571,17 +571,17 @@ fn pmmr_compact_entire_peak() { fn pmmr_compact_horizon() { let (data_dir, elems) = setup("compact_horizon"); { + let pos_0_hash; let pos_1_hash; let pos_2_hash; - let pos_3_hash; + let pos_5_hash; let pos_6_hash; + + let pos_7; let pos_7_hash; - let pos_8; - let pos_8_hash; - - let pos_11; - let pos_11_hash; + let pos_10; + let pos_10_hash; let mmr_size; { @@ -596,46 +596,46 @@ fn pmmr_compact_horizon() { assert_eq!(backend.data_size(), 19); assert_eq!(backend.hash_size(), 35); + pos_0_hash = backend.get_hash(0).unwrap(); pos_1_hash = backend.get_hash(1).unwrap(); pos_2_hash = backend.get_hash(2).unwrap(); - pos_3_hash = backend.get_hash(3).unwrap(); + pos_5_hash = backend.get_hash(5).unwrap(); pos_6_hash = backend.get_hash(6).unwrap(); + + pos_7 = backend.get_data(7).unwrap(); pos_7_hash = 
backend.get_hash(7).unwrap(); - pos_8 = backend.get_data(8).unwrap(); - pos_8_hash = backend.get_hash(8).unwrap(); - - pos_11 = backend.get_data(11).unwrap(); - pos_11_hash = backend.get_hash(11).unwrap(); + pos_10 = backend.get_data(10).unwrap(); + pos_10_hash = backend.get_hash(10).unwrap(); // pruning some choice nodes { let mut pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut backend, mmr_size); + pmmr.prune(3).unwrap(); pmmr.prune(4).unwrap(); - pmmr.prune(5).unwrap(); + pmmr.prune(0).unwrap(); pmmr.prune(1).unwrap(); - pmmr.prune(2).unwrap(); } backend.sync().unwrap(); // check we can read hashes and data correctly after pruning { // assert_eq!(backend.get_hash(3), None); - assert_eq!(backend.get_from_file(3), Some(pos_3_hash)); + assert_eq!(backend.get_from_file(2), Some(pos_2_hash)); // assert_eq!(backend.get_hash(6), None); - assert_eq!(backend.get_from_file(6), Some(pos_6_hash)); + assert_eq!(backend.get_from_file(5), Some(pos_5_hash)); // assert_eq!(backend.get_hash(7), None); + assert_eq!(backend.get_from_file(6), Some(pos_6_hash)); + + assert_eq!(backend.get_hash(7), Some(pos_7_hash)); + assert_eq!(backend.get_data(7), Some(pos_7)); assert_eq!(backend.get_from_file(7), Some(pos_7_hash)); - assert_eq!(backend.get_hash(8), Some(pos_8_hash)); - assert_eq!(backend.get_data(8), Some(pos_8)); - assert_eq!(backend.get_from_file(8), Some(pos_8_hash)); - - assert_eq!(backend.get_hash(11), Some(pos_11_hash)); - assert_eq!(backend.get_data(11), Some(pos_11)); - assert_eq!(backend.get_from_file(11), Some(pos_11_hash)); + assert_eq!(backend.get_hash(10), Some(pos_10_hash)); + assert_eq!(backend.get_data(10), Some(pos_10)); + assert_eq!(backend.get_from_file(10), Some(pos_10_hash)); } // compact @@ -644,22 +644,22 @@ fn pmmr_compact_horizon() { // check we can read a hash by pos correctly after compaction { + assert_eq!(backend.get_hash(0), None); + assert_eq!(backend.get_from_file(0), Some(pos_0_hash)); + assert_eq!(backend.get_hash(1), None); assert_eq!(backend.get_from_file(1), Some(pos_1_hash)); - assert_eq!(backend.get_hash(2), None); - assert_eq!(backend.get_from_file(2), Some(pos_2_hash)); - - assert_eq!(backend.get_hash(3), Some(pos_3_hash)); + assert_eq!(backend.get_hash(2), Some(pos_2_hash)); + assert_eq!(backend.get_hash(3), None); assert_eq!(backend.get_hash(4), None); - assert_eq!(backend.get_hash(5), None); - assert_eq!(backend.get_hash(6), Some(pos_6_hash)); + assert_eq!(backend.get_hash(5), Some(pos_5_hash)); + assert_eq!(backend.get_from_file(6), Some(pos_6_hash)); + + assert_eq!(backend.get_hash(7), Some(pos_7_hash)); assert_eq!(backend.get_from_file(7), Some(pos_7_hash)); - - assert_eq!(backend.get_hash(8), Some(pos_8_hash)); - assert_eq!(backend.get_from_file(8), Some(pos_8_hash)); } } @@ -678,11 +678,11 @@ fn pmmr_compact_horizon() { assert_eq!(backend.hash_size(), 35); // check we can read a hash by pos correctly from recreated backend + assert_eq!(backend.get_hash(6), Some(pos_6_hash)); + assert_eq!(backend.get_from_file(6), Some(pos_6_hash)); + assert_eq!(backend.get_hash(7), Some(pos_7_hash)); assert_eq!(backend.get_from_file(7), Some(pos_7_hash)); - - assert_eq!(backend.get_hash(8), Some(pos_8_hash)); - assert_eq!(backend.get_from_file(8), Some(pos_8_hash)); } { @@ -697,8 +697,8 @@ fn pmmr_compact_horizon() { { let mut pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut backend, mmr_size); + pmmr.prune(7).unwrap(); pmmr.prune(8).unwrap(); - pmmr.prune(9).unwrap(); } // compact some more @@ -723,13 +723,13 @@ fn pmmr_compact_horizon() { // check we can read a hash by pos 
correctly from recreated backend // get_hash() and get_from_file() should return the same value - // and we only store leaves in the leaf_set so pos 7 still has a hash in there - assert_eq!(backend.get_hash(7), Some(pos_7_hash)); - assert_eq!(backend.get_from_file(7), Some(pos_7_hash)); + // and we only store leaves in the leaf_set so pos 6 still has a hash in there + assert_eq!(backend.get_hash(6), Some(pos_6_hash)); + assert_eq!(backend.get_from_file(6), Some(pos_6_hash)); - assert_eq!(backend.get_hash(11), Some(pos_11_hash)); - assert_eq!(backend.get_data(11), Some(pos_11)); - assert_eq!(backend.get_from_file(11), Some(pos_11_hash)); + assert_eq!(backend.get_hash(10), Some(pos_10_hash)); + assert_eq!(backend.get_data(10), Some(pos_10)); + assert_eq!(backend.get_from_file(10), Some(pos_10_hash)); } } @@ -758,9 +758,9 @@ fn compact_twice() { // pruning some choice nodes { let mut pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut backend, mmr_size); + pmmr.prune(0).unwrap(); pmmr.prune(1).unwrap(); - pmmr.prune(2).unwrap(); - pmmr.prune(4).unwrap(); + pmmr.prune(3).unwrap(); } backend.sync().unwrap(); @@ -768,8 +768,8 @@ fn compact_twice() { { let pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut backend, mmr_size); assert_eq!(root, pmmr.root().unwrap()); - assert_eq!(pmmr.get_data(5).unwrap(), TestElem(4)); - assert_eq!(pmmr.get_data(11).unwrap(), TestElem(7)); + assert_eq!(pmmr.get_data(4).unwrap(), TestElem(4)); + assert_eq!(pmmr.get_data(10).unwrap(), TestElem(7)); } // compact @@ -779,16 +779,16 @@ fn compact_twice() { { let pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut backend, mmr_size); assert_eq!(root, pmmr.root().unwrap()); - assert_eq!(pmmr.get_data(5).unwrap(), TestElem(4)); - assert_eq!(pmmr.get_data(11).unwrap(), TestElem(7)); + assert_eq!(pmmr.get_data(4).unwrap(), TestElem(4)); + assert_eq!(pmmr.get_data(10).unwrap(), TestElem(7)); } // now prune some more nodes { let mut pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut backend, mmr_size); - pmmr.prune(5).unwrap(); + pmmr.prune(4).unwrap(); + pmmr.prune(7).unwrap(); pmmr.prune(8).unwrap(); - pmmr.prune(9).unwrap(); } backend.sync().unwrap(); @@ -796,7 +796,7 @@ fn compact_twice() { { let pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut backend, mmr_size); assert_eq!(root, pmmr.root().unwrap()); - assert_eq!(pmmr.get_data(11).unwrap(), TestElem(7)); + assert_eq!(pmmr.get_data(10).unwrap(), TestElem(7)); } // compact @@ -806,7 +806,7 @@ fn compact_twice() { { let pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut backend, mmr_size); assert_eq!(root, pmmr.root().unwrap()); - assert_eq!(pmmr.get_data(11).unwrap(), TestElem(7)); + assert_eq!(pmmr.get_data(10).unwrap(), TestElem(7)); } } diff --git a/store/tests/prune_list.rs b/store/tests/prune_list.rs index 056118f84..a781dfbf4 100644 --- a/store/tests/prune_list.rs +++ b/store/tests/prune_list.rs @@ -15,54 +15,38 @@ use grin_store as store; use crate::store::prune_list::PruneList; -use croaring::Bitmap; - -// Prune list is 1-indexed but we implement this internally with a bitmap that supports a 0 value. -// We need to make sure we safely handle 0 safely. -#[test] -fn test_zero_value() { - // Create a bitmap with a 0 value in it. - let mut bitmap = Bitmap::create(); - bitmap.add(0); - - // Instantiate a prune list from our existing bitmap. - let pl = PruneList::new(None, bitmap); - - // Our prune list should be empty (0 filtered out during creation). 
- assert!(pl.is_empty()); -} #[test] fn test_is_pruned() { let mut pl = PruneList::empty(); assert_eq!(pl.len(), 0); + assert_eq!(pl.is_pruned(0), false); assert_eq!(pl.is_pruned(1), false); assert_eq!(pl.is_pruned(2), false); - assert_eq!(pl.is_pruned(3), false); - pl.append(2); + pl.append(1); pl.flush().unwrap(); assert_eq!(pl.iter().collect::>(), [2]); - assert_eq!(pl.is_pruned(1), false); - assert_eq!(pl.is_pruned(2), true); + assert_eq!(pl.is_pruned(0), false); + assert_eq!(pl.is_pruned(1), true); + assert_eq!(pl.is_pruned(2), false); assert_eq!(pl.is_pruned(3), false); - assert_eq!(pl.is_pruned(4), false); let mut pl = PruneList::empty(); + pl.append(0); pl.append(1); - pl.append(2); pl.flush().unwrap(); assert_eq!(pl.len(), 1); assert_eq!(pl.iter().collect::>(), [3]); + assert_eq!(pl.is_pruned(0), true); assert_eq!(pl.is_pruned(1), true); assert_eq!(pl.is_pruned(2), true); - assert_eq!(pl.is_pruned(3), true); - assert_eq!(pl.is_pruned(4), false); + assert_eq!(pl.is_pruned(3), false); - pl.append(4); + pl.append(3); // Flushing the prune_list removes any individual leaf positions. // This assumes we will track these outside the prune_list via the leaf_set. @@ -70,11 +54,11 @@ fn test_is_pruned() { assert_eq!(pl.len(), 2); assert_eq!(pl.to_vec(), [3, 4]); + assert_eq!(pl.is_pruned(0), true); assert_eq!(pl.is_pruned(1), true); assert_eq!(pl.is_pruned(2), true); assert_eq!(pl.is_pruned(3), true); - assert_eq!(pl.is_pruned(4), true); - assert_eq!(pl.is_pruned(5), false); + assert_eq!(pl.is_pruned(4), false); } #[test] @@ -83,147 +67,147 @@ fn test_get_leaf_shift() { // start with an empty prune list (nothing shifted) assert_eq!(pl.len(), 0); + assert_eq!(pl.get_leaf_shift(4), 0); assert_eq!(pl.get_leaf_shift(1), 0); assert_eq!(pl.get_leaf_shift(2), 0); assert_eq!(pl.get_leaf_shift(3), 0); - assert_eq!(pl.get_leaf_shift(4), 0); // now add a single leaf pos to the prune list // leaves will not shift shift anything // we only start shifting after pruning a parent - pl.append(1); + pl.append(0); pl.flush().unwrap(); assert_eq!(pl.iter().collect::>(), [1]); + assert_eq!(pl.get_leaf_shift(0), 0); assert_eq!(pl.get_leaf_shift(1), 0); assert_eq!(pl.get_leaf_shift(2), 0); assert_eq!(pl.get_leaf_shift(3), 0); - assert_eq!(pl.get_leaf_shift(4), 0); - // now add the sibling leaf pos (pos 2) which will prune the parent - // at pos 3 this in turn will "leaf shift" the leaf at pos 3 by 2 - pl.append(2); + // now add the sibling leaf pos (pos 1) which will prune the parent + // at pos 2 this in turn will "leaf shift" the leaf at pos 2 by 2 + pl.append(1); pl.flush().unwrap(); assert_eq!(pl.len(), 1); + assert_eq!(pl.get_leaf_shift(0), 0); assert_eq!(pl.get_leaf_shift(1), 0); - assert_eq!(pl.get_leaf_shift(2), 0); + assert_eq!(pl.get_leaf_shift(2), 2); assert_eq!(pl.get_leaf_shift(3), 2); assert_eq!(pl.get_leaf_shift(4), 2); - assert_eq!(pl.get_leaf_shift(5), 2); - // now prune an additional leaf at pos 4 + // now prune an additional leaf at pos 3 // leaf offset of subsequent pos will be 2 // 00100120 - pl.append(4); + pl.append(3); pl.flush().unwrap(); assert_eq!(pl.len(), 2); assert_eq!(pl.iter().collect::>(), [3, 4]); + assert_eq!(pl.get_leaf_shift(0), 0); assert_eq!(pl.get_leaf_shift(1), 0); - assert_eq!(pl.get_leaf_shift(2), 0); + assert_eq!(pl.get_leaf_shift(2), 2); assert_eq!(pl.get_leaf_shift(3), 2); assert_eq!(pl.get_leaf_shift(4), 2); assert_eq!(pl.get_leaf_shift(5), 2); assert_eq!(pl.get_leaf_shift(6), 2); assert_eq!(pl.get_leaf_shift(7), 2); - assert_eq!(pl.get_leaf_shift(8), 2); - // now 
prune the sibling at pos 5 - // the two smaller subtrees (pos 3 and pos 6) are rolled up to larger subtree - // (pos 7) the leaf offset is now 4 to cover entire subtree containing first + // now prune the sibling at pos 4 + // the two smaller subtrees (pos 2 and pos 5) are rolled up to larger subtree + // (pos 6) the leaf offset is now 4 to cover entire subtree containing first // 4 leaves 00100120 - pl.append(5); + pl.append(4); pl.flush().unwrap(); assert_eq!(pl.len(), 1); assert_eq!(pl.iter().collect::>(), [7]); + assert_eq!(pl.get_leaf_shift(0), 0); assert_eq!(pl.get_leaf_shift(1), 0); assert_eq!(pl.get_leaf_shift(2), 0); assert_eq!(pl.get_leaf_shift(3), 0); assert_eq!(pl.get_leaf_shift(4), 0); assert_eq!(pl.get_leaf_shift(5), 0); - assert_eq!(pl.get_leaf_shift(6), 0); + assert_eq!(pl.get_leaf_shift(6), 4); assert_eq!(pl.get_leaf_shift(7), 4); assert_eq!(pl.get_leaf_shift(8), 4); - assert_eq!(pl.get_leaf_shift(9), 4); // now check we can prune some unconnected nodes // and that leaf_shift is correct for various pos let mut pl = PruneList::empty(); + pl.append(3); pl.append(4); - pl.append(5); + pl.append(10); pl.append(11); - pl.append(12); pl.flush().unwrap(); assert_eq!(pl.len(), 2); assert_eq!(pl.iter().collect::>(), [6, 13]); - assert_eq!(pl.get_leaf_shift(2), 0); - assert_eq!(pl.get_leaf_shift(4), 0); + assert_eq!(pl.get_leaf_shift(1), 0); + assert_eq!(pl.get_leaf_shift(3), 0); + assert_eq!(pl.get_leaf_shift(7), 2); assert_eq!(pl.get_leaf_shift(8), 2); - assert_eq!(pl.get_leaf_shift(9), 2); + assert_eq!(pl.get_leaf_shift(12), 4); assert_eq!(pl.get_leaf_shift(13), 4); - assert_eq!(pl.get_leaf_shift(14), 4); } #[test] fn test_get_shift() { let mut pl = PruneList::empty(); assert!(pl.is_empty()); + assert_eq!(pl.get_shift(0), 0); assert_eq!(pl.get_shift(1), 0); assert_eq!(pl.get_shift(2), 0); - assert_eq!(pl.get_shift(3), 0); // prune a single leaf node // pruning only a leaf node does not shift any subsequent pos // we will only start shifting when a parent can be pruned - pl.append(1); + pl.append(0); pl.flush().unwrap(); assert_eq!(pl.iter().collect::>(), [1]); + assert_eq!(pl.get_shift(0), 0); assert_eq!(pl.get_shift(1), 0); assert_eq!(pl.get_shift(2), 0); - assert_eq!(pl.get_shift(3), 0); - pl.append(2); + pl.append(1); pl.flush().unwrap(); assert_eq!(pl.iter().collect::>(), [3]); + assert_eq!(pl.get_shift(0), 0); assert_eq!(pl.get_shift(1), 0); - assert_eq!(pl.get_shift(2), 0); + assert_eq!(pl.get_shift(2), 2); + assert_eq!(pl.get_shift(3), 2); + assert_eq!(pl.get_shift(4), 2); + assert_eq!(pl.get_shift(5), 2); + + pl.append(3); + pl.flush().unwrap(); + + assert_eq!(pl.iter().collect::>(), [3, 4]); + assert_eq!(pl.get_shift(0), 0); + assert_eq!(pl.get_shift(1), 0); + assert_eq!(pl.get_shift(2), 2); assert_eq!(pl.get_shift(3), 2); assert_eq!(pl.get_shift(4), 2); assert_eq!(pl.get_shift(5), 2); - assert_eq!(pl.get_shift(6), 2); pl.append(4); pl.flush().unwrap(); - assert_eq!(pl.iter().collect::>(), [3, 4]); - assert_eq!(pl.get_shift(1), 0); - assert_eq!(pl.get_shift(2), 0); - assert_eq!(pl.get_shift(3), 2); - assert_eq!(pl.get_shift(4), 2); - assert_eq!(pl.get_shift(5), 2); - assert_eq!(pl.get_shift(6), 2); - - pl.append(5); - pl.flush().unwrap(); - assert_eq!(pl.iter().collect::>(), [7]); + assert_eq!(pl.get_shift(0), 0); assert_eq!(pl.get_shift(1), 0); assert_eq!(pl.get_shift(2), 0); assert_eq!(pl.get_shift(3), 0); assert_eq!(pl.get_shift(4), 0); assert_eq!(pl.get_shift(5), 0); - assert_eq!(pl.get_shift(6), 0); + assert_eq!(pl.get_shift(6), 6); assert_eq!(pl.get_shift(7), 6); 
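When reading these assertions, keep the split convention in mind: `append`, `is_pruned`, `get_shift` and `get_leaf_shift` now take 0-based positions, while `iter()` and `to_vec()` still report the 1-based values stored in the underlying bitmap. A trivial sketch of that translation:

    // 0-based positions at the API boundary, 1-based values inside the bitmap.
    fn to_bitmap_value(pos0: u64) -> u32 {
        1 + pos0 as u32
    }

    fn from_bitmap_value(pos1: u32) -> u64 {
        pos1 as u64 - 1
    }

    fn main() {
        // The root rolled up over leaves 0 and 1 sits at 0-based position 2,
        // which iter()/to_vec() report as 3.
        assert_eq!(to_bitmap_value(2), 3);
        assert_eq!(from_bitmap_value(3), 2);
    }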
assert_eq!(pl.get_shift(8), 6); - assert_eq!(pl.get_shift(9), 6); // prune a bunch more - for x in 6..1000 { + for x in 5..999 { if !pl.is_pruned(x) { pl.append(x); } @@ -231,61 +215,61 @@ fn test_get_shift() { pl.flush().unwrap(); // and check we shift by a large number (hopefully the correct number...) - assert_eq!(pl.get_shift(1010), 996); + assert_eq!(pl.get_shift(1009), 996); // now check we can do some sparse pruning let mut pl = PruneList::empty(); + pl.append(3); pl.append(4); - pl.append(5); + pl.append(7); pl.append(8); - pl.append(9); pl.flush().unwrap(); assert_eq!(pl.iter().collect::>(), [6, 10]); + assert_eq!(pl.get_shift(0), 0); assert_eq!(pl.get_shift(1), 0); assert_eq!(pl.get_shift(2), 0); assert_eq!(pl.get_shift(3), 0); assert_eq!(pl.get_shift(4), 0); - assert_eq!(pl.get_shift(5), 0); + assert_eq!(pl.get_shift(5), 2); assert_eq!(pl.get_shift(6), 2); assert_eq!(pl.get_shift(7), 2); assert_eq!(pl.get_shift(8), 2); - assert_eq!(pl.get_shift(9), 2); + assert_eq!(pl.get_shift(9), 4); assert_eq!(pl.get_shift(10), 4); assert_eq!(pl.get_shift(11), 4); - assert_eq!(pl.get_shift(12), 4); } #[test] pub fn test_iter() { let mut pl = PruneList::empty(); + pl.append(0); pl.append(1); - pl.append(2); - pl.append(4); + pl.append(3); assert_eq!(pl.iter().collect::>(), [3, 4]); let mut pl = PruneList::empty(); + pl.append(0); pl.append(1); - pl.append(2); - pl.append(5); + pl.append(4); assert_eq!(pl.iter().collect::>(), [3, 5]); } #[test] pub fn test_pruned_bintree_range_iter() { let mut pl = PruneList::empty(); + pl.append(0); pl.append(1); - pl.append(2); - pl.append(4); + pl.append(3); assert_eq!( pl.pruned_bintree_range_iter().collect::>(), [1..4, 4..5] ); let mut pl = PruneList::empty(); + pl.append(0); pl.append(1); - pl.append(2); - pl.append(5); + pl.append(4); assert_eq!( pl.pruned_bintree_range_iter().collect::>(), [1..4, 5..6] @@ -298,15 +282,15 @@ pub fn test_unpruned_iter() { assert_eq!(pl.unpruned_iter(5).collect::>(), [1, 2, 3, 4, 5]); let mut pl = PruneList::empty(); - pl.append(2); + pl.append(1); assert_eq!(pl.iter().collect::>(), [2]); assert_eq!(pl.pruned_bintree_range_iter().collect::>(), [2..3]); assert_eq!(pl.unpruned_iter(4).collect::>(), [1, 3, 4]); let mut pl = PruneList::empty(); - pl.append(2); + pl.append(1); + pl.append(3); pl.append(4); - pl.append(5); assert_eq!(pl.iter().collect::>(), [2, 6]); assert_eq!( pl.pruned_bintree_range_iter().collect::>(), @@ -324,15 +308,15 @@ fn test_unpruned_leaf_iter() { ); let mut pl = PruneList::empty(); - pl.append(2); + pl.append(1); assert_eq!(pl.iter().collect::>(), [2]); assert_eq!(pl.pruned_bintree_range_iter().collect::>(), [2..3]); assert_eq!(pl.unpruned_leaf_iter(5).collect::>(), [1, 4, 5]); let mut pl = PruneList::empty(); - pl.append(2); + pl.append(1); + pl.append(3); pl.append(4); - pl.append(5); assert_eq!(pl.iter().collect::>(), [2, 6]); assert_eq!( pl.pruned_bintree_range_iter().collect::>(), @@ -345,48 +329,48 @@ pub fn test_append_pruned_subtree() { let mut pl = PruneList::empty(); // append a pruned leaf pos (shift and leaf shift are unaffected). 
- pl.append(1); + pl.append(0); assert_eq!(pl.to_vec(), [1]); - assert_eq!(pl.get_shift(2), 0); - assert_eq!(pl.get_leaf_shift(2), 0); + assert_eq!(pl.get_shift(1), 0); + assert_eq!(pl.get_leaf_shift(1), 0); - pl.append(3); + pl.append(2); - // subtree beneath root at 3 is pruned - // pos 4 is shifted by 2 pruned hashes [1, 2] - // pos 4 is shifted by 2 leaves [1, 2] + // subtree beneath root at 2 is pruned + // pos 3 is shifted by 2 pruned hashes [1, 2] + // pos 3 is shifted by 2 leaves [1, 2] assert_eq!(pl.to_vec(), [3]); - assert_eq!(pl.get_shift(4), 2); - assert_eq!(pl.get_leaf_shift(4), 2); + assert_eq!(pl.get_shift(3), 2); + assert_eq!(pl.get_leaf_shift(3), 2); // append another pruned subtree (ancester of previous one) - pl.append(7); + pl.append(6); - // subtree beneath root at 7 is pruned - // pos 8 is shifted by 6 pruned hashes [1, 2, 3, 4, 5, 6] - // pos 4 is shifted by 4 leaves [1, 2, 4, 5] + // subtree beneath root at 6 is pruned + // pos 7 is shifted by 6 pruned hashes [1, 2, 3, 4, 5, 6] + // pos 3 is shifted by 4 leaves [1, 2, 4, 5] assert_eq!(pl.to_vec(), [7]); - assert_eq!(pl.get_shift(8), 6); - assert_eq!(pl.get_leaf_shift(8), 4); + assert_eq!(pl.get_shift(7), 6); + assert_eq!(pl.get_leaf_shift(7), 4); // now append another pruned leaf pos - pl.append(8); + pl.append(7); // additional pruned leaf does not affect the shift or leaf shift - // pos 9 is shifted by 6 pruned hashes [1, 2, 3, 4, 5, 6] - // pos 4 is shifted by 4 leaves [1, 2, 4, 5] + // pos 8 is shifted by 6 pruned hashes [1, 2, 3, 4, 5, 6] + // pos 8 is shifted by 4 leaves [1, 2, 4, 5] assert_eq!(pl.to_vec(), [7, 8]); - assert_eq!(pl.get_shift(9), 6); - assert_eq!(pl.get_leaf_shift(9), 4); + assert_eq!(pl.get_shift(8), 6); + assert_eq!(pl.get_leaf_shift(8), 4); } #[test] fn test_recreate_prune_list() { let mut pl = PruneList::empty(); + pl.append(3); pl.append(4); - pl.append(5); - pl.append(11); + pl.append(10); let pl2 = PruneList::new(None, vec![4, 5, 11].into_iter().collect()); diff --git a/store/tests/segment.rs b/store/tests/segment.rs index 61fef7828..809595d7a 100644 --- a/store/tests/segment.rs +++ b/store/tests/segment.rs @@ -56,7 +56,7 @@ fn prunable_mmr() { let segment = Segment::from_pmmr(id, &mmr, true).unwrap(); assert_eq!( segment.root(last_pos, Some(&bitmap)).unwrap().unwrap(), - mmr.get_hash(30).unwrap() + mmr.get_hash(29).unwrap() ); segment.validate(last_pos, Some(&bitmap), root).unwrap(); @@ -72,7 +72,7 @@ fn prunable_mmr() { let segment = Segment::from_pmmr(id, &mmr, true).unwrap(); assert_eq!( segment.root(last_pos, Some(&bitmap)).unwrap().unwrap(), - mmr.get_hash(30).unwrap() + mmr.get_hash(29).unwrap() ); segment.validate(last_pos, Some(&bitmap), root).unwrap(); @@ -88,7 +88,7 @@ fn prunable_mmr() { let segment = Segment::from_pmmr(id, &mmr, true).unwrap(); assert_eq!( segment.root(last_pos, Some(&bitmap)).unwrap().unwrap(), - mmr.get_hash(30).unwrap() + mmr.get_hash(29).unwrap() ); segment.validate(last_pos, Some(&bitmap), root).unwrap(); @@ -104,7 +104,7 @@ fn prunable_mmr() { let segment = Segment::from_pmmr(id, &mmr, true).unwrap(); assert_eq!( segment.root(last_pos, Some(&bitmap)).unwrap().unwrap(), - mmr.get_hash(30).unwrap() + mmr.get_hash(29).unwrap() ); segment.validate(last_pos, Some(&bitmap), root).unwrap(); @@ -191,7 +191,7 @@ fn pruned_segment() { segment .first_unpruned_parent(last_pos, Some(&bitmap)) .unwrap(), - (ba.get_hash(14).unwrap(), 14) + (ba.get_hash(13).unwrap(), 14) ); assert!(segment.root(last_pos, Some(&bitmap)).unwrap().is_none()); segment.validate(last_pos, 
Some(&bitmap), root).unwrap(); @@ -213,7 +213,7 @@ fn pruned_segment() { segment .first_unpruned_parent(last_pos, Some(&bitmap)) .unwrap(), - (ba.get_hash(15).unwrap(), 15) + (ba.get_hash(14).unwrap(), 15) ); assert!(segment.root(last_pos, Some(&bitmap)).unwrap().is_none()); segment.validate(last_pos, Some(&bitmap), root).unwrap(); @@ -266,7 +266,7 @@ fn pruned_segment() { segment .first_unpruned_parent(last_pos, Some(&bitmap)) .unwrap(), - (ba.get_hash(38).unwrap(), 38) + (ba.get_hash(37).unwrap(), 38) ); assert!(segment.root(last_pos, Some(&bitmap)).unwrap().is_none()); segment.validate(last_pos, Some(&bitmap), root).unwrap(); @@ -281,7 +281,7 @@ fn pruned_segment() { .first_unpruned_parent(last_pos, Some(&bitmap)) .unwrap() .1, - segment.segment_pos_range(last_pos).1 + 1 + segment.segment_pos_range(last_pos).1 ); assert!(segment.root(last_pos, Some(&bitmap)).unwrap().is_some()); segment.validate(last_pos, Some(&bitmap), root).unwrap(); @@ -317,7 +317,7 @@ fn pruned_segment() { .first_unpruned_parent(last_pos, Some(&bitmap)) .unwrap() .1, - segment.segment_pos_range(last_pos).1 + 1 + segment.segment_pos_range(last_pos).1 ); assert!(segment.root(last_pos, Some(&bitmap)).unwrap().is_some()); segment.validate(last_pos, Some(&bitmap), root).unwrap(); @@ -378,8 +378,7 @@ where B: Backend, { for &leaf_idx in leaf_idxs { - mmr.prune(pmmr::insertion_to_pmmr_index(leaf_idx + 1)) - .unwrap(); + mmr.prune(pmmr::insertion_to_pmmr_index(leaf_idx)).unwrap(); bitmap.remove(leaf_idx as u32); } } diff --git a/store/tests/utxo_set_perf.rs b/store/tests/utxo_set_perf.rs index e3e95a46a..a1d89b2f9 100644 --- a/store/tests/utxo_set_perf.rs +++ b/store/tests/utxo_set_perf.rs @@ -39,7 +39,7 @@ fn test_leaf_set_performance() { let now = Instant::now(); for x in 0..1_000 { for y in 0..1_000 { - let pos = (x * 1_000) + y + 1; + let pos = (x * 1_000) + y; leaf_set.add(pos); } leaf_set.flush().unwrap(); @@ -53,7 +53,7 @@ fn test_leaf_set_performance() { // Simulate looking up existence of a large number of pos in the leaf_set. let now = Instant::now(); for x in 0..1_000_000 { - assert!(leaf_set.includes(x + 1)); + assert!(leaf_set.includes(x)); } println!( "Checking 1,000,000 inclusions in leaf_set took {}ms", @@ -65,7 +65,7 @@ fn test_leaf_set_performance() { let now = Instant::now(); for x in 0..1_000 { for y in 0..1_000 { - let pos = (x * 1_000) + y + 1; + let pos = (x * 1_000) + y; leaf_set.remove(pos); } leaf_set.flush().unwrap();
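The leaf_set changes in this perf test follow the same pattern as the prune list: callers pass 0-based positions while the serialized roaring bitmap keeps its original 1-based values, which is also why `leaf_pos_iter` maps `x - 1` over `leaf_set.iter()` earlier in this patch. A minimal sketch of that wrapper shape, assuming a croaring `Bitmap`; this is not the real `LeafSet`, only its assumed add/remove/includes surface.

    use croaring::Bitmap;

    struct LeafSet {
        bitmap: Bitmap,
    }

    impl LeafSet {
        fn add(&mut self, pos0: u64) {
            self.bitmap.add(1 + pos0 as u32);
        }
        fn remove(&mut self, pos0: u64) {
            self.bitmap.remove(1 + pos0 as u32);
        }
        fn includes(&self, pos0: u64) -> bool {
            self.bitmap.contains(1 + pos0 as u32)
        }
    }

    fn main() {
        let mut leaf_set = LeafSet {
            bitmap: Bitmap::create(),
        };
        leaf_set.add(0);
        assert!(leaf_set.includes(0));
        leaf_set.remove(0);
        assert!(!leaf_set.includes(0));
    }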