From 7a8d6147f466051fa972e391ffc4ae46502eb311 Mon Sep 17 00:00:00 2001
From: Antioch Peverell <30642645+antiochp@users.noreply.github.com>
Date: Fri, 23 Mar 2018 19:33:59 -0400
Subject: [PATCH] Split pmmr.get() into get_hash() and get_data() (#855)

---
 chain/src/chain.rs     |   6 +--
 chain/src/txhashset.rs |  27 +++++-----
 core/src/core/pmmr.rs  |  93 +++++++++++++++++++++++------------
 store/src/pmmr.rs      |  33 +++++++------
 store/tests/pmmr.rs    | 109 +++++++++++++++++++----------------------
 5 files changed, 148 insertions(+), 120 deletions(-)

diff --git a/chain/src/chain.rs b/chain/src/chain.rs
index 904b992f8..93f426034 100644
--- a/chain/src/chain.rs
+++ b/chain/src/chain.rs
@@ -627,19 +627,19 @@ impl Chain {
 	}
 
 	/// returns the last n nodes inserted into the output sum tree
-	pub fn get_last_n_output(&self, distance: u64) -> Vec<(Hash, Option)> {
+	pub fn get_last_n_output(&self, distance: u64) -> Vec<(Hash, OutputIdentifier)> {
 		let mut txhashset = self.txhashset.write().unwrap();
 		txhashset.last_n_output(distance)
 	}
 
 	/// as above, for rangeproofs
-	pub fn get_last_n_rangeproof(&self, distance: u64) -> Vec<(Hash, Option)> {
+	pub fn get_last_n_rangeproof(&self, distance: u64) -> Vec<(Hash, RangeProof)> {
 		let mut txhashset = self.txhashset.write().unwrap();
 		txhashset.last_n_rangeproof(distance)
 	}
 
 	/// as above, for kernels
-	pub fn get_last_n_kernel(&self, distance: u64) -> Vec<(Hash, Option)> {
+	pub fn get_last_n_kernel(&self, distance: u64) -> Vec<(Hash, TxKernel)> {
 		let mut txhashset = self.txhashset.write().unwrap();
 		txhashset.last_n_kernel(distance)
 	}
diff --git a/chain/src/txhashset.rs b/chain/src/txhashset.rs
index ed787e0d3..3cc5621ac 100644
--- a/chain/src/txhashset.rs
+++ b/chain/src/txhashset.rs
@@ -145,7 +145,7 @@ impl TxHashSet {
 			Ok(pos) => {
 				let output_pmmr: PMMR =
 					PMMR::at(&mut self.output_pmmr_h.backend, self.output_pmmr_h.last_pos);
-				if let Some((hash, _)) = output_pmmr.get(pos, false) {
+				if let Some(hash) = output_pmmr.get_hash(pos) {
 					if hash == output_id.hash_with_index(pos) {
 						Ok(hash)
 					} else {
@@ -163,21 +163,21 @@ impl TxHashSet {
 	/// returns the last N nodes inserted into the tree (i.e. the 'bottom'
 	/// nodes at level 0
 	/// TODO: These need to return the actual data from the flat-files instead of hashes now
-	pub fn last_n_output(&mut self, distance: u64) -> Vec<(Hash, Option)> {
+	pub fn last_n_output(&mut self, distance: u64) -> Vec<(Hash, OutputIdentifier)> {
 		let output_pmmr: PMMR =
 			PMMR::at(&mut self.output_pmmr_h.backend, self.output_pmmr_h.last_pos);
 		output_pmmr.get_last_n_insertions(distance)
 	}
 
 	/// as above, for range proofs
-	pub fn last_n_rangeproof(&mut self, distance: u64) -> Vec<(Hash, Option)> {
+	pub fn last_n_rangeproof(&mut self, distance: u64) -> Vec<(Hash, RangeProof)> {
 		let rproof_pmmr: PMMR =
 			PMMR::at(&mut self.rproof_pmmr_h.backend, self.rproof_pmmr_h.last_pos);
 		rproof_pmmr.get_last_n_insertions(distance)
 	}
 
 	/// as above, for kernels
-	pub fn last_n_kernel(&mut self, distance: u64) -> Vec<(Hash, Option)> {
+	pub fn last_n_kernel(&mut self, distance: u64) -> Vec<(Hash, TxKernel)> {
 		let kernel_pmmr: PMMR =
 			PMMR::at(&mut self.kernel_pmmr_h.backend, self.kernel_pmmr_h.last_pos);
 		kernel_pmmr.get_last_n_insertions(distance)
@@ -387,11 +387,13 @@ impl<'a> Extension<'a> {
 		let pos_res = self.get_output_pos(&commit);
 		if let Ok(pos) = pos_res {
 			let output_id_hash = OutputIdentifier::from_input(input).hash_with_index(pos);
-			if let Some((read_hash, read_elem)) = self.output_pmmr.get(pos, true) {
+			if let Some(read_hash) = self.output_pmmr.get_hash(pos) {
 				// check hash from pmmr matches hash from input (or corresponding output)
 				// if not then the input is not being honest about
 				// what it is attempting to spend...
+				let read_elem = self.output_pmmr.get_data(pos);
+
 
 				if output_id_hash != read_hash
 					|| output_id_hash
 						!= read_elem
@@ -435,7 +437,7 @@ impl<'a> Extension<'a> {
 			// (non-historical node will have a much smaller one)
 			// note that this doesn't show the commitment *never* existed, just
 			// that this is not an existing unspent commitment right now
-			if let Some((hash, _)) = self.output_pmmr.get(pos, false) {
+			if let Some(hash) = self.output_pmmr.get_hash(pos) {
 				// processing a new fork so we may get a position on the old
 				// fork that exists but matches a different node
 				// filtering that case out
@@ -635,9 +637,8 @@ impl<'a> Extension<'a> {
 		for n in 1..self.output_pmmr.unpruned_size() + 1 {
 			// non-pruned leaves only
 			if pmmr::bintree_postorder_height(n) == 0 {
-				if let Some((_, out)) = self.output_pmmr.get(n, true) {
-					self.commit_index
-						.save_output_pos(&out.expect("not a leaf node").commit, n)?;
+				if let Some(out) = self.output_pmmr.get_data(n) {
+					self.commit_index.save_output_pos(&out.commit, n)?;
 				}
 			}
 		}
@@ -708,7 +709,7 @@ impl<'a> Extension<'a> {
 
 		for n in 1..self.kernel_pmmr.unpruned_size() + 1 {
 			if pmmr::is_leaf(n) {
-				if let Some((_, Some(kernel))) = self.kernel_pmmr.get(n, true) {
+				if let Some(kernel) = self.kernel_pmmr.get_data(n) {
 					kernel.verify()?;
 					commitments.push(kernel.excess.clone());
 				}
@@ -736,8 +737,8 @@ impl<'a> Extension<'a> {
 		let mut proof_count = 0;
 		for n in 1..self.output_pmmr.unpruned_size() + 1 {
 			if pmmr::is_leaf(n) {
-				if let Some((_, Some(out))) = self.output_pmmr.get(n, true) {
-					if let Some((_, Some(rp))) = self.rproof_pmmr.get(n, true) {
+				if let Some(out) = self.output_pmmr.get_data(n) {
+					if let Some(rp) = self.rproof_pmmr.get_data(n) {
 						out.to_output(rp).verify_proof()?;
 					} else {
 						// TODO - rangeproof not found
@@ -764,7 +765,7 @@ impl<'a> Extension<'a> {
 		let mut commitments = vec![];
 		for n in 1..self.output_pmmr.unpruned_size() + 1 {
 			if pmmr::is_leaf(n) {
-				if let Some((_, Some(out))) = self.output_pmmr.get(n, true) {
+				if let Some(out) = self.output_pmmr.get_data(n) {
 					commitments.push(out.commit.clone());
 				}
 			}
diff --git a/core/src/core/pmmr.rs b/core/src/core/pmmr.rs
index e68feb41c..35e5eba26 100644
--- a/core/src/core/pmmr.rs
+++ b/core/src/core/pmmr.rs
@@ -63,16 +63,18 @@ where
 	/// occurred (see remove).
 	fn rewind(&mut self, position: u64, index: u32) -> Result<(), String>;
 
-	/// Get a Hash by insertion position. If include_data is true, will
-	/// also return the associated data element
-	fn get(&self, position: u64, include_data: bool) -> Option<(Hash, Option)>;
+	/// Get a Hash by insertion position.
+	fn get_hash(&self, position: u64) -> Option;
 
-	/// Get a Hash by original insertion position (ignoring the remove
-	/// list).
+	/// Get underlying data by insertion position.
+	fn get_data(&self, position: u64) -> Option;
+
+	/// Get a Hash by original insertion position
+	/// (ignoring the remove log).
 	fn get_from_file(&self, position: u64) -> Option;
 
-	/// Get a Data Element by original insertion position (ignoring the remove
-	/// list).
+	/// Get a Data Element by original insertion position
+	/// (ignoring the remove log).
 	fn get_data_from_file(&self, position: u64) -> Option;
 
 	/// Remove HashSums by insertion position. An index is also provided so the
@@ -327,9 +329,8 @@ where
 
 		let root = self.root();
 
-		let node = self.get(pos, false)
-			.ok_or(format!("no element at pos {}", pos))?
-			.0;
+		let node = self.get_hash(pos)
+			.ok_or(format!("no element at pos {}", pos))?;
 
 		let family_branch = family_branch(pos, self.last_pos);
 
@@ -413,7 +414,7 @@ where
 	/// to keep an index of elements to positions in the tree. Prunes parent
 	/// nodes as well when they become childless.
 	pub fn prune(&mut self, position: u64, index: u32) -> Result {
-		if let None = self.backend.get(position, false) {
+		if let None = self.backend.get_hash(position) {
 			return Ok(false);
 		}
 		let prunable_height = bintree_postorder_height(position);
@@ -439,7 +440,7 @@ where
 
 			// if we have a pruned sibling, we can continue up the tree
 			// otherwise we're done
-			if let None = self.backend.get(sibling, false) {
+			if let None = self.backend.get_hash(sibling) {
 				current = parent;
 			} else {
 				break;
@@ -450,34 +451,47 @@ where
 		Ok(true)
 	}
 
-	/// Helper function to get a node at a given position from
-	/// the backend.
-	pub fn get(&self, position: u64, include_data: bool) -> Option<(Hash, Option)> {
-		if position > self.last_pos {
+	/// Get a hash at provided position in the MMR.
+	pub fn get_hash(&self, pos: u64) -> Option {
+		if pos > self.last_pos {
 			None
 		} else {
-			self.backend.get(position, include_data)
+			self.backend.get_hash(pos)
 		}
 	}
 
-	fn get_from_file(&self, position: u64) -> Option {
-		if position > self.last_pos {
+	/// Get the data element at provided in the MMR.
+	pub fn get_data(&self, pos: u64) -> Option {
+		if pos > self.last_pos {
 			None
 		} else {
-			self.backend.get_from_file(position)
+			self.backend.get_data(pos)
+		}
+	}
+
+	/// Get the hash from the underlying MMR file
+	/// (ignores the remove log).
+	fn get_from_file(&self, pos: u64) -> Option {
+		if pos > self.last_pos {
+			None
+		} else {
+			self.backend.get_from_file(pos)
 		}
 	}
 
 	/// Helper function to get the last N nodes inserted, i.e. the last
 	/// n nodes along the bottom of the tree
-	pub fn get_last_n_insertions(&self, n: u64) -> Vec<(Hash, Option)> {
-		let mut return_vec = Vec::new();
+	pub fn get_last_n_insertions(&self, n: u64) -> Vec<(Hash, T)> {
+		let mut return_vec = vec![];
 		let mut last_leaf = self.last_pos;
 		let size = self.unpruned_size();
 		// Special case that causes issues in bintree functions,
 		// just return
 		if size == 1 {
-			return_vec.push(self.backend.get(last_leaf, true).unwrap());
+			return_vec.push((
+				self.backend.get_hash(last_leaf).unwrap(),
+				self.backend.get_data(last_leaf).unwrap(),
+			));
 			return return_vec;
 		}
 		// if size is even, we're already at the bottom, otherwise
@@ -492,7 +506,10 @@ where
 			if bintree_postorder_height(last_leaf) > 0 {
 				last_leaf = bintree_rightmost(last_leaf);
 			}
-			return_vec.push(self.backend.get(last_leaf, true).unwrap());
+			return_vec.push((
+				self.backend.get_hash(last_leaf).unwrap(),
+				self.backend.get_data(last_leaf).unwrap(),
+			));
 
 			last_leaf = bintree_jump_left_sibling(last_leaf);
 		}
@@ -504,7 +521,7 @@ where
 		// iterate on all parent nodes
 		for n in 1..(self.last_pos + 1) {
 			if bintree_postorder_height(n) > 0 {
-				if let Some(hs) = self.get(n, false) {
+				if let Some(hash) = self.get_hash(n) {
 					// take the left and right children, if they exist
 					let left_pos = bintree_move_down_left(n).ok_or(format!("left_pos not found"))?;
 					let right_pos = bintree_jump_right_sibling(left_pos);
@@ -514,7 +531,7 @@ where
 					if let Some(right_child_hs) = self.get_from_file(right_pos) {
 						// hash the two child nodes together with parent_pos and compare
 						let (parent_pos, _) = family(left_pos);
-						if (left_child_hs, right_child_hs).hash_with_index(parent_pos) != hs.0 {
+						if (left_child_hs, right_child_hs).hash_with_index(parent_pos) != hash {
 							return Err(format!(
 								"Invalid MMR, hash of parent at {} does \
 								 not match children.",
@@ -556,9 +573,9 @@ where
 				break;
 			}
 			idx.push_str(&format!("{:>8} ", m + 1));
-			let ohs = self.get(m + 1, false);
+			let ohs = self.get_hash(m + 1);
 			match ohs {
-				Some(hs) => hashes.push_str(&format!("{} ", hs.0)),
+				Some(hs) => hashes.push_str(&format!("{} ", hs)),
 				None => hashes.push_str(&format!("{:>8} ", "??")),
 			}
 		}
@@ -1015,11 +1032,27 @@ mod test {
 			Ok(())
 		}
 
-		fn get(&self, position: u64, _include_data: bool) -> Option<(Hash, Option)> {
+		fn get_hash(&self, position: u64) -> Option {
 			if self.remove_list.contains(&position) {
 				None
 			} else {
-				self.elems[(position - 1) as usize].clone()
+				if let Some(ref elem) = self.elems[(position - 1) as usize] {
+					Some(elem.0)
+				} else {
+					None
+				}
+			}
+		}
+
+		fn get_data(&self, position: u64) -> Option {
+			if self.remove_list.contains(&position) {
+				None
+			} else {
+				if let Some(ref elem) = self.elems[(position - 1) as usize] {
+					elem.1.clone()
+				} else {
+					None
+				}
 			}
 		}
diff --git a/store/src/pmmr.rs b/store/src/pmmr.rs
index 375984ff6..ebe32d1b6 100644
--- a/store/src/pmmr.rs
+++ b/store/src/pmmr.rs
@@ -161,26 +161,27 @@ where
 		}
 	}
 
-	/// Get a Hash by insertion position
-	fn get(&self, position: u64, include_data: bool) -> Option<(Hash, Option)> {
+	/// Get the hash at pos.
+	/// Return None if it has been removed.
+	fn get_hash(&self, pos: u64) -> Option<(Hash)> {
 		// Check if this position has been pruned in the remove log...
-		if self.rm_log.includes(position) {
-			return None;
+		if self.rm_log.includes(pos) {
+			None
+		} else {
+			self.get_from_file(pos)
 		}
+	}
 
-		let hash_val = self.get_from_file(position);
-		if !include_data {
-			return hash_val.map(|hash| (hash, None));
+	/// Get the data at pos.
+	/// Return None if it has been removed or if pos is not a leaf node.
+	fn get_data(&self, pos: u64) -> Option<(T)> {
+		if self.rm_log.includes(pos) {
+			None
+		} else if !pmmr::is_leaf(pos) {
+			None
+		} else {
+			self.get_data_from_file(pos)
 		}
-
-		// if this is not a leaf then we have no data
-		if !pmmr::is_leaf(position) {
-			return hash_val.map(|hash| (hash, None));
-		}
-
-		let data = self.get_data_from_file(position);
-
-		hash_val.map(|x| (x, data))
 	}
 
 	fn rewind(&mut self, position: u64, index: u32) -> Result<(), String> {
diff --git a/store/tests/pmmr.rs b/store/tests/pmmr.rs
index 14c6b234b..32b5c3595 100644
--- a/store/tests/pmmr.rs
+++ b/store/tests/pmmr.rs
@@ -39,7 +39,7 @@ fn pmmr_append() {
 
 	// check the resulting backend store and the computation of the root
 	let node_hash = elems[0].hash_with_index(1);
-	assert_eq!(backend.get(1, false).expect("").0, node_hash);
+	assert_eq!(backend.get_hash(1).unwrap(), node_hash);
 
 	// 0010012001001230
 
@@ -88,9 +88,9 @@ fn pmmr_compact_leaf_sibling() {
 	let (pos_1_hash, pos_2_hash, pos_3_hash) = {
 		let mut pmmr = PMMR::at(&mut backend, mmr_size);
 		(
-			pmmr.get(1, false).unwrap().0,
-			pmmr.get(2, false).unwrap().0,
-			pmmr.get(3, false).unwrap().0,
+			pmmr.get_hash(1).unwrap(),
+			pmmr.get_hash(2).unwrap(),
+			pmmr.get_hash(3).unwrap(),
 		)
 	};
 
@@ -109,11 +109,11 @@ fn pmmr_compact_leaf_sibling() {
 		let pmmr = PMMR::at(&mut backend, mmr_size);
 
 		// check that pos 1 is "removed"
-		assert_eq!(pmmr.get(1, false), None);
+		assert_eq!(pmmr.get_hash(1), None);
 
 		// check that pos 2 and 3 are unchanged
-		assert_eq!(pmmr.get(2, false).unwrap().0, pos_2_hash);
-		assert_eq!(pmmr.get(3, false).unwrap().0, pos_3_hash);
+		assert_eq!(pmmr.get_hash(2).unwrap(), pos_2_hash);
+		assert_eq!(pmmr.get_hash(3).unwrap(), pos_3_hash);
 	}
 
 	// check we can still retrieve the "removed" element at pos 1
@@ -128,11 +128,11 @@ fn pmmr_compact_leaf_sibling() {
 		let pmmr = PMMR::at(&mut backend, mmr_size);
 
 		// check that pos 1 is "removed"
-		assert_eq!(pmmr.get(1, false), None);
+		assert_eq!(pmmr.get_hash(1), None);
 
 		// check that pos 2 and 3 are unchanged
-		assert_eq!(pmmr.get(2, false).unwrap().0, pos_2_hash);
-		assert_eq!(pmmr.get(3, false).unwrap().0, pos_3_hash);
+		assert_eq!(pmmr.get_hash(2).unwrap(), pos_2_hash);
+		assert_eq!(pmmr.get_hash(3).unwrap(), pos_3_hash);
 	}
 
 	// Check we can still retrieve the "removed" hash at pos 1 from the hash file.
@@ -171,9 +171,9 @@ fn pmmr_prune_compact() {
 		let pmmr: PMMR = PMMR::at(&mut backend, mmr_size);
 		assert_eq!(root, pmmr.root());
 		// check we can still retrieve same element from leaf index 2
-		assert_eq!(pmmr.get(2, true).unwrap().1.unwrap(), TestElem(2));
+		assert_eq!(pmmr.get_data(2).unwrap(), TestElem(2));
 		// and the same for leaf index 7
-		assert_eq!(pmmr.get(11, true).unwrap().1.unwrap(), TestElem(7));
+		assert_eq!(pmmr.get_data(11).unwrap(), TestElem(7));
 	}
 
 	// compact
@@ -183,8 +183,8 @@ fn pmmr_prune_compact() {
 	{
 		let pmmr: PMMR = PMMR::at(&mut backend, mmr_size);
 		assert_eq!(root, pmmr.root());
-		assert_eq!(pmmr.get(2, true).unwrap().1.unwrap(), TestElem(2));
-		assert_eq!(pmmr.get(11, true).unwrap().1.unwrap(), TestElem(7));
+		assert_eq!(pmmr.get_data(2).unwrap(), TestElem(2));
+		assert_eq!(pmmr.get_data(11).unwrap(), TestElem(7));
 	}
 
 	teardown(data_dir);
@@ -200,9 +200,9 @@ fn pmmr_reload() {
 	let mmr_size = load(0, &elems[..], &mut backend);
 
 	// retrieve entries from the hash file for comparison later
-	let (pos_3_hash, _) = backend.get(3, false).unwrap();
-	let (pos_4_hash, _) = backend.get(4, false).unwrap();
-	let (pos_5_hash, _) = backend.get(5, false).unwrap();
+	let pos_3_hash = backend.get_hash(3).unwrap();
+	let pos_4_hash = backend.get_hash(4).unwrap();
+	let pos_5_hash = backend.get_hash(5).unwrap();
 
 	// save the root
 	let root = {
@@ -257,16 +257,16 @@ fn pmmr_reload() {
 		}
 
 		// pos 1 and pos 2 are both removed (via parent pos 3 in prune list)
-		assert_eq!(backend.get(1, false), None);
-		assert_eq!(backend.get(2, false), None);
+		assert_eq!(backend.get_hash(1), None);
+		assert_eq!(backend.get_hash(2), None);
 
 		// pos 3 is removed (via prune list)
-		assert_eq!(backend.get(3, false), None);
+		assert_eq!(backend.get_hash(3), None);
 
 		// pos 4 is removed (via prune list)
-		assert_eq!(backend.get(4, false), None);
+		assert_eq!(backend.get_hash(4), None);
 
 		// pos 5 is removed (via rm_log)
-		assert_eq!(backend.get(5, false), None);
+		assert_eq!(backend.get_hash(5), None);
 
 		// now check contents of the hash file
 		// pos 1 and pos 2 are no longer in the hash file
@@ -383,13 +383,10 @@ fn pmmr_compact_entire_peak() {
 	let mmr_size = load(0, &elems[0..5], &mut backend);
 	backend.sync().unwrap();
 
-	let pos_7 = backend.get(7, true).unwrap();
-	let pos_7_hash = backend.get_from_file(7).unwrap();
-	assert_eq!(pos_7.0, pos_7_hash);
+	let pos_7_hash = backend.get_hash(7).unwrap();
 
-	let pos_8 = backend.get(8, true).unwrap();
-	let pos_8_hash = backend.get_from_file(8).unwrap();
-	assert_eq!(pos_8.0, pos_8_hash);
+	let pos_8 = backend.get_data(8).unwrap();
+	let pos_8_hash = backend.get_hash(8).unwrap();
 
 	// prune all leaves under the peak at pos 7
 	{
@@ -407,11 +404,12 @@ fn pmmr_compact_entire_peak() {
 
 	// now check we have pruned up to and including the peak at pos 7
 	// hash still available in underlying hash file
-	assert_eq!(backend.get(7, false), None);
+	assert_eq!(backend.get_hash(7), None);
 	assert_eq!(backend.get_from_file(7), Some(pos_7_hash));
 
 	// now check we still have subsequent hash and data where we expect
-	assert_eq!(backend.get(8, true), Some(pos_8));
+	assert_eq!(backend.get_hash(8), Some(pos_8_hash));
+	assert_eq!(backend.get_data(8), Some(pos_8));
 	assert_eq!(backend.get_from_file(8), Some(pos_8_hash));
 
 	teardown(data_dir);
@@ -429,25 +427,17 @@ fn pmmr_compact_horizon() {
 	assert_eq!(backend.data_size().unwrap(), 19);
 	assert_eq!(backend.hash_size().unwrap(), 35);
 
-	let pos_3 = backend.get(3, false).unwrap();
-	let pos_3_hash = backend.get_from_file(3).unwrap();
-	assert_eq!(pos_3.0, pos_3_hash);
+	let pos_3_hash = backend.get_hash(3).unwrap();
 
-	let pos_6 = backend.get(6, false).unwrap();
-	let pos_6_hash = backend.get_from_file(6).unwrap();
-	assert_eq!(pos_6.0, pos_6_hash);
+	let pos_6_hash = backend.get_hash(6).unwrap();
 
-	let pos_7 = backend.get(7, false).unwrap();
-	let pos_7_hash = backend.get_from_file(7).unwrap();
-	assert_eq!(pos_7.0, pos_7_hash);
+	let pos_7_hash = backend.get_hash(7).unwrap();
 
-	let pos_8 = backend.get(8, true).unwrap();
-	let pos_8_hash = backend.get_from_file(8).unwrap();
-	assert_eq!(pos_8.0, pos_8_hash);
+	let pos_8 = backend.get_data(8).unwrap();
+	let pos_8_hash = backend.get_hash(8).unwrap();
 
-	let pos_11 = backend.get(11, true).unwrap();
-	let pos_11_hash = backend.get_from_file(11).unwrap();
-	assert_eq!(pos_11.0, pos_11_hash);
+	let pos_11 = backend.get_data(11).unwrap();
+	let pos_11_hash = backend.get_hash(11).unwrap();
 
 	{
 		// pruning some choice nodes with an increasing block height
@@ -462,19 +452,21 @@ fn pmmr_compact_horizon() {
 
 	// check we can read hashes and data correctly after pruning
 	{
-		assert_eq!(backend.get(3, false), None);
+		assert_eq!(backend.get_hash(3), None);
 		assert_eq!(backend.get_from_file(3), Some(pos_3_hash));
 
-		assert_eq!(backend.get(6, false), None);
+		assert_eq!(backend.get_hash(6), None);
 		assert_eq!(backend.get_from_file(6), Some(pos_6_hash));
 
-		assert_eq!(backend.get(7, true), None);
+		assert_eq!(backend.get_hash(7), None);
 		assert_eq!(backend.get_from_file(7), Some(pos_7_hash));
 
-		assert_eq!(backend.get(8, true), Some(pos_8));
+		assert_eq!(backend.get_hash(8), Some(pos_8_hash));
+		assert_eq!(backend.get_data(8), Some(pos_8));
 		assert_eq!(backend.get_from_file(8), Some(pos_8_hash));
 
-		assert_eq!(backend.get(11, true), Some(pos_11));
+		assert_eq!(backend.get_hash(11), Some(pos_11_hash));
+		assert_eq!(backend.get_data(11), Some(pos_11));
 		assert_eq!(backend.get_from_file(11), Some(pos_11_hash));
 	}
 
@@ -484,16 +476,16 @@ fn pmmr_compact_horizon() {
 	// compact
 	backend.check_compact(2, 3).unwrap();
 
 	// check we can read a hash by pos correctly after compaction
 	{
-		assert_eq!(backend.get(3, false), None);
+		assert_eq!(backend.get_hash(3), None);
 		assert_eq!(backend.get_from_file(3), Some(pos_3_hash));
 
-		assert_eq!(backend.get(6, false), None);
+		assert_eq!(backend.get_hash(6), None);
 		assert_eq!(backend.get_from_file(6), Some(pos_6_hash));
 
-		assert_eq!(backend.get(7, true), None);
+		assert_eq!(backend.get_hash(7), None);
 		assert_eq!(backend.get_from_file(7), Some(pos_7_hash));
 
-		assert_eq!(backend.get(8, true), Some(pos_8));
+		assert_eq!(backend.get_hash(8), Some(pos_8_hash));
 		assert_eq!(backend.get_from_file(8), Some(pos_8_hash));
 	}
 }
@@ -508,10 +500,10 @@ fn pmmr_compact_horizon() {
 		assert_eq!(backend.hash_size().unwrap(), 33);
 
 		// check we can read a hash by pos correctly from recreated backend
-		assert_eq!(backend.get(7, true), None);
+		assert_eq!(backend.get_hash(7), None);
 		assert_eq!(backend.get_from_file(7), Some(pos_7_hash));
 
-		assert_eq!(backend.get(8, true), Some(pos_8));
+		assert_eq!(backend.get_hash(8), Some(pos_8_hash));
 		assert_eq!(backend.get_from_file(8), Some(pos_8_hash));
 	}
 
@@ -542,10 +534,11 @@ fn pmmr_compact_horizon() {
 		assert_eq!(backend.hash_size().unwrap(), 29);
 
 		// check we can read a hash by pos correctly from recreated backend
-		assert_eq!(backend.get(7, true), None);
+		assert_eq!(backend.get_hash(7), None);
		assert_eq!(backend.get_from_file(7), Some(pos_7_hash));
 
-		assert_eq!(backend.get(11, true), Some(pos_11));
+		assert_eq!(backend.get_hash(11), Some(pos_11_hash));
+		assert_eq!(backend.get_data(11), Some(pos_11));
 		assert_eq!(backend.get_from_file(11), Some(pos_11_hash));
 	}
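
Reviewer note (not part of the patch): a minimal sketch of how a call site migrates to the split accessors, using hypothetical local names (output_pmmr, pos, out) purely for illustration.

	// Before: a single call returned the hash plus, optionally, the data element.
	//   if let Some((hash, Some(out))) = output_pmmr.get(pos, true) { /* ... */ }

	// After: the hash and the data element are fetched independently;
	// per the backend docs above, get_data() returns None for removed
	// positions and for positions that are not leaf nodes.
	if let Some(hash) = output_pmmr.get_hash(pos) {
		// compare against the expected hash for this position
	}
	if let Some(out) = output_pmmr.get_data(pos) {
		// use the leaf data, e.g. out.commit
	}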