check_compact retains leaves and roots until parents are pruned (#753)
* wip
* failing test for being too eager when pruning a sibling
* commit
* rustfmt
* [WIP] modified get_shift and get_leaf_shift to account for leaving "pruned but not compacted" leaves in place. Note: this currently breaks check_compact as nothing else is aware of the modified behavior
* rustfmt
* commit
* rustfmt
* basic prune/compact/shift working
* rustfmt
* commit
* rustfmt
* next_pruned_idx working (I think)
* commit
* horizon test uncovered some subtle issues - wip
* rustfmt
* cleanup
* rustfmt
* commit
* cleanup
* cleanup
* commit
* rustfmt
* contains -> binary_search
* rustfmt
* no need for height==0 special case
* wip - works for single compact, 2nd one breaks the mmr hashes
* commit
* rustfmt
* fixed it (needs a lot of cleanup); we were not traversing all the way up to the peak if we pruned an entire tree, so rm_log and prune list were inconsistent
* multiple compact steps are working; data file not being compacted currently (still to investigate)
* cleanup store tests
* cleanup
* cleanup up debug
* rustfmt
* take kernel offsets into account when summing kernels and outputs for full txhashset validation; validate chain state pre and post compaction
* rustfmt
* fix wallet refresh (we need block height to be refreshed on non-coinbase outputs), otherwise we cannot spend them...
* rustfmt
This commit is contained in:
parent e268993f5e
commit 65633c7611

16 changed files with 1004 additions and 293 deletions
@@ -118,18 +118,35 @@ impl OutputHandler {
 		include_proof: bool,
 	) -> BlockOutputs {
 		let header = w(&self.chain).get_header_by_height(block_height).unwrap();
-		let block = w(&self.chain).get_block(&header.hash()).unwrap();
-		let outputs = block
-			.outputs
-			.iter()
-			.filter(|output| commitments.is_empty() || commitments.contains(&output.commit))
-			.map(|output| {
-				OutputPrintable::from_output(output, w(&self.chain), &header, include_proof)
-			})
-			.collect();
-		BlockOutputs {
-			header: BlockHeaderInfo::from_header(&header),
-			outputs: outputs,
+
+		// TODO - possible to compact away blocks we care about
+		// in the period between accepting the block and refreshing the wallet
+		if let Ok(block) = w(&self.chain).get_block(&header.hash()) {
+			let outputs = block
+				.outputs
+				.iter()
+				.filter(|output| commitments.is_empty() || commitments.contains(&output.commit))
+				.map(|output| {
+					OutputPrintable::from_output(output, w(&self.chain), &header, include_proof)
+				})
+				.collect();
+
+			BlockOutputs {
+				header: BlockHeaderInfo::from_header(&header),
+				outputs: outputs,
+			}
+		} else {
+			debug!(
+				LOGGER,
+				"could not find block {:?} at height {}, maybe compacted?",
+				&header.hash(),
+				block_height,
+			);
+
+			BlockOutputs {
+				header: BlockHeaderInfo::from_header(&header),
+				outputs: vec![],
+			}
+		}
 	}
@@ -556,11 +556,36 @@ impl Chain {
 	/// Meanwhile, the chain will not be able to accept new blocks. It should
 	/// therefore be called judiciously.
 	pub fn compact(&self) -> Result<(), Error> {
-		let mut sumtrees = self.txhashset.write().unwrap();
-		sumtrees.compact()?;
+		// First check we can successfully validate the full chain state.
+		// If we cannot then do not attempt to compact.
+		// This should not be required long term - but doing this for debug purposes.
+		self.validate()?;
+
+		// Now compact the txhashset via the extension.
+		{
+			let mut txhashes = self.txhashset.write().unwrap();
+			txhashes.compact()?;
+
+			// print out useful debug info after compaction
+			txhashset::extending(&mut txhashes, |extension| {
+				extension.dump_output_pmmr();
+				Ok(())
+			})?;
+		}
+
+		// Now check we can still successfully validate the chain state after
+		// compacting.
+		self.validate()?;
+
+		// we need to be careful here in testing as 20 blocks is not that long
+		// in wall clock time
 		let horizon = global::cut_through_horizon() as u64;
 		let head = self.head()?;
+
+		if head.height <= horizon {
+			return Ok(());
+		}
+
 		let mut current = self.store.get_header_by_height(head.height - horizon - 1)?;
 		loop {
 			match self.store.get_block(&current.hash()) {
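The reworked compact() above brackets the destructive step between two full chain-state validations, so a state that cannot be validated is never compacted and a compaction that corrupts state is caught immediately. A minimal sketch of that guard pattern, using a made-up State type as a stand-in for grin's chain state (this is not grin's API):

// Sketch of the validate-compact-validate guard. `State`, `validate`
// and `compact` are hypothetical stand-ins.
struct State {
	entries: Vec<u64>,
}

impl State {
	// Pretend validation: an empty state is considered corrupt.
	fn validate(&self) -> Result<(), String> {
		if self.entries.is_empty() {
			return Err("invalid state".to_owned());
		}
		Ok(())
	}

	// Pretend compaction: drop everything but the last entry.
	fn compact(&mut self) -> Result<(), String> {
		let last = *self.entries.last().ok_or("empty")?;
		self.entries = vec![last];
		Ok(())
	}

	// Refuse to compact unless the state validates both before and after.
	fn compact_with_guard(&mut self) -> Result<(), String> {
		self.validate()?; // do not compact a state we cannot validate
		self.compact()?;
		self.validate()?; // confirm compaction did not corrupt anything
		Ok(())
	}
}

fn main() {
	let mut s = State { entries: vec![1, 2, 3] };
	s.compact_with_guard().unwrap();
	assert_eq!(s.entries, vec![3]);
}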
@@ -31,6 +31,7 @@ extern crate slog;
 extern crate time;

 extern crate grin_core as core;
 extern crate grin_keychain as keychain;
+extern crate grin_store;
 extern crate grin_util as util;

@@ -35,6 +35,7 @@ use core::ser::{self, PMMRIndexHashable, PMMRable};
 use grin_store;
 use grin_store::pmmr::{PMMRBackend, PMMRFileMetadata};
+use grin_store::types::prune_noop;
 use keychain::BlindingFactor;
 use types::{ChainStore, Error, PMMRFileMetadataCollection, TxHashSetRoots};
 use util::{zip, LOGGER};

@@ -54,7 +55,7 @@ where
 impl<T> PMMRHandle<T>
 where
-	T: PMMRable,
+	T: PMMRable + ::std::fmt::Debug,
 {
 	fn new(
 		root_dir: String,

@@ -210,15 +211,24 @@ impl TxHashSet {
 	/// Compact the MMR data files and flush the rm logs
 	pub fn compact(&mut self) -> Result<(), Error> {
-		let horizon = global::cut_through_horizon();
+		let commit_index = self.commit_index.clone();
+		let head = commit_index.head()?;
+		let current_height = head.height;
+
+		// horizon for compacting is based on current_height
+		let horizon = (current_height as u32).saturating_sub(global::cut_through_horizon());
+
 		let clean_output_index = |commit: &[u8]| {
 			// do we care if this fails?
 			let _ = commit_index.delete_output_pos(commit);
 		};
+
+		let min_rm = (horizon / 10) as usize;
+
 		self.output_pmmr_h
 			.backend
 			.check_compact(min_rm, horizon, clean_output_index)?;
+
 		self.rproof_pmmr_h
 			.backend
 			.check_compact(min_rm, horizon, &prune_noop)?;
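The horizon above is now derived from the current head height rather than used directly. The saturating_sub matters near the start of the chain: without it the subtraction would underflow and panic (or wrap) when the chain is shorter than the cut-through horizon. A small illustration, with CUT_THROUGH_HORIZON as an assumed stand-in for global::cut_through_horizon():

// How the compaction horizon behaves near the start of the chain.
const CUT_THROUGH_HORIZON: u32 = 48; // assumed value for illustration only

fn compact_horizon(current_height: u64) -> u32 {
	// saturating_sub clamps to 0 instead of underflowing when the
	// chain is shorter than the horizon
	(current_height as u32).saturating_sub(CUT_THROUGH_HORIZON)
}

fn main() {
	assert_eq!(compact_horizon(10), 0);   // chain shorter than horizon: nothing to compact
	assert_eq!(compact_horizon(48), 0);   // exactly at the horizon
	assert_eq!(compact_horizon(100), 52); // only data at or below height 52 is a candidate
}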
@@ -381,6 +391,7 @@ impl<'a> Extension<'a> {
 			// check hash from pmmr matches hash from input (or corresponding output)
 			// if not then the input is not being honest about
 			// what it is attempting to spend...
+
 			if output_id_hash != read_hash
 				|| output_id_hash
 					!= read_elem

@@ -562,14 +573,19 @@ impl<'a> Extension<'a> {
 		// the real magicking: the sum of all kernel excess should equal the sum
 		// of all Output commitments, minus the total supply
-		let (kernel_sum, fees) = self.sum_kernels()?;
+		let kernel_offset = self.sum_kernel_offsets(&header)?;
+		let kernel_sum = self.sum_kernels(kernel_offset)?;
 		let output_sum = self.sum_outputs()?;
+
+		// supply is the sum of the coinbase outputs from all the block headers
+		let supply = header.height * REWARD;
+
 		{
 			let secp = static_secp_instance();
 			let secp = secp.lock().unwrap();
-			let over_commit = secp.commit_value(header.height * REWARD)?;
-			let adjusted_sum_output = secp.commit_sum(vec![output_sum], vec![over_commit])?;
+			let over_commit = secp.commit_value(supply)?;
+			let adjusted_sum_output = secp.commit_sum(vec![output_sum], vec![over_commit])?;
 			if adjusted_sum_output != kernel_sum {
 				return Err(Error::InvalidTxHashSet(
 					"Differing Output commitment and kernel excess sums.".to_owned(),

@@ -601,6 +617,14 @@ impl<'a> Extension<'a> {
 		self.rollback = true;
 	}

+	/// Dumps the output MMR.
+	/// We use this after compacting for visual confirmation that it worked.
+	pub fn dump_output_pmmr(&self) {
+		debug!(LOGGER, "-- outputs --");
+		self.output_pmmr.dump_from_file(false);
+		debug!(LOGGER, "-- end of outputs --");
+	}
+
 	/// Dumps the state of the 3 sum trees to stdout for debugging. Short
 	/// version only prints the Output tree.
 	pub fn dump(&self, short: bool) {

@@ -623,9 +647,46 @@ impl<'a> Extension<'a> {
 		)
 	}

+	/// TODO - Just use total_offset from latest header once this is available.
+	/// So we do not need to iterate over all the headers to calculate it.
+	fn sum_kernel_offsets(&self, header: &BlockHeader) -> Result<Option<Commitment>, Error> {
+		let mut kernel_offsets = vec![];
+
+		// iterate back up the chain collecting the kernel offset for each block header
+		let mut current = header.clone();
+		while current.height > 0 {
+			kernel_offsets.push(current.kernel_offset);
+			current = self.commit_index.get_block_header(&current.previous)?;
+		}
+
+		// now sum the kernel_offset from each block header
+		// to give us an aggregate offset for the entire
+		// blockchain
+		let secp = static_secp_instance();
+		let secp = secp.lock().unwrap();
+
+		let keys = kernel_offsets
+			.iter()
+			.cloned()
+			.filter(|x| *x != BlindingFactor::zero())
+			.filter_map(|x| x.secret_key(&secp).ok())
+			.collect::<Vec<_>>();
+
+		let offset = if keys.is_empty() {
+			None
+		} else {
+			let sum = secp.blind_sum(keys, vec![])?;
+			let offset = BlindingFactor::from_secret_key(sum);
+			let skey = offset.secret_key(&secp)?;
+			Some(secp.commit(0, skey)?)
+		};
+
+		Ok(offset)
+	}
+
 	/// Sums the excess of all our kernels, validating their signatures on the
 	/// way
-	fn sum_kernels(&self) -> Result<(Commitment, u64), Error> {
+	fn sum_kernels(&self, kernel_offset: Option<Commitment>) -> Result<Commitment, Error> {
 		// make sure we have the right count of kernels using the MMR, the storage
 		// file may have a few more
 		let mmr_sz = self.kernel_pmmr.unpruned_size();

@@ -635,7 +696,6 @@ impl<'a> Extension<'a> {
 		let first: TxKernel = ser::deserialize(&mut kernel_file)?;
 		first.verify()?;
 		let mut sum_kernel = first.excess;
-		let mut fees = first.fee;

 		let secp = static_secp_instance();
 		let mut kern_count = 1;

@@ -645,7 +705,6 @@ impl<'a> Extension<'a> {
 					kernel.verify()?;
 					let secp = secp.lock().unwrap();
 					sum_kernel = secp.commit_sum(vec![sum_kernel, kernel.excess], vec![])?;
-					fees += kernel.fee;
 					kern_count += 1;
 					if kern_count == count {
 						break;

@@ -654,8 +713,20 @@ impl<'a> Extension<'a> {
 				Err(_) => break,
 			}
 		}
-		debug!(LOGGER, "Validated and summed {} kernels", kern_count);
-		Ok((sum_kernel, fees))
+
+		// now apply the kernel offset if we have one
+		{
+			let secp = secp.lock().unwrap();
+			if let Some(kernel_offset) = kernel_offset {
+				sum_kernel = secp.commit_sum(vec![sum_kernel, kernel_offset], vec![])?;
+			}
+		}
+
+		debug!(
+			LOGGER,
+			"Validated, summed (and offset) {} kernels", kern_count
+		);
+		Ok(sum_kernel)
 	}

 	/// Sums all our Output commitments, checking range proofs at the same time
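The balance check performed here is: the sum of all output commitments, minus a commitment to the total supply, must equal the sum of all kernel excesses plus the aggregate kernel offset committed with a zero value. A toy scalar model of that identity, where a "commitment" is just its (blinding, value) pair laid bare rather than a real Pedersen commitment over secp256k1 (all names and numbers below are illustrative only):

// Toy model of the txhashset balance check. Not real cryptography:
// a Commit here is the pair (r, v) standing in for r*G + v*H.
#[derive(Clone, Copy, Debug, PartialEq)]
struct Commit {
	blind: i64, // the "r*G" component
	value: i64, // the "v*H" component
}

fn add(a: Commit, b: Commit) -> Commit {
	Commit { blind: a.blind + b.blind, value: a.value + b.value }
}

fn main() {
	const SUPPLY: i64 = 60;
	// a single coinbase output worth the full supply, blinded with r = 7
	let output_sum = Commit { blind: 7, value: SUPPLY };
	// the header declares a kernel offset of 3, so the kernel excess
	// commits only to the remaining blinding factor (7 - 3)
	let offset = Commit { blind: 3, value: 0 };
	let kernel_excess = Commit { blind: 7 - 3, value: 0 };

	// adjusted_sum_output = sum(outputs) - commit(supply)
	let over_commit = Commit { blind: 0, value: SUPPLY };
	let adjusted = Commit {
		blind: output_sum.blind - over_commit.blind,
		value: output_sum.value - over_commit.value,
	};

	// the check in validate(): outputs minus supply must equal
	// kernel excess plus the aggregate offset
	assert_eq!(adjusted, add(kernel_excess, offset));
}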
@@ -664,7 +735,7 @@ impl<'a> Extension<'a> {
 		let mut output_count = 0;
 		let secp = static_secp_instance();
 		for n in 1..self.output_pmmr.unpruned_size() + 1 {
-			if pmmr::bintree_postorder_height(n) == 0 {
+			if pmmr::is_leaf(n) {
 				if let Some((_, output)) = self.output_pmmr.get(n, true) {
 					let out = output.expect("not a leaf node");
 					let commit = out.commit.clone();

@@ -24,6 +24,7 @@ use core::core::{block, transaction, Block, BlockHeader};
 use core::core::hash::{Hash, Hashed};
 use core::core::target::Difficulty;
 use core::ser::{self, Readable, Reader, Writeable, Writer};
+use keychain;
 use grin_store;
 use grin_store::pmmr::PMMRFileMetadata;

@@ -75,6 +76,10 @@ pub enum Error {
 	InvalidRoot,
 	/// Something does not look right with the switch commitment
 	InvalidSwitchCommit,
+	/// Error from underlying keychain impl
+	Keychain(keychain::Error),
+	/// Error from underlying secp lib
+	Secp(secp::Error),
 	/// One of the inputs in the block has already been spent
 	AlreadySpent(Commitment),
 	/// An output with that commitment already exists (should be unique)

@@ -108,19 +113,28 @@ impl From<grin_store::Error> for Error {
 		Error::StoreErr(e, "wrapped".to_owned())
 	}
 }

 impl From<ser::Error> for Error {
 	fn from(e: ser::Error) -> Error {
 		Error::SerErr(e)
 	}
 }

 impl From<io::Error> for Error {
 	fn from(e: io::Error) -> Error {
 		Error::TxHashSetErr(e.to_string())
 	}
 }

+impl From<keychain::Error> for Error {
+	fn from(e: keychain::Error) -> Error {
+		Error::Keychain(e)
+	}
+}
+
 impl From<secp::Error> for Error {
 	fn from(e: secp::Error) -> Error {
-		Error::TxHashSetErr(format!("Sum validation error: {}", e.to_string()))
+		Error::Secp(e)
 	}
 }
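The point of adding dedicated Keychain and Secp variants plus their From impls is that the `?` operator converts errors through From automatically, so callers no longer stringify secp errors into TxHashSetErr. A minimal sketch with hypothetical stand-in types (not grin's actual error enums):

// Why the From impls matter: `?` calls From::from for you.
// KeychainError and ChainError below are illustrative stand-ins.
#[derive(Debug)]
struct KeychainError(String);

#[derive(Debug)]
enum ChainError {
	Keychain(KeychainError),
}

impl From<KeychainError> for ChainError {
	fn from(e: KeychainError) -> ChainError {
		ChainError::Keychain(e)
	}
}

fn derive_key() -> Result<u8, KeychainError> {
	Err(KeychainError("bad derivation path".to_owned()))
}

fn build_output() -> Result<u8, ChainError> {
	// no manual map_err: `?` converts KeychainError into ChainError::Keychain
	let key = derive_key()?;
	Ok(key)
}

fn main() {
	assert!(matches!(build_output(), Err(ChainError::Keychain(_))));
}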
@@ -21,7 +21,7 @@ use std::collections::HashSet;
 use core::{Committed, Input, KernelFeatures, Output, OutputFeatures, Proof, ProofMessageElements,
            ShortId, SwitchCommitHash, Transaction, TxKernel};
 use consensus;
-use consensus::{exceeds_weight, reward, REWARD, VerifySortOrder};
+use consensus::{exceeds_weight, reward, VerifySortOrder, REWARD};
 use core::hash::{Hash, Hashed, ZERO_HASH};
 use core::id::ShortIdentifiable;
 use core::target::Difficulty;

@@ -121,6 +121,9 @@ pub struct BlockHeader {
 	pub total_difficulty: Difficulty,
 	/// The single aggregate "offset" that needs to be applied for all
 	/// commitments to sum
+	/// TODO - maintain total_offset (based on sum of all headers)
+	/// If we need the individual offset for this block we can derive
+	/// it easily from current - previous
 	pub kernel_offset: BlindingFactor,
 }

@@ -73,7 +73,7 @@ where
 	/// Remove HashSums by insertion position. An index is also provided so the
 	/// underlying backend can implement some rollback of positions up to a
-	/// given index (practically the index is a the height of a block that
+	/// given index (practically the index is the height of a block that
 	/// triggered removal).
 	fn remove(&mut self, positions: Vec<u64>, index: u32) -> Result<(), String>;

@@ -263,7 +263,7 @@ where
 impl<'a, T, B> PMMR<'a, T, B>
 where
-	T: PMMRable,
+	T: PMMRable + ::std::fmt::Debug,
 	B: 'a + Backend<T>,
 {
 	/// Build a new prunable Merkle Mountain Range using the provided backend.

@@ -290,9 +290,13 @@ where
 	/// tree and "bags" them to get a single peak.
 	pub fn root(&self) -> Hash {
 		let peaks_pos = peaks(self.last_pos);
-		let peaks: Vec<Option<(Hash, Option<T>)>> = peaks_pos
+		let peaks: Vec<Option<Hash>> = peaks_pos
 			.into_iter()
-			.map(|pi| self.backend.get(pi, false))
+			.map(|pi| {
+				// here we want to get from underlying hash file
+				// as the pos *may* have been "removed"
+				self.backend.get_from_file(pi)
+			})
 			.collect();

 		let mut ret = None;

@@ -300,10 +304,10 @@ where
 			ret = match (ret, peak) {
 				(None, x) => x,
 				(Some(hash), None) => Some(hash),
-				(Some(lhash), Some(rhash)) => Some((lhash.0.hash_with(rhash.0), None)),
+				(Some(lhash), Some(rhash)) => Some(lhash.hash_with(rhash)),
 			}
 		}
-		ret.expect("no root, invalid tree").0
+		ret.expect("no root, invalid tree")
 	}

 	/// Build a Merkle proof for the element at the given position in the MMR
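The root() change reads peak hashes straight from the underlying hash file (so "removed" peaks still contribute) and then "bags" the peaks with a left-to-right fold. A toy version of that fold, with string concatenation standing in for hash_with (illustrative only, not grin's Hash type):

// Toy version of the peak-bagging fold in root(): walk the peaks in
// order, skip missing ones, and combine pairwise.
fn bag_peaks(peaks: Vec<Option<String>>) -> Option<String> {
	let mut ret: Option<String> = None;
	for peak in peaks {
		ret = match (ret, peak) {
			(None, x) => x,
			(Some(lhash), None) => Some(lhash),
			(Some(lhash), Some(rhash)) => Some(format!("({}|{})", lhash, rhash)),
		};
	}
	ret
}

fn main() {
	let peaks = vec![Some("p1".to_owned()), None, Some("p2".to_owned()), Some("p3".to_owned())];
	// peaks combine left to right: ((p1|p2)|p3)
	assert_eq!(bag_peaks(peaks).unwrap(), "((p1|p2)|p3)");
}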
@@ -331,9 +335,7 @@ where
 			.filter_map(|x| {
 				// we want to find siblings here even if they
 				// have been "removed" from the MMR
-				// TODO - pruned/compacted MMR will need to maintain hashes of removed nodes
-				let res = self.get_from_file(x.1);
-				res
+				self.get_from_file(x.1)
 			})
 			.collect::<Vec<_>>();

@@ -361,6 +363,7 @@ where
 	pub fn push(&mut self, elmt: T) -> Result<u64, String> {
 		let elmt_pos = self.last_pos + 1;
 		let mut current_hash = elmt.hash_with_index(elmt_pos);
+
 		let mut to_append = vec![(current_hash, Some(elmt))];
 		let mut height = 0;
 		let mut pos = elmt_pos;

@@ -371,10 +374,12 @@ where
 		// creation of another parent.
 		while bintree_postorder_height(pos + 1) > height {
 			let left_sibling = bintree_jump_left_sibling(pos);
-			let left_elem = self.backend
-				.get(left_sibling, false)
-				.expect("missing left sibling in tree, should not have been pruned");
-			current_hash = left_elem.0 + current_hash;
+
+			let left_hash = self.backend
+				.get_from_file(left_sibling)
+				.ok_or("missing left sibling in tree, should not have been pruned")?;
+
+			current_hash = left_hash + current_hash;

 			to_append.push((current_hash.clone(), None));
 			height += 1;

@@ -421,14 +426,17 @@ where
 		// loop going up the tree, from node to parent, as long as we stay inside
 		// the tree.
 		let mut to_prune = vec![];

 		let mut current = position;
-		while current + 1 < self.last_pos {
+		while current + 1 <= self.last_pos {
 			let (parent, sibling, _) = family(current);
+
+			to_prune.push(current);
+
 			if parent > self.last_pos {
 				// can't prune when our parent isn't here yet
 				break;
 			}
-			to_prune.push(current);

 			// if we have a pruned sibling, we can continue up the tree
 			// otherwise we're done

@@ -520,7 +528,7 @@ where
 		Ok(())
 	}

-	/// Total size of the tree, including intermediary nodes an ignoring any
+	/// Total size of the tree, including intermediary nodes and ignoring any
 	/// pruning.
 	pub fn unpruned_size(&self) -> u64 {
 		self.last_pos

@@ -557,6 +565,34 @@ where
 			trace!(LOGGER, "{}", hashes);
 		}
 	}

+	/// Debugging utility to print information about the MMRs. Short version
+	/// only prints the last 8 nodes.
+	/// Looks in the underlying hash file and so ignores the remove log.
+	pub fn dump_from_file(&self, short: bool) {
+		let sz = self.unpruned_size();
+		if sz > 2000 && !short {
+			return;
+		}
+		let start = if short && sz > 7 { sz / 8 - 1 } else { 0 };
+		for n in start..(sz / 8 + 1) {
+			let mut idx = "".to_owned();
+			let mut hashes = "".to_owned();
+			for m in (n * 8)..(n + 1) * 8 {
+				if m >= sz {
+					break;
+				}
+				idx.push_str(&format!("{:>8} ", m + 1));
+				let ohs = self.get_from_file(m + 1);
+				match ohs {
+					Some(hs) => hashes.push_str(&format!("{} ", hs)),
+					None => hashes.push_str(&format!("{:>8} ", " .")),
+				}
+			}
+			debug!(LOGGER, "{}", idx);
+			debug!(LOGGER, "{}", hashes);
+		}
+	}
 }

 /// Maintains a list of previously pruned nodes in PMMR, compacting the list as

@@ -589,7 +625,10 @@ impl PruneList {
 	pub fn get_shift(&self, pos: u64) -> Option<u64> {
 		// get the position where the node at pos would fit in the pruned list, if
 		// it's already pruned, nothing to skip
-		match self.pruned_pos(pos) {
+
+		let pruned_idx = self.next_pruned_idx(pos);
+		let next_idx = self.pruned_nodes.binary_search(&pos).map(|x| x + 1).ok();
+		match pruned_idx.or(next_idx) {
 			None => None,
 			Some(idx) => {
 				// skip by the number of elements pruned in the preceding subtrees,

@@ -597,7 +636,14 @@ impl PruneList {
 				Some(
 					self.pruned_nodes[0..(idx as usize)]
 						.iter()
-						.map(|n| (1 << (bintree_postorder_height(*n) + 1)) - 1)
+						.map(|n| {
+							let height = bintree_postorder_height(*n);
+							// height 0, 1 node, offset 0 = 0 + 0
+							// height 1, 3 nodes, offset 2 = 1 + 1
+							// height 2, 7 nodes, offset 6 = 3 + 3
+							// height 3, 15 nodes, offset 14 = 7 + 7
+							2 * ((1 << height) - 1)
+						})
 						.sum(),
 				)
 			}
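The shift formula above follows directly from the comment table: a pruned subtree of height h occupied 2^(h+1) - 1 positions and is compacted down to just its root, so everything after it shifts back by 2 * (2^h - 1). A small check of that arithmetic:

// The per-subtree shift used by get_shift, reproducing the comment table.
fn subtree_shift(height: u64) -> u64 {
	// 2^(h+1) - 1 positions collapse to 1 retained root
	2 * ((1 << height) - 1)
}

fn main() {
	assert_eq!(subtree_shift(0), 0);  // height 0: 1 node, nothing removed yet
	assert_eq!(subtree_shift(1), 2);  // height 1: 3 nodes -> root kept, 2 removed
	assert_eq!(subtree_shift(2), 6);  // height 2: 7 nodes -> 6 removed
	assert_eq!(subtree_shift(3), 14); // height 3: 15 nodes -> 14 removed
}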
@@ -611,15 +657,28 @@ impl PruneList {
 	pub fn get_leaf_shift(&self, pos: u64) -> Option<u64> {
 		// get the position where the node at pos would fit in the pruned list, if
 		// it's already pruned, nothing to skip
-		match self.pruned_pos(pos) {
+
+		let pruned_idx = self.next_pruned_idx(pos);
+		let next_idx = self.pruned_nodes.binary_search(&pos).map(|x| x + 1).ok();
+
+		match pruned_idx.or(next_idx) {
 			None => None,
 			Some(idx) => {
-				// skip by the number of leaf nodes pruned in the preceding subtrees,
-				// which is just 2^height
 				Some(
+					// skip by the number of leaf nodes pruned in the preceding subtrees,
+					// which is just 2^height
+					// except in the case of height==0
+					// (where we want to treat the pruned tree as 0 leaves)
 					self.pruned_nodes[0..(idx as usize)]
 						.iter()
-						.map(|n| 1 << bintree_postorder_height(*n))
+						.map(|n| {
+							let height = bintree_postorder_height(*n);
+							if height == 0 {
+								0
+							} else {
+								1 << height
+							}
+						})
 						.sum(),
 				)
 			}
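The leaf shift is the same idea applied to the data file: a pruned subtree of height h contained 2^h leaves, except that a lone pruned leaf (height 0) is "pruned but not compacted" — the point of this whole change — and so contributes no shift yet. A small check:

// The per-subtree leaf shift used by get_leaf_shift.
fn subtree_leaf_shift(height: u64) -> u64 {
	if height == 0 {
		0 // a single pruned leaf stays in place until its parent is pruned
	} else {
		1 << height // a pruned subtree of height h held 2^h leaves
	}
}

fn main() {
	assert_eq!(subtree_leaf_shift(0), 0);
	assert_eq!(subtree_leaf_shift(1), 2); // both leaves under a pruned parent are gone
	assert_eq!(subtree_leaf_shift(2), 4);
}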
@@ -633,13 +692,14 @@ impl PruneList {
 		let mut current = pos;
 		loop {
 			let (parent, sibling, _) = family(current);
+
 			match self.pruned_nodes.binary_search(&sibling) {
 				Ok(idx) => {
 					self.pruned_nodes.remove(idx);
 					current = parent;
 				}
 				Err(_) => {
-					if let Err(idx) = self.pruned_nodes.binary_search(&current) {
+					if let Some(idx) = self.next_pruned_idx(current) {
 						self.pruned_nodes.insert(idx, current);
 					}
 					break;

@@ -648,10 +708,10 @@ impl PruneList {
 		}
 	}

-	/// Gets the position a new pruned node should take in the prune list.
+	/// Gets the index a new pruned node should take in the prune list.
 	/// If the node has already been pruned, either directly or through one of
 	/// its parents contained in the prune list, returns None.
-	pub fn pruned_pos(&self, pos: u64) -> Option<usize> {
+	pub fn next_pruned_idx(&self, pos: u64) -> Option<usize> {
 		match self.pruned_nodes.binary_search(&pos) {
 			Ok(_) => None,
 			Err(idx) => {
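next_pruned_idx leans on the two-sided contract of Vec::binary_search over the sorted prune list: Ok(_) means the position is already pruned (nothing to insert), while Err(idx) is exactly the index where it would be inserted to keep the list sorted. A simplified sketch of just that part (the real method also returns None when a parent of pos is in the list; that case is omitted here):

// Simplified core of next_pruned_idx: binary_search gives us both
// membership and the insertion point in one call.
fn next_pruned_idx(pruned_nodes: &[u64], pos: u64) -> Option<usize> {
	match pruned_nodes.binary_search(&pos) {
		Ok(_) => None,      // already pruned
		Err(idx) => Some(idx), // index where pos would be inserted
	}
}

fn main() {
	let pruned = vec![3u64, 7];
	assert_eq!(next_pruned_idx(&pruned, 3), None);    // already in the list
	assert_eq!(next_pruned_idx(&pruned, 5), Some(1)); // would sit between 3 and 7
	assert_eq!(next_pruned_idx(&pruned, 9), Some(2)); // appended at the end
}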
@@ -923,7 +983,7 @@ mod test {
 	/// Simple MMR backend implementation based on a Vector. Pruning does not
 	/// compact the Vec itself.
-	#[derive(Clone)]
+	#[derive(Clone, Debug)]
 	pub struct VecBackend<T>
 	where
 		T: PMMRable,

@@ -1492,7 +1552,7 @@ mod test {
 		}
 		assert_eq!(ba.used_size(), 9);

-		// pruning everything should only leave us the peaks
+		// pruning everything should only leave us with a single peak
 		{
 			let mut pmmr: PMMR<TestElem, _> = PMMR::at(&mut ba, sz);
 			for n in 1..16 {

@@ -1500,46 +1560,267 @@ mod test {
 			}
 			assert_eq!(orig_root, pmmr.root());
 		}
-		assert_eq!(ba.used_size(), 2);
+		assert_eq!(ba.used_size(), 1);
 	}

 	#[test]
-	fn pmmr_prune_list() {
+	fn pmmr_next_pruned_idx() {
 		let mut pl = PruneList::new();
-		pl.add(4);
-		assert_eq!(pl.pruned_nodes.len(), 1);
-		assert_eq!(pl.pruned_nodes[0], 4);
-		assert_eq!(pl.get_shift(5), Some(1));
-		assert_eq!(pl.get_shift(2), Some(0));
-		assert_eq!(pl.get_shift(4), None);
-
-		pl.add(5);
-		assert_eq!(pl.pruned_nodes.len(), 1);
-		assert_eq!(pl.pruned_nodes[0], 6);
-		assert_eq!(pl.get_shift(8), Some(3));
-		assert_eq!(pl.get_shift(2), Some(0));
-		assert_eq!(pl.get_shift(5), None);
+		assert_eq!(pl.pruned_nodes.len(), 0);
+		assert_eq!(pl.next_pruned_idx(1), Some(0));
+		assert_eq!(pl.next_pruned_idx(2), Some(0));
+		assert_eq!(pl.next_pruned_idx(3), Some(0));

 		pl.add(2);
-		assert_eq!(pl.pruned_nodes.len(), 2);
-		assert_eq!(pl.pruned_nodes[0], 2);
-		assert_eq!(pl.get_shift(8), Some(4));
-		assert_eq!(pl.get_shift(1), Some(0));
-
-		pl.add(8);
-		pl.add(11);
-		assert_eq!(pl.pruned_nodes.len(), 4);
+		assert_eq!(pl.pruned_nodes.len(), 1);
+		assert_eq!(pl.pruned_nodes, [2]);
+		assert_eq!(pl.next_pruned_idx(1), Some(0));
+		assert_eq!(pl.next_pruned_idx(2), None);
+		assert_eq!(pl.next_pruned_idx(3), Some(1));
+		assert_eq!(pl.next_pruned_idx(4), Some(1));

 		pl.add(1);
-		assert_eq!(pl.pruned_nodes.len(), 3);
-		assert_eq!(pl.pruned_nodes[0], 7);
-		assert_eq!(pl.get_shift(12), Some(9));
+		assert_eq!(pl.pruned_nodes.len(), 1);
+		assert_eq!(pl.pruned_nodes, [3]);
+		assert_eq!(pl.next_pruned_idx(1), None);
+		assert_eq!(pl.next_pruned_idx(2), None);
+		assert_eq!(pl.next_pruned_idx(3), None);
+		assert_eq!(pl.next_pruned_idx(4), Some(1));
+		assert_eq!(pl.next_pruned_idx(5), Some(1));

-		pl.add(12);
-		assert_eq!(pl.pruned_nodes.len(), 3);
-		assert_eq!(pl.get_shift(12), None);
-		assert_eq!(pl.get_shift(9), Some(8));
-		assert_eq!(pl.get_shift(17), Some(11));
+		pl.add(3);
+		assert_eq!(pl.pruned_nodes.len(), 1);
+		assert_eq!(pl.pruned_nodes, [3]);
+		assert_eq!(pl.next_pruned_idx(1), None);
+		assert_eq!(pl.next_pruned_idx(2), None);
+		assert_eq!(pl.next_pruned_idx(3), None);
+		assert_eq!(pl.next_pruned_idx(4), Some(1));
+		assert_eq!(pl.next_pruned_idx(5), Some(1));
 	}

+	#[test]
+	fn pmmr_prune_leaf_shift() {
+		let mut pl = PruneList::new();
+
+		// start with an empty prune list (nothing shifted)
+		assert_eq!(pl.pruned_nodes.len(), 0);
+		assert_eq!(pl.get_leaf_shift(1), Some(0));
+		assert_eq!(pl.get_leaf_shift(2), Some(0));
+		assert_eq!(pl.get_leaf_shift(4), Some(0));
+
+		// now add a single leaf pos to the prune list
+		// note this does not shift anything (we only start shifting after pruning a
+		// parent)
+		pl.add(1);
+		assert_eq!(pl.pruned_nodes.len(), 1);
+		assert_eq!(pl.pruned_nodes, [1]);
+		assert_eq!(pl.get_leaf_shift(1), Some(0));
+		assert_eq!(pl.get_leaf_shift(2), Some(0));
+		assert_eq!(pl.get_leaf_shift(3), Some(0));
+		assert_eq!(pl.get_leaf_shift(4), Some(0));
+
+		// now add the sibling leaf pos (pos 1 and pos 2) which will prune the parent
+		// at pos 3 this in turn will "leaf shift" the leaf at pos 3 by 2
+		pl.add(2);
+		assert_eq!(pl.pruned_nodes.len(), 1);
+		assert_eq!(pl.pruned_nodes, [3]);
+		assert_eq!(pl.get_leaf_shift(1), None);
+		assert_eq!(pl.get_leaf_shift(2), None);
+		assert_eq!(pl.get_leaf_shift(3), Some(2));
+		assert_eq!(pl.get_leaf_shift(4), Some(2));
+		assert_eq!(pl.get_leaf_shift(5), Some(2));
+
+		// now prune an additional leaf at pos 4
+		// leaf offset of subsequent pos will be 2
+		// 00100120
+		pl.add(4);
+		assert_eq!(pl.pruned_nodes, [3, 4]);
+		assert_eq!(pl.get_leaf_shift(1), None);
+		assert_eq!(pl.get_leaf_shift(2), None);
+		assert_eq!(pl.get_leaf_shift(3), Some(2));
+		assert_eq!(pl.get_leaf_shift(4), Some(2));
+		assert_eq!(pl.get_leaf_shift(5), Some(2));
+		assert_eq!(pl.get_leaf_shift(6), Some(2));
+		assert_eq!(pl.get_leaf_shift(7), Some(2));
+		assert_eq!(pl.get_leaf_shift(8), Some(2));
+
+		// now prune the sibling at pos 5
+		// the two smaller subtrees (pos 3 and pos 6) are rolled up to larger subtree
+		// (pos 7) the leaf offset is now 4 to cover entire subtree containing first
+		// 4 leaves 00100120
+		pl.add(5);
+		assert_eq!(pl.pruned_nodes, [7]);
+		assert_eq!(pl.get_leaf_shift(1), None);
+		assert_eq!(pl.get_leaf_shift(2), None);
+		assert_eq!(pl.get_leaf_shift(3), None);
+		assert_eq!(pl.get_leaf_shift(4), None);
+		assert_eq!(pl.get_leaf_shift(5), None);
+		assert_eq!(pl.get_leaf_shift(6), None);
+		assert_eq!(pl.get_leaf_shift(7), Some(4));
+		assert_eq!(pl.get_leaf_shift(8), Some(4));
+		assert_eq!(pl.get_leaf_shift(9), Some(4));
+
+		// now check we can prune some of these in an arbitrary order
+		// final result is one leaf (pos 2) and one small subtree (pos 6) pruned
+		// with leaf offset of 2 to account for the pruned subtree
+		let mut pl = PruneList::new();
+		pl.add(2);
+		pl.add(5);
+		pl.add(4);
+		assert_eq!(pl.pruned_nodes, [2, 6]);
+		assert_eq!(pl.get_leaf_shift(1), Some(0));
+		assert_eq!(pl.get_leaf_shift(2), Some(0));
+		assert_eq!(pl.get_leaf_shift(3), Some(0));
+		assert_eq!(pl.get_leaf_shift(4), None);
+		assert_eq!(pl.get_leaf_shift(5), None);
+		assert_eq!(pl.get_leaf_shift(6), Some(2));
+		assert_eq!(pl.get_leaf_shift(7), Some(2));
+		assert_eq!(pl.get_leaf_shift(8), Some(2));
+		assert_eq!(pl.get_leaf_shift(9), Some(2));
+
+		pl.add(1);
+		assert_eq!(pl.pruned_nodes, [7]);
+		assert_eq!(pl.get_leaf_shift(1), None);
+		assert_eq!(pl.get_leaf_shift(2), None);
+		assert_eq!(pl.get_leaf_shift(3), None);
+		assert_eq!(pl.get_leaf_shift(4), None);
+		assert_eq!(pl.get_leaf_shift(5), None);
+		assert_eq!(pl.get_leaf_shift(6), None);
+		assert_eq!(pl.get_leaf_shift(7), Some(4));
+		assert_eq!(pl.get_leaf_shift(8), Some(4));
+		assert_eq!(pl.get_leaf_shift(9), Some(4));
+	}
+
+	#[test]
+	fn pmmr_prune_shift() {
+		let mut pl = PruneList::new();
+		assert!(pl.pruned_nodes.is_empty());
+		assert_eq!(pl.get_shift(1), Some(0));
+		assert_eq!(pl.get_shift(2), Some(0));
+		assert_eq!(pl.get_shift(3), Some(0));
+
+		// prune a single leaf node
+		// pruning only a leaf node does not shift any subsequent pos
+		// we will only start shifting when a parent can be pruned
+		pl.add(1);
+		assert_eq!(pl.pruned_nodes, [1]);
+		assert_eq!(pl.get_shift(1), Some(0));
+		assert_eq!(pl.get_shift(2), Some(0));
+		assert_eq!(pl.get_shift(3), Some(0));
+
+		pl.add(2);
+		assert_eq!(pl.pruned_nodes, [3]);
+		assert_eq!(pl.get_shift(1), None);
+		assert_eq!(pl.get_shift(2), None);
+		// pos 3 is in the prune list, so removed but not compacted, but still shifted
+		assert_eq!(pl.get_shift(3), Some(2));
+		assert_eq!(pl.get_shift(4), Some(2));
+		assert_eq!(pl.get_shift(5), Some(2));
+		assert_eq!(pl.get_shift(6), Some(2));
+
+		// pos 3 is not a leaf and is already in prune list
+		// prune it and check we are still consistent
+		pl.add(3);
+		assert_eq!(pl.pruned_nodes, [3]);
+		assert_eq!(pl.get_shift(1), None);
+		assert_eq!(pl.get_shift(2), None);
+		// pos 3 is in the prune list, so removed but not compacted, but still shifted
+		assert_eq!(pl.get_shift(3), Some(2));
+		assert_eq!(pl.get_shift(4), Some(2));
+		assert_eq!(pl.get_shift(5), Some(2));
+		assert_eq!(pl.get_shift(6), Some(2));
+
+		pl.add(4);
+		assert_eq!(pl.pruned_nodes, [3, 4]);
+		assert_eq!(pl.get_shift(1), None);
+		assert_eq!(pl.get_shift(2), None);
+		// pos 3 is in the prune list, so removed but not compacted, but still shifted
+		assert_eq!(pl.get_shift(3), Some(2));
+		// pos 4 is also in the prune list and also shifted by same amount
+		assert_eq!(pl.get_shift(4), Some(2));
+		// subsequent nodes also shifted consistently
+		assert_eq!(pl.get_shift(5), Some(2));
+		assert_eq!(pl.get_shift(6), Some(2));
+
+		pl.add(5);
+		assert_eq!(pl.pruned_nodes, [7]);
+		assert_eq!(pl.get_shift(1), None);
+		assert_eq!(pl.get_shift(2), None);
+		assert_eq!(pl.get_shift(3), None);
+		assert_eq!(pl.get_shift(4), None);
+		assert_eq!(pl.get_shift(5), None);
+		assert_eq!(pl.get_shift(6), None);
+		// everything prior to pos 7 is compacted away
+		// pos 7 is shifted by 6 to account for this
+		assert_eq!(pl.get_shift(7), Some(6));
+		assert_eq!(pl.get_shift(8), Some(6));
+		assert_eq!(pl.get_shift(9), Some(6));
+
+		// prune a bunch more
+		for x in 6..1000 {
+			pl.add(x);
+		}
+		// and check we shift by a large number (hopefully the correct number...)
+		assert_eq!(pl.get_shift(1010), Some(996));
+
+		let mut pl = PruneList::new();
+		pl.add(2);
+		pl.add(5);
+		pl.add(4);
+		assert_eq!(pl.pruned_nodes, [2, 6]);
+		assert_eq!(pl.get_shift(1), Some(0));
+		assert_eq!(pl.get_shift(2), Some(0));
+		assert_eq!(pl.get_shift(3), Some(0));
+		assert_eq!(pl.get_shift(4), None);
+		assert_eq!(pl.get_shift(5), None);
+		assert_eq!(pl.get_shift(6), Some(2));
+		assert_eq!(pl.get_shift(7), Some(2));
+		assert_eq!(pl.get_shift(8), Some(2));
+		assert_eq!(pl.get_shift(9), Some(2));
+
+		// TODO - put some of these tests back in place for completeness
+		//
+		// let mut pl = PruneList::new();
+		// pl.add(4);
+		// assert_eq!(pl.pruned_nodes.len(), 1);
+		// assert_eq!(pl.pruned_nodes, [4]);
+		// assert_eq!(pl.get_shift(1), Some(0));
+		// assert_eq!(pl.get_shift(2), Some(0));
+		// assert_eq!(pl.get_shift(3), Some(0));
+		// assert_eq!(pl.get_shift(4), None);
+		// assert_eq!(pl.get_shift(5), Some(1));
+		// assert_eq!(pl.get_shift(6), Some(1));
+		//
+		// pl.add(5);
+		// assert_eq!(pl.pruned_nodes.len(), 1);
+		// assert_eq!(pl.pruned_nodes[0], 6);
+		// assert_eq!(pl.get_shift(8), Some(3));
+		// assert_eq!(pl.get_shift(2), Some(0));
+		// assert_eq!(pl.get_shift(5), None);
+		//
+		// pl.add(2);
+		// assert_eq!(pl.pruned_nodes.len(), 2);
+		// assert_eq!(pl.pruned_nodes[0], 2);
+		// assert_eq!(pl.get_shift(8), Some(4));
+		// assert_eq!(pl.get_shift(1), Some(0));
+		//
+		// pl.add(8);
+		// pl.add(11);
+		// assert_eq!(pl.pruned_nodes.len(), 4);
+		//
+		// pl.add(1);
+		// assert_eq!(pl.pruned_nodes.len(), 3);
+		// assert_eq!(pl.pruned_nodes[0], 7);
+		// assert_eq!(pl.get_shift(12), Some(9));
+		//
+		// pl.add(12);
+		// assert_eq!(pl.pruned_nodes.len(), 3);
+		// assert_eq!(pl.get_shift(12), None);
+		// assert_eq!(pl.get_shift(9), Some(8));
+		// assert_eq!(pl.get_shift(17), Some(11));
+	}

 	#[test]
@@ -657,11 +657,10 @@ impl Input {
 			if lock_height > height {
 				return Err(Error::ImmatureCoinbase);
 			}

 			debug!(
 				LOGGER,
-				"input: verify_maturity: success, coinbase maturity via Merkle proof: {} vs. {}",
-				lock_height,
-				height,
+				"input: verify_maturity: success via Merkle proof: {} vs {}", lock_height, height,
 			);
 		}
 		Ok(())

@@ -38,7 +38,7 @@ macro_rules! try_map_vec {
 }

 /// Eliminates some of the verbosity in having iter and collect
-/// around every fitler_map call.
+/// around every filter_map call.
 #[macro_export]
 macro_rules! filter_map_vec {
 	($thing:expr, $mapfn:expr ) => {

@@ -63,17 +63,6 @@ macro_rules! tee {
 	}
 }

-#[macro_export]
-macro_rules! try_to_o {
-	($trying:expr) => {{
-		let tried = $trying;
-		if let Err(e) = tried {
-			return Some(e);
-		}
-		tried.unwrap()
-	}}
-}
-
 /// Eliminate some of the boilerplate of deserialization (package ser) by
 /// passing just the list of reader function (with optional single param)
 /// Example before:

@@ -393,7 +393,7 @@ impl NetToChainAdapter {
 		Err(e) => {
 			debug!(
 				LOGGER,
-				"adapter: process_block :block {} refused by chain: {:?}", bhash, e
+				"adapter: process_block: block {} refused by chain: {:?}", bhash, e
 			);
 			true
 		}

@@ -245,7 +245,7 @@ impl Keychain {
 		&self,
 		amount: u64,
 		key_id: &Identifier,
-		commit: Commitment,
+		_commit: Commitment,
 		extra_data: Option<Vec<u8>>,
 		msg: ProofMessage,
 	) -> Result<RangeProof, Error> {

@@ -22,7 +22,6 @@
 extern crate byteorder;
-extern crate env_logger;
 #[macro_use]
 extern crate grin_core as core;
 extern crate grin_util as util;
 extern crate libc;
@@ -17,7 +17,7 @@ use std::fs;
 use std::io;
 use std::marker::PhantomData;

-use core::core::pmmr::{self, Backend};
+use core::core::pmmr::{self, family, Backend};
 use core::ser::{self, PMMRable, Readable, Reader, Writeable, Writer};
 use core::core::hash::Hash;
 use util::LOGGER;

@@ -29,7 +29,7 @@ const PMMR_RM_LOG_FILE: &'static str = "pmmr_rm_log.bin";
 const PMMR_PRUNED_FILE: &'static str = "pmmr_pruned.bin";

 /// Maximum number of nodes in the remove log before it gets flushed
-pub const RM_LOG_MAX_NODES: usize = 10000;
+pub const RM_LOG_MAX_NODES: usize = 10_000;

 /// Metadata for the PMMR backend's AppendOnlyFile, which can be serialized and
 /// stored

@@ -143,19 +143,27 @@ where
 		if self.rm_log.includes(position) {
 			return None;
 		}

 		// ... or in the prune list
-		let prune_shift = match self.pruned_nodes.get_leaf_shift(position) {
-			Some(shift) => shift,
-			None => return None,
-		};
+		// let prune_shift = match self.pruned_nodes.get_leaf_shift(position) {
+		// 	Some(shift) => shift,
+		// 	None => return None,
+		// };

 		let hash_val = self.get_from_file(position);
 		if !include_data {
 			return hash_val.map(|hash| (hash, None));
 		}

+		// if this is not a leaf then we have no data
+		if !pmmr::is_leaf(position) {
+			return hash_val.map(|hash| (hash, None));
+		}
+
 		// Optionally read flatfile storage to get data element
-		let flatfile_pos = pmmr::n_leaves(position) - 1 - prune_shift;
+		// let flatfile_pos = pmmr::n_leaves(position) - 1 - prune_shift;
+		let flatfile_pos = pmmr::n_leaves(position) - 1;

 		let record_len = T::len();
 		let file_offset = flatfile_pos as usize * T::len();
 		let data = self.data_file.read(file_offset, record_len);

@@ -171,12 +179,7 @@ where
 			}
 		};

-		// TODO - clean this up
-		if let Some(hash) = hash_val {
-			return Some((hash, data));
-		} else {
-			return None;
-		}
+		hash_val.map(|x| (x, data))
 	}

 	fn rewind(&mut self, position: u64, index: u32) -> Result<(), String> {

@@ -189,7 +192,7 @@ where
 		let file_pos = (position - shift) * (record_len as u64);
 		self.hash_file.rewind(file_pos);

-		//Data file
+		// Data file
 		let flatfile_pos = pmmr::n_leaves(position) - 1;
 		let file_pos = (flatfile_pos as usize + 1) * T::len();
 		self.data_file.rewind(file_pos as u64);

@@ -211,7 +214,7 @@ where
 impl<T> PMMRBackend<T>
 where
-	T: PMMRable,
+	T: PMMRable + ::std::fmt::Debug,
 {
 	/// Instantiates a new PMMR backend that will use the provided directory to
 	/// store its files.

@@ -247,6 +250,7 @@ where
 	/// fully sync'd size.
 	pub fn unpruned_size(&self) -> io::Result<u64> {
 		let total_shift = self.pruned_nodes.get_shift(::std::u64::MAX).unwrap();
+
 		let record_len = 32;
 		let sz = self.hash_file.size()?;
 		Ok(sz / record_len + total_shift)

@@ -281,6 +285,7 @@ where
 			));
 		}
 		self.rm_log.flush()?;
+
 		Ok(())
 	}

@@ -334,82 +339,118 @@ where
 			return Ok(false);
 		}

-		// 0. validate none of the nodes in the rm log are in the prune list (to
-		// avoid accidental double compaction)
-		for pos in &self.rm_log.removed[..] {
-			if let None = self.pruned_nodes.pruned_pos(pos.0) {
-				// TODO we likely can recover from this by directly jumping to 3
-				error!(
-					LOGGER,
-					"The remove log contains nodes that are already in the pruned \
-					 list, a previous compaction likely failed."
-				);
-				return Ok(false);
-			}
-		}
-
-		// 1. save hash file to a compact copy, skipping data that's in the
-		// remove list
+		// Paths for tmp hash and data files.
 		let tmp_prune_file_hash = format!("{}/{}.hashprune", self.data_dir, PMMR_HASH_FILE);
-		let record_len = 32;
-		let to_rm = filter_map_vec!(self.rm_log.removed, |&(pos, idx)| if idx < cutoff_index {
-			let shift = self.pruned_nodes.get_shift(pos);
-			Some((pos - 1 - shift.unwrap()) * record_len)
-		} else {
-			None
-		});
-		self.hash_file
-			.save_prune(tmp_prune_file_hash.clone(), to_rm, record_len, &prune_noop)?;
+		// let tmp_prune_file_data = format!("{}/{}.dataprune", self.data_dir,
+		// PMMR_DATA_FILE);

-		// 2. And the same with the data file
-		let tmp_prune_file_data = format!("{}/{}.dataprune", self.data_dir, PMMR_DATA_FILE);
-		let record_len = T::len() as u64;
-		let to_rm = filter_map_vec!(self.rm_log.removed, |&(pos, idx)| {
-			if pmmr::bintree_postorder_height(pos) == 0 && idx < cutoff_index {
-				let shift = self.pruned_nodes.get_leaf_shift(pos).unwrap();
-				let pos = pmmr::n_leaves(pos as u64);
-				Some((pos - 1 - shift) * record_len)
-			} else {
-				None
-			}
-		});
-		self.data_file
-			.save_prune(tmp_prune_file_data.clone(), to_rm, record_len, prune_cb)?;
+		// Pos we want to get rid of.
+		// Filtered by cutoff index.
+		let rm_pre_cutoff = self.rm_log.removed_pre_cutoff(cutoff_index);
+		// Filtered to exclude the subtree "roots".
+		let pos_to_rm = removed_excl_roots(rm_pre_cutoff.clone());
+		// Filtered for leaves only.
+		// let leaf_pos_to_rm = removed_leaves(pos_to_rm.clone());

-		// 3. update the prune list and save it in place
-		for &(rm_pos, idx) in &self.rm_log.removed[..] {
-			if idx < cutoff_index {
-				self.pruned_nodes.add(rm_pos);
-			}
+		// 1. Save compact copy of the hash file, skipping removed data.
+		{
+			let record_len = 32;
+
+			let off_to_rm = pos_to_rm
+				.iter()
+				.map(|&pos| {
+					let shift = self.pruned_nodes.get_shift(pos);
+					(pos - 1 - shift.unwrap()) * record_len
+				})
+				.collect();
+
+			self.hash_file.save_prune(
+				tmp_prune_file_hash.clone(),
+				off_to_rm,
+				record_len,
+				&prune_noop,
+			)?;
 		}
-		write_vec(
-			format!("{}/{}", self.data_dir, PMMR_PRUNED_FILE),
-			&self.pruned_nodes.pruned_nodes,
-		)?;

-		// 4. move the compact copy of hashes to the hash file and re-open it
+		// 2. Save compact copy of the data file, skipping removed leaves.
+		// {
+		// 	let record_len = T::len() as u64;
+		//
+		// 	let off_to_rm = leaf_pos_to_rm
+		// 		.iter()
+		// 		.map(|pos| {
+		// 			let shift = self.pruned_nodes.get_leaf_shift(*pos);
+		// 			(pos - 1 - shift.unwrap()) * record_len
+		// 		})
+		// 		.collect::<Vec<_>>();
+		//
+		// 	println!("compacting the data file: pos {:?}, offs {:?}", leaf_pos_to_rm,
+		// 	off_to_rm);
+		//
+		// 	self.data_file.save_prune(
+		// 		tmp_prune_file_data.clone(),
+		// 		off_to_rm,
+		// 		record_len,
+		// 		prune_cb,
+		// 	)?;
+		// }
+
+		// 3. Update the prune list and save it in place.
+		{
+			for &pos in &rm_pre_cutoff {
+				self.pruned_nodes.add(pos);
+			}
+
+			write_vec(
+				format!("{}/{}", self.data_dir, PMMR_PRUNED_FILE),
+				&self.pruned_nodes.pruned_nodes,
+			)?;
+		}
+
+		// 4. Rename the compact copy of hash file and reopen it.
 		fs::rename(
 			tmp_prune_file_hash.clone(),
 			format!("{}/{}", self.data_dir, PMMR_HASH_FILE),
 		)?;
 		self.hash_file = AppendOnlyFile::open(format!("{}/{}", self.data_dir, PMMR_HASH_FILE), 0)?;

-		// 5. and the same with the data file
-		fs::rename(
-			tmp_prune_file_data.clone(),
-			format!("{}/{}", self.data_dir, PMMR_DATA_FILE),
-		)?;
-		self.data_file = AppendOnlyFile::open(format!("{}/{}", self.data_dir, PMMR_DATA_FILE), 0)?;
+		// 5. Rename the compact copy of the data file and reopen it.
+		// fs::rename(
+		// 	tmp_prune_file_data.clone(),
+		// 	format!("{}/{}", self.data_dir, PMMR_DATA_FILE),
+		// )?;
+		// self.data_file = AppendOnlyFile::open(format!("{}/{}", self.data_dir,
+		// PMMR_DATA_FILE), 0)?;

-		// 6. truncate the rm log
-		self.rm_log.removed = self.rm_log
-			.removed
-			.iter()
-			.filter(|&&(_, idx)| idx >= cutoff_index)
-			.map(|x| *x)
-			.collect();
+		// 6. Truncate the rm log based on pos removed.
+		// Excluding roots which remain in rm log.
+		self.rm_log
+			.removed
+			.retain(|&(pos, _)| !pos_to_rm.binary_search(&pos).is_ok());
 		self.rm_log.flush()?;

 		Ok(true)
 	}
 }

+/// Filter remove list to exclude roots.
+/// We want to keep roots around so we have hashes for Merkle proofs.
+fn removed_excl_roots(removed: Vec<u64>) -> Vec<u64> {
+	removed
+		.iter()
+		.filter(|&pos| {
+			let (parent_pos, _, _) = family(*pos);
+			removed.binary_search(&parent_pos).is_ok()
+		})
+		.cloned()
+		.collect()
+}
+
+/// Filter remove list to only include leaf positions.
+fn removed_leaves(removed: Vec<u64>) -> Vec<u64> {
+	removed
+		.iter()
+		.filter(|&pos| pmmr::is_leaf(*pos))
+		.cloned()
+		.collect()
+}
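removed_excl_roots is the heart of the "retain leaves and roots until parents are pruned" behavior in this PR's title: a removed position is only physically compacted if its parent was removed too, so the topmost node of each pruned subtree keeps its hash on disk for Merkle proofs. A toy illustration of that filter, with the parent relation passed in as a closure because grin's family() position arithmetic is not reproduced here (the toy parent map is made up):

// Shape of removed_excl_roots: keep a removed pos for compaction only
// if its parent was also removed; otherwise it is a subtree "root"
// whose hash must survive.
fn removed_excl_roots(removed: Vec<u64>, parent: impl Fn(u64) -> u64) -> Vec<u64> {
	removed
		.iter()
		.filter(|&&pos| removed.binary_search(&parent(pos)).is_ok())
		.cloned()
		.collect()
}

fn main() {
	// toy parent map: 1 and 2 are children of 3; 3's parent (7) is not removed
	let parent = |pos: u64| match pos {
		1 | 2 => 3,
		3 => 7,
		_ => 0,
	};
	let removed = vec![1, 2, 3];
	// 1 and 2 drop out of the files; the root at pos 3 is kept
	assert_eq!(removed_excl_roots(removed, parent), vec![1, 2]);
}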
@@ -28,7 +28,7 @@ use libc::{ftruncate as ftruncate64, off_t as off64_t};
 use core::ser;

-/// Noop
+/// A no-op function for doing nothing with some pruned data.
 pub fn prune_noop(_pruned_data: &[u8]) {}

 /// Wrapper for a file that can be read at any position (random read) but for

@@ -163,41 +163,46 @@ impl AppendOnlyFile {
 	where
 		T: Fn(&[u8]),
 	{
-		let mut reader = File::open(self.path.clone())?;
-		let mut writer = File::create(target)?;
+		if prune_offs.is_empty() {
+			fs::copy(self.path.clone(), target.clone())?;
+			Ok(())
+		} else {
+			let mut reader = File::open(self.path.clone())?;
+			let mut writer = File::create(target.clone())?;

-		// align the buffer on prune_len to avoid misalignments
-		let mut buf = vec![0; (prune_len * 256) as usize];
-		let mut read = 0;
-		let mut prune_pos = 0;
-		loop {
-			// fill our buffer
-			let len = match reader.read(&mut buf) {
-				Ok(0) => return Ok(()),
-				Ok(len) => len,
-				Err(ref e) if e.kind() == ErrorKind::Interrupted => continue,
-				Err(e) => return Err(e),
-			} as u64;
+			// align the buffer on prune_len to avoid misalignments
+			let mut buf = vec![0; (prune_len * 256) as usize];
+			let mut read = 0;
+			let mut prune_pos = 0;
+			loop {
+				// fill our buffer
+				let len = match reader.read(&mut buf) {
+					Ok(0) => return Ok(()),
+					Ok(len) => len,
+					Err(ref e) if e.kind() == ErrorKind::Interrupted => continue,
+					Err(e) => return Err(e),
+				} as u64;

-			// write the buffer, except if we prune offsets in the current span,
-			// in which case we skip
-			let mut buf_start = 0;
-			while prune_offs[prune_pos] >= read && prune_offs[prune_pos] < read + len {
-				let prune_at = prune_offs[prune_pos] as usize;
-				if prune_at != buf_start {
-					writer.write_all(&buf[buf_start..prune_at])?;
-				} else {
-					prune_cb(&buf[buf_start..prune_at]);
-				}
-				buf_start = prune_at + (prune_len as usize);
-				if prune_offs.len() > prune_pos + 1 {
-					prune_pos += 1;
-				} else {
-					break;
-				}
-			}
-			writer.write_all(&mut buf[buf_start..(len as usize)])?;
-			read += len;
-		}
+				// write the buffer, except if we prune offsets in the current span,
+				// in which case we skip
+				let mut buf_start = 0;
+				while prune_offs[prune_pos] >= read && prune_offs[prune_pos] < read + len {
+					let prune_at = prune_offs[prune_pos] as usize;
+					if prune_at != buf_start {
+						writer.write_all(&buf[buf_start..prune_at])?;
+					} else {
+						prune_cb(&buf[buf_start..prune_at]);
+					}
+					buf_start = prune_at + (prune_len as usize);
+					if prune_offs.len() > prune_pos + 1 {
+						prune_pos += 1;
+					} else {
+						break;
+					}
+				}
+				writer.write_all(&mut buf[buf_start..(len as usize)])?;
+				read += len;
+			}
+		}
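save_prune copies the file while skipping a fixed-size record at each prune offset, with a fast path that just copies the whole file when there is nothing to prune. An in-memory model of the same per-record logic (a simplified sketch; the real code streams the file through aligned chunks and reports pruned spans via prune_cb):

// In-memory model of save_prune: copy `data`, skipping a record of
// `prune_len` bytes at each offset in `prune_offs` (sorted, ascending).
fn prune_records(data: &[u8], prune_offs: &[u64], prune_len: u64) -> Vec<u8> {
	if prune_offs.is_empty() {
		// fast path, mirroring the fs::copy branch above
		return data.to_vec();
	}
	let mut out = Vec::with_capacity(data.len());
	let mut cursor = 0usize;
	for &off in prune_offs {
		let off = off as usize;
		out.extend_from_slice(&data[cursor..off]); // keep everything before the record
		cursor = off + prune_len as usize;         // skip the pruned record
	}
	out.extend_from_slice(&data[cursor..]); // keep the tail
	out
}

fn main() {
	// four 2-byte records; prune the records at offsets 2 and 6
	let data = [1u8, 1, 2, 2, 3, 3, 4, 4];
	assert_eq!(prune_records(&data, &[2, 6], 2), vec![1, 1, 3, 3]);
}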
@@ -333,6 +338,23 @@ impl RemoveLog {
 	pub fn len(&self) -> usize {
 		self.removed.len()
 	}

+	/// Return vec of pos for removed elements before the provided cutoff index.
+	/// Useful for when we prune and compact an MMR.
+	pub fn removed_pre_cutoff(&self, cutoff_idx: u32) -> Vec<u64> {
+		self.removed
+			.iter()
+			.filter_map(
+				|&(pos, idx)| {
+					if idx < cutoff_idx {
+						Some(pos)
+					} else {
+						None
+					}
+				},
+			)
+			.collect()
+	}
 }

 fn include_tuple(v: &Vec<(u64, u32)>, e: u64) -> bool {
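Each rm_log entry pairs a position with the index (effectively the block height) at which it was removed; removed_pre_cutoff selects only removals old enough to be safely compacted. A short usage sketch with made-up data:

// Usage sketch of the cutoff filter: only removals recorded strictly
// before the cutoff index take part in compaction.
fn removed_pre_cutoff(removed: &[(u64, u32)], cutoff_idx: u32) -> Vec<u64> {
	removed
		.iter()
		.filter_map(|&(pos, idx)| if idx < cutoff_idx { Some(pos) } else { None })
		.collect()
}

fn main() {
	let rm_log = vec![(1u64, 1u32), (2, 1), (8, 2), (11, 3)];
	// with a cutoff of 2, only the removals recorded at index 1 qualify
	assert_eq!(removed_pre_cutoff(&rm_log, 2), vec![1, 2]);
}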
@ -21,7 +21,7 @@ use std::fs;
|
|||
|
||||
use core::ser::*;
|
||||
use core::core::pmmr::{Backend, PMMR};
|
||||
use core::core::hash::{Hash, Hashed};
|
||||
use core::core::hash::Hash;
|
||||
use store::types::prune_noop;
|
||||
|
||||
#[test]
|
||||
|
@ -56,6 +56,74 @@ fn pmmr_append() {
|
|||
teardown(data_dir);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn pmmr_compact_leaf_sibling() {
|
||||
let (data_dir, elems) = setup("compact_leaf_sibling");
|
||||
|
||||
// setup the mmr store with all elements
|
||||
let mut backend = store::pmmr::PMMRBackend::new(data_dir.to_string(), None).unwrap();
|
||||
let mmr_size = load(0, &elems[..], &mut backend);
|
||||
backend.sync().unwrap();
|
||||
|
||||
// On far left of the MMR -
|
||||
// pos 1 and 2 are leaves (and siblings)
|
||||
// the parent is pos 3
|
||||
|
||||
let (pos_1_hash, pos_2_hash, pos_3_hash) = {
|
||||
let mut pmmr = PMMR::at(&mut backend, mmr_size);
|
||||
(
|
||||
pmmr.get(1, false).unwrap().0,
|
||||
pmmr.get(2, false).unwrap().0,
|
||||
pmmr.get(3, false).unwrap().0,
|
||||
)
|
||||
};
|
||||
|
||||
// prune pos 1
|
||||
{
|
||||
let mut pmmr = PMMR::at(&mut backend, mmr_size);
|
||||
pmmr.prune(1, 1).unwrap();
|
||||
|
||||
// prune pos 8 as well to push the remove list past the cutoff
|
||||
pmmr.prune(8, 1).unwrap();
|
||||
}
|
||||
backend.sync().unwrap();
|
||||
|
||||
// // check pos 1, 2, 3 are in the state we expect after pruning
|
||||
{
|
||||
let pmmr = PMMR::at(&mut backend, mmr_size);
|
||||
|
||||
// check that pos 1 is "removed"
|
||||
assert_eq!(pmmr.get(1, false), None);
|
||||
|
||||
// check that pos 2 and 3 are unchanged
|
||||
assert_eq!(pmmr.get(2, false).unwrap().0, pos_2_hash);
|
||||
assert_eq!(pmmr.get(3, false).unwrap().0, pos_3_hash);
|
||||
}
|
||||
|
||||
// check we can still retrieve the "removed" element at pos 1
|
||||
// from the backend hash file.
|
||||
assert_eq!(backend.get_from_file(1).unwrap(), pos_1_hash);
|
||||
|
||||
// aggressively compact the PMMR files
|
||||
backend.check_compact(1, 2, &prune_noop).unwrap();
|
||||
|
||||
// check pos 1, 2, 3 are in the state we expect after compacting
|
||||
{
|
||||
let pmmr = PMMR::at(&mut backend, mmr_size);
|
||||
|
||||
// check that pos 1 is "removed"
|
||||
assert_eq!(pmmr.get(1, false), None);
|
||||
|
||||
// check that pos 2 and 3 are unchanged
|
||||
assert_eq!(pmmr.get(2, false).unwrap().0, pos_2_hash);
|
||||
assert_eq!(pmmr.get(3, false).unwrap().0, pos_3_hash);
|
||||
}
|
||||
|
||||
// Check we can still retrieve the "removed" hash at pos 1 from the hash file.
|
||||
// It should still be available even after pruning and compacting.
|
||||
assert_eq!(backend.get_from_file(1).unwrap(), pos_1_hash);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn pmmr_prune_compact() {
|
||||
let (data_dir, elems) = setup("prune_compact");
|
||||
|
@ -66,11 +134,10 @@ fn pmmr_prune_compact() {
|
|||
backend.sync().unwrap();
|
||||
|
||||
// save the root
|
||||
let root: Hash;
|
||||
{
|
||||
let root = {
|
||||
let pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
|
||||
root = pmmr.root();
|
||||
}
|
||||
pmmr.root()
|
||||
};
|
||||
|
||||
// pruning some choice nodes
|
||||
{
|
||||
|
@ -86,10 +153,9 @@ fn pmmr_prune_compact() {
|
|||
let pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
|
||||
assert_eq!(root, pmmr.root());
|
||||
// check we can still retrieve same element from leaf index 2
|
||||
assert_eq!(
|
||||
pmmr.get(2, true).unwrap().1.unwrap(),
|
||||
TestElem([0, 0, 0, 2])
|
||||
);
|
||||
assert_eq!(pmmr.get(2, true).unwrap().1.unwrap(), TestElem(2));
|
||||
// and the same for leaf index 7
|
||||
assert_eq!(pmmr.get(11, true).unwrap().1.unwrap(), TestElem(7));
|
||||
}
|
||||
|
||||
// compact
|
||||
|
@ -99,14 +165,8 @@ fn pmmr_prune_compact() {
|
|||
{
|
||||
let pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
|
||||
assert_eq!(root, pmmr.root());
|
||||
assert_eq!(
|
||||
pmmr.get(2, true).unwrap().1.unwrap(),
|
||||
TestElem([0, 0, 0, 2])
|
||||
);
|
||||
assert_eq!(
|
||||
pmmr.get(11, true).unwrap().1.unwrap(),
|
||||
TestElem([0, 0, 0, 7])
|
||||
);
|
||||
assert_eq!(pmmr.get(2, true).unwrap().1.unwrap(), TestElem(2));
|
||||
assert_eq!(pmmr.get(11, true).unwrap().1.unwrap(), TestElem(7));
|
||||
}
|
||||
|
||||
teardown(data_dir);
|
||||
|
@ -116,26 +176,47 @@ fn pmmr_prune_compact() {
|
|||
fn pmmr_reload() {
|
||||
let (data_dir, elems) = setup("reload");
|
||||
|
||||
// set everything up with a first backend
|
||||
let mmr_size: u64;
|
||||
let root: Hash;
|
||||
// set everything up with an initial backend
|
||||
let mut backend = store::pmmr::PMMRBackend::new(data_dir.to_string(), None).unwrap();
|
||||
|
||||
let mmr_size = load(0, &elems[..], &mut backend);
|
||||
|
||||
// retrieve entries from the hash file for comparison later
|
||||
let (pos_3_hash, _) = backend.get(3, false).unwrap();
|
||||
let (pos_4_hash, _) = backend.get(4, false).unwrap();
|
||||
let (pos_5_hash, _) = backend.get(5, false).unwrap();
|
||||
|
||||
// save the root
|
||||
let root = {
|
||||
let pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
|
||||
pmmr.root()
|
||||
};
|
||||
|
||||
{
|
||||
let mut backend = store::pmmr::PMMRBackend::new(data_dir.to_string(), None).unwrap();
|
||||
mmr_size = load(0, &elems[..], &mut backend);
|
||||
backend.sync().unwrap();
|
||||
|
||||
// save the root and prune some nodes so we have prune data
|
||||
// prune a node so we have prune data
|
||||
{
|
||||
let mut pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
|
||||
pmmr.dump(false);
|
||||
root = pmmr.root();
|
||||
pmmr.prune(1, 1).unwrap();
|
||||
}
|
||||
backend.sync().unwrap();
|
||||
|
||||
// now check and compact the backend
|
||||
backend.check_compact(1, 2, &prune_noop).unwrap();
|
||||
backend.sync().unwrap();
|
||||
|
||||
// prune another node to force compact to actually do something
|
||||
{
|
||||
let mut pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
|
||||
pmmr.prune(4, 1).unwrap();
|
||||
pmmr.prune(2, 1).unwrap();
|
||||
}
|
||||
backend.sync().unwrap();
|
||||
|
||||
backend.check_compact(1, 2, &prune_noop).unwrap();
|
||||
backend.sync().unwrap();
|
||||
|
||||
assert_eq!(backend.unpruned_size().unwrap(), mmr_size);
|
||||
|
||||
// prune some more to get rm log data
|
||||
|
@@ -147,16 +228,39 @@ fn pmmr_reload() {
        assert_eq!(backend.unpruned_size().unwrap(), mmr_size);
    }

    // create a new backend and check everything is kosher
    // create a new backend referencing the data files
    // and check everything still works as expected
    {
        let mut backend: store::pmmr::PMMRBackend<TestElem> =
            store::pmmr::PMMRBackend::new(data_dir.to_string(), None).unwrap();
        let mut backend = store::pmmr::PMMRBackend::new(data_dir.to_string(), None).unwrap();
        assert_eq!(backend.unpruned_size().unwrap(), mmr_size);
        {
            let pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
            assert_eq!(root, pmmr.root());
        }

        // pos 1 and pos 2 are both removed (via parent pos 3 in prune list)
        assert_eq!(backend.get(1, false), None);
        assert_eq!(backend.get(2, false), None);

        // pos 3 is removed (via prune list)
        assert_eq!(backend.get(3, false), None);

        // pos 4 is removed (via prune list)
        assert_eq!(backend.get(4, false), None);
        // pos 5 is removed (via rm_log)
        assert_eq!(backend.get(5, false), None);

        // now check contents of the hash file
        // pos 1 and pos 2 are no longer in the hash file
        assert_eq!(backend.get_from_file(1), None);
        assert_eq!(backend.get_from_file(2), None);

        // pos 3 is still in there
        assert_eq!(backend.get_from_file(3), Some(pos_3_hash));

        // pos 4 and pos 5 are also still in there
        assert_eq!(backend.get_from_file(4), Some(pos_4_hash));
        assert_eq!(backend.get_from_file(5), Some(pos_5_hash));
    }

    teardown(data_dir);
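The assertions above distinguish two removal mechanisms: positions compacted away under a prune-list entry, versus leaves recorded in the rm_log but not yet compacted (the commit message notes the prune list is queried with binary_search). A toy model of that bookkeeping, with made-up values and no claim to match the backend's real types:

fn main() {
    // sorted positions of fully pruned subtree roots; pos 1 and pos 2 are
    // gone implicitly because they sit below the prune-list entry at pos 3
    let prune_list: Vec<u64> = vec![3, 4];
    // (position, block height) pairs for pruned-but-not-compacted leaves
    let rm_log: Vec<(u64, u32)> = vec![(5, 3)];

    // membership checks of the kind the tests above exercise
    assert!(prune_list.binary_search(&3).is_ok()); // pos 3: via prune list
    assert!(rm_log.iter().any(|&(p, _)| p == 5)); // pos 5: via rm_log
}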
@@ -222,48 +326,160 @@ fn pmmr_rewind() {
    teardown(data_dir);
}

#[test]
fn pmmr_compact_single_leaves() {
    let (data_dir, elems) = setup("compact_single_leaves");
    let mut backend = store::pmmr::PMMRBackend::new(data_dir.clone(), None).unwrap();
    let mmr_size = load(0, &elems[0..5], &mut backend);
    backend.sync().unwrap();

    {
        let mut pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
        pmmr.prune(1, 1).unwrap();
        pmmr.prune(4, 1).unwrap();
    }

    backend.sync().unwrap();

    // compact
    backend.check_compact(2, 2, &prune_noop).unwrap();

    {
        let mut pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
        pmmr.prune(2, 2).unwrap();
        pmmr.prune(5, 2).unwrap();
    }

    backend.sync().unwrap();

    // compact
    backend.check_compact(2, 3, &prune_noop).unwrap();

    teardown(data_dir);
}

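This test exercises the heart of the change: a pruned leaf must be retained until its sibling is pruned too, since only then can the parent stand in for the pair. A self-contained sketch of the position arithmetic behind that rule (helper names are ours; grin has similar helpers in its pmmr module):

fn node_height(pos: u64) -> u64 {
    let mut p = pos;
    loop {
        let n = 64 - u64::from(p.leading_zeros()); // bit length of p
        if p == (1u64 << n) - 1 {
            return n - 1; // rightmost node at its height
        }
        p -= (1u64 << (n - 1)) - 1; // jump over the left subtree
    }
}

// (sibling, parent) of a node, 1-based postorder positions
fn family(pos: u64) -> (u64, u64) {
    let h = node_height(pos);
    if node_height(pos + 1) == h + 1 {
        // `pos` is a right sibling: its parent is the very next position
        (pos - ((1u64 << (h + 1)) - 1), pos + 1)
    } else {
        // `pos` is a left sibling: its sibling's subtree comes next
        let jump = (1u64 << (h + 1)) - 1;
        (pos + jump, pos + jump + 1)
    }
}

fn main() {
    // pruning leaf 1 alone keeps it around; pruning 2 as well frees parent 3
    assert_eq!(family(1), (2, 3));
    assert_eq!(family(4), (5, 6));
}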
#[test]
fn pmmr_compact_entire_peak() {
    let (data_dir, elems) = setup("compact_entire_peak");
    let mut backend = store::pmmr::PMMRBackend::new(data_dir.clone(), None).unwrap();
    let mmr_size = load(0, &elems[0..5], &mut backend);
    backend.sync().unwrap();

    let pos_7 = backend.get(7, true).unwrap();
    let pos_7_hash = backend.get_from_file(7).unwrap();
    assert_eq!(pos_7.0, pos_7_hash);

    let pos_8 = backend.get(8, true).unwrap();
    let pos_8_hash = backend.get_from_file(8).unwrap();
    assert_eq!(pos_8.0, pos_8_hash);

    // prune all leaves under the peak at pos 7
    {
        let mut pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
        pmmr.prune(1, 1).unwrap();
        pmmr.prune(2, 1).unwrap();
        pmmr.prune(4, 1).unwrap();
        pmmr.prune(5, 1).unwrap();
    }

    backend.sync().unwrap();

    // compact
    backend.check_compact(2, 2, &prune_noop).unwrap();

    // now check we have pruned up to and including the peak at pos 7
    // hash still available in underlying hash file
    assert_eq!(backend.get(7, false), None);
    assert_eq!(backend.get_from_file(7), Some(pos_7_hash));

    // now check we still have subsequent hash and data where we expect
    assert_eq!(backend.get(8, true), Some(pos_8));
    assert_eq!(backend.get_from_file(8), Some(pos_8_hash));

    teardown(data_dir);
}

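Loading five leaves yields an 8-node MMR with peaks at pos 7 (a full tree over the first four leaves) and pos 8, which is why pruning positions 1, 2, 4 and 5 lets compaction consume everything up to the peak at pos 7. A sketch of peak enumeration by greedy perfect-tree decomposition (peaks is our name; valid MMR sizes assumed):

fn peaks(mmr_size: u64) -> Vec<u64> {
    let mut peaks = Vec::new();
    let mut remaining = mmr_size;
    let mut offset = 0;
    while remaining > 0 {
        // largest perfect binary tree (2^k - 1 nodes) that still fits
        let mut tree_size = 1u64;
        while tree_size * 2 + 1 <= remaining {
            tree_size = tree_size * 2 + 1;
        }
        offset += tree_size;
        peaks.push(offset); // a peak is the root of such a tree
        remaining -= tree_size;
    }
    peaks
}

fn main() {
    assert_eq!(peaks(8), vec![7, 8]); // the two peaks this test relies on
}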
#[test]
fn pmmr_compact_horizon() {
    let (data_dir, elems) = setup("compact_horizon");
    let mut backend = store::pmmr::PMMRBackend::new(data_dir.clone(), None).unwrap();
    let mmr_size = load(0, &elems[..], &mut backend);
    backend.sync().unwrap();

    // 0010012001001230
    // 9 leaves
    // data file compaction commented out for now
    // assert_eq!(backend.data_size().unwrap(), 9);
    assert_eq!(backend.data_size().unwrap(), 19);
    assert_eq!(backend.hash_size().unwrap(), 35);

    let pos_3 = backend.get(3, false).unwrap();
    let pos_3_hash = backend.get_from_file(3).unwrap();
    assert_eq!(pos_3.0, pos_3_hash);

    let pos_6 = backend.get(6, false).unwrap();
    let pos_6_hash = backend.get_from_file(6).unwrap();
    assert_eq!(pos_6.0, pos_6_hash);

    let pos_7 = backend.get(7, false).unwrap();
    let pos_7_hash = backend.get_from_file(7).unwrap();
    assert_eq!(pos_7.0, pos_7_hash);

    let pos_8 = backend.get(8, true).unwrap();
    let pos_8_hash = backend.get_from_file(8).unwrap();
    assert_eq!(pos_8.0, pos_8_hash);

    let pos_11 = backend.get(11, true).unwrap();
    let pos_11_hash = backend.get_from_file(11).unwrap();
    assert_eq!(pos_11.0, pos_11_hash);

    let root: Hash;
    {
        // setup the mmr store with all elements
        let mut backend = store::pmmr::PMMRBackend::new(data_dir.to_string(), None).unwrap();
        let mmr_size = load(0, &elems[..], &mut backend);
        backend.sync().unwrap();

        // save the root
        {
            let pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
            root = pmmr.root();
        }

        // pruning some choice nodes with an increasing block height
        {
            let mut pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
            pmmr.prune(1, 1).unwrap();
            pmmr.prune(2, 2).unwrap();
            pmmr.prune(4, 3).unwrap();
            pmmr.prune(5, 4).unwrap();
            pmmr.prune(4, 1).unwrap();
            pmmr.prune(5, 2).unwrap();
            pmmr.prune(1, 3).unwrap();
            pmmr.prune(2, 4).unwrap();
        }
        backend.sync().unwrap();

        // check we can read hashes and data correctly after pruning
        {
            assert_eq!(backend.get(3, false), None);
            assert_eq!(backend.get_from_file(3), Some(pos_3_hash));

            assert_eq!(backend.get(6, false), None);
            assert_eq!(backend.get_from_file(6), Some(pos_6_hash));

            assert_eq!(backend.get(7, true), None);
            assert_eq!(backend.get_from_file(7), Some(pos_7_hash));

            assert_eq!(backend.get(8, true), Some(pos_8));
            assert_eq!(backend.get_from_file(8), Some(pos_8_hash));

            assert_eq!(backend.get(11, true), Some(pos_11));
            assert_eq!(backend.get_from_file(11), Some(pos_11_hash));
        }

        // compact
        backend.check_compact(2, 3, &prune_noop).unwrap();
    }
    backend.sync().unwrap();

    // recheck stored data
    {
        // recreate backend
        let mut backend =
            store::pmmr::PMMRBackend::<TestElem>::new(data_dir.to_string(), None).unwrap();
        // 9 elements total, minus 2 compacted
        assert_eq!(backend.data_size().unwrap(), 7);
        // 15 nodes total, 2 pruned and compacted
        assert_eq!(backend.hash_size().unwrap(), 13);
        // check we can read a hash by pos correctly after compaction
        {
            assert_eq!(backend.get(3, false), None);
            assert_eq!(backend.get_from_file(3), Some(pos_3_hash));

            // compact some more
            backend.check_compact(1, 5, &prune_noop).unwrap();
            assert_eq!(backend.get(6, false), None);
            assert_eq!(backend.get_from_file(6), Some(pos_6_hash));

            assert_eq!(backend.get(7, true), None);
            assert_eq!(backend.get_from_file(7), Some(pos_7_hash));

            assert_eq!(backend.get(8, true), Some(pos_8));
            assert_eq!(backend.get_from_file(8), Some(pos_8_hash));
        }
    }

    // recheck stored data
@@ -271,10 +487,50 @@ fn pmmr_compact_horizon() {
        // recreate backend
        let backend =
            store::pmmr::PMMRBackend::<TestElem>::new(data_dir.to_string(), None).unwrap();
        // 9 elements total, minus 4 compacted
        assert_eq!(backend.data_size().unwrap(), 5);
        // 15 nodes total, 6 pruned and compacted
        assert_eq!(backend.hash_size().unwrap(), 9);

        assert_eq!(backend.data_size().unwrap(), 19);
        assert_eq!(backend.hash_size().unwrap(), 33);

        // check we can read a hash by pos correctly from recreated backend
        assert_eq!(backend.get(7, true), None);
        assert_eq!(backend.get_from_file(7), Some(pos_7_hash));

        assert_eq!(backend.get(8, true), Some(pos_8));
        assert_eq!(backend.get_from_file(8), Some(pos_8_hash));
    }

    {
        let mut backend =
            store::pmmr::PMMRBackend::<TestElem>::new(data_dir.to_string(), None).unwrap();

        {
            let mut pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);

            pmmr.prune(8, 5).unwrap();
            pmmr.prune(9, 5).unwrap();
        }

        // compact some more
        backend.check_compact(1, 6, &prune_noop).unwrap();
    }

    // recheck stored data
    {
        // recreate backend
        let backend =
            store::pmmr::PMMRBackend::<TestElem>::new(data_dir.to_string(), None).unwrap();

        // 0010012001001230

        assert_eq!(backend.data_size().unwrap(), 19);
        assert_eq!(backend.hash_size().unwrap(), 29);

        // check we can read a hash by pos correctly from recreated backend
        assert_eq!(backend.get(7, true), None);
        assert_eq!(backend.get_from_file(7), Some(pos_7_hash));

        assert_eq!(backend.get(11, true), Some(pos_11));
        assert_eq!(backend.get_from_file(11), Some(pos_11_hash));
    }

    teardown(data_dir);
@@ -286,17 +542,10 @@ fn setup(tag: &str) -> (String, Vec<TestElem>) {
    let data_dir = format!("./target/{}.{}-{}", t.sec, t.nsec, tag);
    fs::create_dir_all(data_dir.clone()).unwrap();

    let elems = vec![
        TestElem([0, 0, 0, 1]),
        TestElem([0, 0, 0, 2]),
        TestElem([0, 0, 0, 3]),
        TestElem([0, 0, 0, 4]),
        TestElem([0, 0, 0, 5]),
        TestElem([0, 0, 0, 6]),
        TestElem([0, 0, 0, 7]),
        TestElem([0, 0, 0, 8]),
        TestElem([1, 0, 0, 0]),
    ];
    let mut elems = vec![];
    for x in 1..20 {
        elems.push(TestElem(x));
    }
    (data_dir, elems)
}

@@ -313,29 +562,21 @@ fn load(pos: u64, elems: &[TestElem], backend: &mut store::pmmr::PMMRBackend<Tes
}

#[derive(Copy, Clone, Debug, PartialEq, Eq)]
struct TestElem([u32; 4]);
struct TestElem(u32);

impl PMMRable for TestElem {
    fn len() -> usize {
        16
        4
    }
}

impl Writeable for TestElem {
    fn write<W: Writer>(&self, writer: &mut W) -> Result<(), Error> {
        try!(writer.write_u32(self.0[0]));
        try!(writer.write_u32(self.0[1]));
        try!(writer.write_u32(self.0[2]));
        writer.write_u32(self.0[3])
        writer.write_u32(self.0)
    }
}
impl Readable for TestElem {
    fn read(reader: &mut Reader) -> Result<TestElem, Error> {
        Ok(TestElem([
            reader.read_u32()?,
            reader.read_u32()?,
            reader.read_u32()?,
            reader.read_u32()?,
        ]))
        Ok(TestElem(reader.read_u32()?))
    }
}

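The diff shrinks TestElem from 16 bytes ([u32; 4]) down to the 4 bytes reported by len(); fixed-width elements are what let the append-only data file locate an entry by position alone. A minimal round-trip check, independent of grin's ser traits and assuming the writer's big-endian encoding:

fn main() {
    let elem: u32 = 7; // stands in for TestElem(7)
    let bytes = elem.to_be_bytes(); // 4 bytes, matching TestElem::len()
    assert_eq!(bytes.len(), 4);
    assert_eq!(u32::from_be_bytes(bytes), elem);
}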
@@ -374,6 +374,14 @@ impl OutputData {
            return false;
        } else if self.status == OutputStatus::Unconfirmed && self.is_coinbase {
            return false;
        } else if self.is_coinbase && self.block.is_none() {
            // if we do not have a block hash for a coinbase output we cannot spend it
            // block index got compacted before we refreshed our wallet?
            return false;
        } else if self.is_coinbase && self.merkle_proof.is_none() {
            // if we do not have a Merkle proof for a coinbase output we cannot spend it
            // block index got compacted before we refreshed our wallet?
            return false;
        } else if self.lock_height > current_height {
            return false;
        } else if self.status == OutputStatus::Unspent
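Restated outside the wallet type, the new checks say a coinbase output is spendable only while we still hold its originating block hash and Merkle proof (both can be lost if the block index is compacted before a wallet refresh) and once its lock height has passed. A hypothetical standalone predicate with stand-in types, not the wallet's actual API:

fn coinbase_spendable(
    block_hash: Option<[u8; 32]>,  // stand-in for the wallet's block identifier
    merkle_proof: Option<Vec<u8>>, // stand-in for the real Merkle proof type
    lock_height: u64,
    current_height: u64,
) -> bool {
    block_hash.is_some() && merkle_proof.is_some() && lock_height <= current_height
}

fn main() {
    // block index compacted before refresh: no block hash, cannot spend
    assert!(!coinbase_spendable(None, Some(vec![]), 1_000, 2_000));
    assert!(coinbase_spendable(Some([0; 32]), Some(vec![]), 1_000, 2_000));
}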