fix misbehaving simulnet fastsync test (#1227)

* fix misbehaving simulnet fastsync test
cleanup redundant cutoff vs bitmap params in rewind and check_compact (see the sketch below)

* make sure we do not verify full kernel history on a writeable txhashset extension
rework simulnet simulate_fast_sync test to be more robust

* fixup store tests

* sleep for a bit longer to give nodes time to update
their sync_state correctly

* tweak timing of simulate_block_propagation
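
For context on the "cutoff vs bitmap" cleanup: with an append-only MMR, the leaf positions added after a rewind point are fully determined by the cutoff position, so callers no longer need to pass a separate rewind_add_pos bitmap. A minimal sketch of the idea, assuming the croaring Bitmap type used throughout the diff (positions_added_after is a hypothetical helper, not a function from the codebase):

    use croaring::Bitmap;

    // Recompute the "added after the cutoff" set on the fly instead of passing it around.
    // cutoff_pos is the position we are rewinding to; max_pos is the highest position
    // currently tracked (the real code derives this from self.bitmap.maximum()).
    fn positions_added_after(cutoff_pos: u64, max_pos: u64) -> Bitmap {
        ((cutoff_pos + 1)..=max_pos).map(|x| x as u32).collect()
    }

This mirrors what LeafSet::rewind and removed_pre_cutoff now compute internally, as shown in the diff below.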
Antioch Peverell 2018-07-08 17:37:09 +01:00 committed by GitHub
parent 5c142864ff
commit 980378eb65
14 changed files with 134 additions and 96 deletions


@@ -425,15 +425,17 @@ impl Chain {
 return Ok(());
 }
+// We want to validate the full kernel history here for completeness.
+let skip_kernel_hist = false;
 let mut txhashset = self.txhashset.write().unwrap();
 // Now create an extension from the txhashset and validate against the
 // latest block header. Rewind the extension to the specified header to
 // ensure the view is consistent.
 txhashset::extending_readonly(&mut txhashset, |extension| {
 // TODO - is this rewind guaranteed to be redundant now?
 extension.rewind(&header, &header)?;
-extension.validate(&header, skip_rproofs, &NoStatus)?;
+extension.validate(&header, skip_rproofs, skip_kernel_hist, &NoStatus)?;
 Ok(())
 })
 }
@@ -546,22 +548,25 @@ impl Chain {
 // validate against a read-only extension first (some of the validation
 // runs additional rewinds)
+debug!(LOGGER, "chain: txhashset_write: rewinding and validating (read-only)");
 txhashset::extending_readonly(&mut txhashset, |extension| {
 extension.rewind(&header, &header)?;
-extension.validate(&header, false, status)
+extension.validate(&header, false, false, status)?;
+Ok(())
 })?;
 // all good, prepare a new batch and update all the required records
+debug!(LOGGER, "chain: txhashset_write: rewinding and validating a 2nd time (writeable)");
 let mut batch = self.store.batch()?;
 txhashset::extending(&mut txhashset, &mut batch, |extension| {
 // TODO do we need to rewind here? We have no blocks to rewind
 // (and we need them for the pos to unremove)
 extension.rewind(&header, &header)?;
-extension.validate(&header, false, status)?;
+extension.validate(&header, false, true, status)?;
 extension.rebuild_index()?;
 Ok(())
 })?;
+debug!(LOGGER, "chain: txhashset_write: finished validating and rebuilding");
 status.on_save();
 // replace the chain txhashset with the newly built one
 {
@@ -578,6 +583,8 @@ impl Chain {
 }
 batch.commit()?;
+debug!(LOGGER, "chain: txhashset_write: finished committing the batch (head etc.)");
 self.check_orphans(header.height + 1);
 status.on_done();


@@ -233,7 +233,6 @@ impl TxHashSet {
 let horizon = current_height.saturating_sub(global::cut_through_horizon().into());
 let horizon_header = self.commit_index.get_header_by_height(horizon)?;
-let rewind_add_pos = output_pos_to_rewind(&horizon_header, &head_header)?;
 let rewind_rm_pos =
 input_pos_to_rewind(self.commit_index.clone(), &horizon_header, &head_header)?;
@@ -249,14 +248,12 @@ impl TxHashSet {
 self.output_pmmr_h.backend.check_compact(
 horizon_header.output_mmr_size,
-&rewind_add_pos,
 &rewind_rm_pos.1,
 clean_output_index,
 )?;
 self.rproof_pmmr_h.backend.check_compact(
 horizon_header.output_mmr_size,
-&rewind_add_pos,
 &rewind_rm_pos.1,
 &prune_noop,
 )?;
@@ -453,14 +450,9 @@ impl<'a> Extension<'a> {
 kernel_pos: u64,
 rewind_rm_pos: &Bitmap,
 ) -> Result<(), Error> {
-let latest_output_pos = self.output_pmmr.unpruned_size();
-let rewind_add_pos: Bitmap = ((output_pos + 1)..(latest_output_pos + 1))
-.map(|x| x as u32)
-.collect();
 self.rewind_to_pos(
 output_pos,
 kernel_pos,
-&rewind_add_pos,
 rewind_rm_pos,
 )?;
 Ok(())
@@ -475,9 +467,7 @@ impl<'a> Extension<'a> {
 /// new tx).
 pub fn apply_raw_tx(&mut self, tx: &Transaction) -> Result<(), Error> {
 // This should *never* be called on a writeable extension...
-if !self.rollback {
-panic!("attempted to apply a raw tx to a writeable txhashset extension");
-}
+assert!(self.rollback, "applied raw_tx to writeable txhashset extension");
 // Checkpoint the MMR positions before we apply the new tx,
 // anything goes wrong we will rewind to these positions.
@@ -769,7 +759,6 @@ impl<'a> Extension<'a> {
 // undone during rewind).
 // Rewound output pos will be removed from the MMR.
 // Rewound input (spent) pos will be added back to the MMR.
-let rewind_add_pos = output_pos_to_rewind(block_header, head_header)?;
 let rewind_rm_pos =
 input_pos_to_rewind(self.commit_index.clone(), block_header, head_header)?;
 if !rewind_rm_pos.0 {
@@ -780,7 +769,6 @@ impl<'a> Extension<'a> {
 self.rewind_to_pos(
 block_header.output_mmr_size,
 block_header.kernel_mmr_size,
-&rewind_add_pos,
 &rewind_rm_pos.1,
 )
 }
@@ -791,7 +779,6 @@ impl<'a> Extension<'a> {
 &mut self,
 output_pos: u64,
 kernel_pos: u64,
-rewind_add_pos: &Bitmap,
 rewind_rm_pos: &Bitmap,
 ) -> Result<(), Error> {
 trace!(
@@ -807,13 +794,13 @@ impl<'a> Extension<'a> {
 self.new_output_commits.retain(|_, &mut v| v <= output_pos);
 self.output_pmmr
-.rewind(output_pos, rewind_add_pos, rewind_rm_pos)
+.rewind(output_pos, rewind_rm_pos)
 .map_err(&ErrorKind::TxHashSetErr)?;
 self.rproof_pmmr
-.rewind(output_pos, rewind_add_pos, rewind_rm_pos)
+.rewind(output_pos, rewind_rm_pos)
 .map_err(&ErrorKind::TxHashSetErr)?;
 self.kernel_pmmr
-.rewind(kernel_pos, rewind_add_pos, rewind_rm_pos)
+.rewind(kernel_pos, &Bitmap::create())
 .map_err(&ErrorKind::TxHashSetErr)?;
 Ok(())
 }
@@ -882,6 +869,7 @@ impl<'a> Extension<'a> {
 &mut self,
 header: &BlockHeader,
 skip_rproofs: bool,
+skip_kernel_hist: bool,
 status: &T,
 ) -> Result<((Commitment, Commitment)), Error>
 where
@@ -911,7 +899,9 @@ impl<'a> Extension<'a> {
 // Verify kernel roots for all past headers, need to be last as it rewinds
 // a lot without resetting
+if !skip_kernel_hist {
 self.verify_kernel_history(header)?;
+}
 Ok((output_sum, kernel_sum))
 }
@@ -1040,14 +1030,15 @@ impl<'a> Extension<'a> {
 Ok(())
 }
+fn verify_kernel_history(&mut self, header: &BlockHeader) -> Result<(), Error> {
 // Special handling to make sure the whole kernel set matches each of its
 // roots in each block header, without truncation. We go back header by
 // header, rewind and check each root. This fixes a potential weakness in
 // fast sync where a reorg past the horizon could allow a whole rewrite of
 // the kernel set.
-fn verify_kernel_history(&mut self, header: &BlockHeader) -> Result<(), Error> {
+assert!(self.rollback, "verified kernel history on writeable txhashset extension");
 let mut current = header.clone();
-let empty_bitmap = Bitmap::create();
 loop {
@@ -1055,7 +1046,7 @@ impl<'a> Extension<'a> {
 }
 // rewinding kernels only further and further back
 self.kernel_pmmr
-.rewind(current.kernel_mmr_size, &empty_bitmap, &empty_bitmap)
+.rewind(current.kernel_mmr_size, &Bitmap::create())
 .map_err(&ErrorKind::TxHashSetErr)?;
 if self.kernel_pmmr.root() != current.kernel_root {
 return Err(ErrorKind::InvalidTxHashSet(format!(
@@ -1096,20 +1087,6 @@ pub fn zip_write(root_dir: String, txhashset_data: File) -> Result<(), Error> {
 .map_err(|ze| ErrorKind::Other(ze.to_string()).into())
 }
-/// Given a block header to rewind to and the block header at the
-/// head of the current chain state, we need to calculate the positions
-/// of all outputs we need to "undo" during a rewind.
-/// The MMR is append-only so we can simply look for all positions added after
-/// the rewind pos.
-fn output_pos_to_rewind(
-block_header: &BlockHeader,
-head_header: &BlockHeader,
-) -> Result<Bitmap, Error> {
-let marker_to = head_header.output_mmr_size;
-let marker_from = block_header.output_mmr_size;
-Ok(((marker_from + 1)..=marker_to).map(|x| x as u32).collect())
-}
 /// Given a block header to rewind to and the block header at the
 /// head of the current chain state, we need to calculate the positions
 /// of all inputs (spent outputs) we need to "undo" during a rewind.


@@ -49,7 +49,7 @@ pub struct TxHashSetRoots {
 /// blockchain tree. References the max height and the latest and previous
 /// blocks
 /// for convenience and the total difficulty.
-#[derive(Serialize, Deserialize, Debug, Clone)]
+#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
 pub struct Tip {
 /// Height of the tip (max height of the fork)
 pub height: u64,


@@ -67,7 +67,6 @@ where
 fn rewind(
 &mut self,
 position: u64,
-rewind_add_pos: &Bitmap,
 rewind_rm_pos: &Bitmap,
 ) -> Result<(), String>;
@@ -301,7 +300,6 @@ where
 pub fn rewind(
 &mut self,
 position: u64,
-rewind_add_pos: &Bitmap,
 rewind_rm_pos: &Bitmap,
 ) -> Result<(), String> {
 // Identify which actual position we should rewind to as the provided
@@ -312,7 +310,7 @@ where
 pos += 1;
 }
-self.backend.rewind(pos, rewind_add_pos, rewind_rm_pos)?;
+self.backend.rewind(pos, rewind_rm_pos)?;
 self.last_pos = pos;
 Ok(())
 }


@@ -121,7 +121,6 @@ where
 fn rewind(
 &mut self,
 position: u64,
-_rewind_add_pos: &Bitmap,
 _rewind_rm_pos: &Bitmap,
 ) -> Result<(), String> {
 self.elems = self.elems[0..(position as usize) + 1].to_vec();


@@ -24,6 +24,7 @@ use core::{core, pow};
 use p2p;
 use pool;
 use store;
+use util::LOGGER;
 use wallet;
 /// Error type wrapping underlying module errors.
@@ -259,6 +260,8 @@ impl Default for StratumServerConfig {
 #[derive(Debug, Clone, Copy, Eq, PartialEq)]
 #[allow(missing_docs)]
 pub enum SyncStatus {
+/// Initial State (we do not yet know if we are/should be syncing)
+Initial,
 /// Not syncing
 NoSync,
 /// Downloading block headers
@@ -295,11 +298,12 @@ impl SyncState {
 /// Return a new SyncState initialize to NoSync
 pub fn new() -> SyncState {
 SyncState {
-current: RwLock::new(SyncStatus::NoSync),
+current: RwLock::new(SyncStatus::Initial),
 }
 }
-/// Whether the current state matches any active syncing operation
+/// Whether the current state matches any active syncing operation.
+/// Note: This includes our "initial" state.
 pub fn is_syncing(&self) -> bool {
 *self.current.read().unwrap() != SyncStatus::NoSync
 }
@@ -311,7 +315,19 @@ impl SyncState {
 /// Update the syncing status
 pub fn update(&self, new_status: SyncStatus) {
+if self.status() == new_status {
+return;
+}
 let mut status = self.current.write().unwrap();
+debug!(
+LOGGER,
+"sync_state: sync_status: {:?} -> {:?}",
+*status,
+new_status,
+);
 *status = new_status;
 }
 }


@@ -72,11 +72,19 @@ pub fn run_sync(
 .spawn(move || {
 let mut si = SyncInfo::new();
-// initial sleep to give us time to peer with some nodes
-if !skip_sync_wait {
+{
+// Initial sleep to give us time to peer with some nodes.
+// Note: Even if we have "skip_sync_wait" we need to wait a
+// short period of time for tests to do the right thing.
+let wait_secs = if skip_sync_wait {
+3
+} else {
+30
+};
 awaiting_peers.store(true, Ordering::Relaxed);
 let mut n = 0;
-while peers.more_work_peers().len() < 4 && n < 30 {
+while peers.more_work_peers().len() < 4 && n < wait_secs {
 thread::sleep(Duration::from_secs(1));
 n += 1;
 }
@@ -305,7 +313,7 @@ fn needs_syncing(
 );
 let _ = chain.reset_head();
-return (false, 0);
+return (false, most_work_height);
 }
 }
 } else {


@@ -25,6 +25,7 @@ mod framework;
 use std::default::Default;
 use std::{thread, time};
+use core::core::hash::Hashed;
 use core::global::{self, ChainTypes};
 use framework::{config, stratum_config, LocalServerContainerConfig, LocalServerContainerPool,
@@ -190,7 +191,6 @@ fn simulate_block_propagation() {
 // start mining
 servers[0].start_test_miner(None);
-let _original_height = servers[0].head().height;
 // monitor for a change of head on a different server and check whether
 // chain height has changed
@@ -204,7 +204,7 @@
 if count == 5 {
 break;
 }
-thread::sleep(time::Duration::from_millis(100));
+thread::sleep(time::Duration::from_millis(1_000));
 }
 for n in 0..5 {
 servers[n].stop();
@@ -228,12 +228,21 @@ fn simulate_full_sync() {
 s1.start_test_miner(None);
 thread::sleep(time::Duration::from_secs(8));
-#[ignore(unused_mut)] // mut needed?
-let mut conf = framework::config(1001, "grin-sync", 1000);
-let s2 = servers::Server::new(conf).unwrap();
-while s2.head().height < 4 {
-thread::sleep(time::Duration::from_millis(100));
+let s2 = servers::Server::new(framework::config(1001, "grin-sync", 1000)).unwrap();
+// Get the current header from s1.
+let s1_header = s1.chain.head_header().unwrap();
+// Wait for s2 to sync up to and including the header from s1.
+while s2.head().height < s1_header.height {
+thread::sleep(time::Duration::from_millis(1_000));
 }
+// Confirm both s1 and s2 see a consistent header at that height.
+let s2_header = s2.chain.get_block_header(&s1_header.hash()).unwrap();
+assert_eq!(s1_header, s2_header);
+// Stop our servers cleanly.
 s1.stop();
 s2.stop();
 }
@@ -250,19 +259,35 @@ fn simulate_fast_sync() {
 let test_name_dir = "grin-fast";
 framework::clean_all_output(test_name_dir);
+// start s1 and mine enough blocks to get beyond the fast sync horizon
 let s1 = servers::Server::new(framework::config(2000, "grin-fast", 2000)).unwrap();
-// mine a few blocks on server 1
 s1.start_test_miner(None);
-thread::sleep(time::Duration::from_secs(8));
+while s1.head().height < 21 {
+thread::sleep(time::Duration::from_millis(1_000));
+}
 let mut conf = config(2001, "grin-fast", 2000);
 conf.archive_mode = Some(false);
-let s2 = servers::Server::new(conf).unwrap();
-while s2.head().height != s2.header_head().height || s2.head().height < 20 {
-thread::sleep(time::Duration::from_millis(1000));
-}
-let _h2 = s2.chain.get_header_by_height(1).unwrap();
+let s2 = servers::Server::new(conf).unwrap();
+while s2.head().height < 21 {
+thread::sleep(time::Duration::from_millis(1_000));
+}
+// Get the current header from s1.
+let s1_header = s1.chain.head_header().unwrap();
+// Wait for s2 to sync up to and including the header from s1.
+while s2.head().height < s1_header.height {
+thread::sleep(time::Duration::from_millis(1_000));
+}
+// Confirm both s1 and s2 see a consistent header at that height.
+let s2_header = s2.chain.get_block_header(&s1_header.hash()).unwrap();
+assert_eq!(s1_header, s2_header);
+// Stop our servers cleanly.
 s1.stop();
 s2.stop();
 }


@@ -87,7 +87,7 @@ fn basic_stratum_server() {
 workers.remove(4);
 // Swallow the genesis block
-thread::sleep(time::Duration::from_secs(1)); // Wait for the server to broadcast
+thread::sleep(time::Duration::from_secs(5)); // Wait for the server to broadcast
 let mut response = String::new();
 for n in 0..workers.len() {
 let _result = workers[n].read_line(&mut response);


@@ -81,6 +81,7 @@ impl TUIStatusListener for TUIStatusView {
 "Waiting for peers".to_string()
 } else {
 match stats.sync_status {
+SyncStatus::Initial => "Initializing".to_string(),
 SyncStatus::NoSync => "Running".to_string(),
 SyncStatus::HeaderSync {
 current_height,


@@ -102,14 +102,20 @@ impl LeafSet {
 pub fn removed_pre_cutoff(
 &self,
 cutoff_pos: u64,
-rewind_add_pos: &Bitmap,
 rewind_rm_pos: &Bitmap,
 prune_list: &PruneList,
 ) -> Bitmap {
 let mut bitmap = self.bitmap.clone();
-// Now "rewind" using the rewind_add_pos and rewind_rm_pos bitmaps passed in.
+// First remove pos from leaf_set that were
+// added after the point we are rewinding to.
+let marker_from = cutoff_pos;
+let marker_to = self.bitmap.maximum() as u64;
+let rewind_add_pos: Bitmap = ((marker_from + 1)..=marker_to).map(|x| x as u32).collect();
 bitmap.andnot_inplace(&rewind_add_pos);
+// Then add back output pos to the leaf_set
+// that were removed.
 bitmap.or_inplace(&rewind_rm_pos);
 // Invert bitmap for the leaf pos and return the resulting bitmap.
@@ -119,10 +125,16 @@ impl LeafSet {
 }
 /// Rewinds the leaf_set back to a previous state.
-pub fn rewind(&mut self, rewind_add_pos: &Bitmap, rewind_rm_pos: &Bitmap) {
+/// Removes all pos after the cutoff.
+/// Adds back all pos in rewind_rm_pos.
+pub fn rewind(&mut self, cutoff_pos: u64, rewind_rm_pos: &Bitmap) {
 // First remove pos from leaf_set that were
 // added after the point we are rewinding to.
+let marker_from = cutoff_pos;
+let marker_to = self.bitmap.maximum() as u64;
+let rewind_add_pos: Bitmap = ((marker_from + 1)..=marker_to).map(|x| x as u32).collect();
 self.bitmap.andnot_inplace(&rewind_add_pos);
 // Then add back output pos to the leaf_set
 // that were removed.
 self.bitmap.or_inplace(&rewind_rm_pos);


@@ -152,12 +152,11 @@ where
 fn rewind(
 &mut self,
 position: u64,
-rewind_add_pos: &Bitmap,
 rewind_rm_pos: &Bitmap,
 ) -> Result<(), String> {
 // First rewind the leaf_set with the necessary added and removed positions.
 if self.prunable {
-self.leaf_set.rewind(rewind_add_pos, rewind_rm_pos);
+self.leaf_set.rewind(position, rewind_rm_pos);
 }
 // Rewind the hash file accounting for pruned/compacted pos
@@ -328,7 +327,6 @@ where
 pub fn check_compact<P>(
 &mut self,
 cutoff_pos: u64,
-rewind_add_pos: &Bitmap,
 rewind_rm_pos: &Bitmap,
 prune_cb: P,
 ) -> io::Result<bool>
@@ -343,7 +341,7 @@
 // Calculate the sets of leaf positions and node positions to remove based
 // on the cutoff_pos provided.
-let (leaves_removed, pos_to_rm) = self.pos_to_rm(cutoff_pos, rewind_add_pos, rewind_rm_pos);
+let (leaves_removed, pos_to_rm) = self.pos_to_rm(cutoff_pos, rewind_rm_pos);
 // 1. Save compact copy of the hash file, skipping removed data.
 {
@@ -418,14 +416,12 @@
 fn pos_to_rm(
 &self,
 cutoff_pos: u64,
-rewind_add_pos: &Bitmap,
 rewind_rm_pos: &Bitmap,
 ) -> (Bitmap, Bitmap) {
 let mut expanded = Bitmap::create();
 let leaf_pos_to_rm = self.leaf_set.removed_pre_cutoff(
 cutoff_pos,
-rewind_add_pos,
 rewind_rm_pos,
 &self.prune_list,
 );
@@ -467,4 +463,3 @@ fn removed_excl_roots(removed: Bitmap) -> Bitmap {
 })
 .collect()
 }


@@ -124,7 +124,7 @@ fn pmmr_compact_leaf_sibling() {
 // aggressively compact the PMMR files
 backend
-.check_compact(1, &Bitmap::create(), &Bitmap::create(), &prune_noop)
+.check_compact(1, &Bitmap::create(), &prune_noop)
 .unwrap();
 // check pos 1, 2, 3 are in the state we expect after compacting
@@ -182,7 +182,7 @@ fn pmmr_prune_compact() {
 // compact
 backend
-.check_compact(2, &Bitmap::create(), &Bitmap::create(), &prune_noop)
+.check_compact(2, &Bitmap::create(), &prune_noop)
 .unwrap();
 // recheck the root and stored data
@@ -228,7 +228,7 @@ fn pmmr_reload() {
 // now check and compact the backend
 backend
-.check_compact(1, &Bitmap::create(), &Bitmap::create(), &prune_noop)
+.check_compact(1, &Bitmap::create(), &prune_noop)
 .unwrap();
 backend.sync().unwrap();
@@ -241,7 +241,7 @@
 backend.sync().unwrap();
 backend
-.check_compact(4, &Bitmap::create(), &Bitmap::create(), &prune_noop)
+.check_compact(4, &Bitmap::create(), &prune_noop)
 .unwrap();
 backend.sync().unwrap();
@@ -340,7 +340,7 @@ fn pmmr_rewind() {
 // and compact the MMR to remove the pruned elements
 backend
-.check_compact(6, &Bitmap::create(), &Bitmap::create(), &prune_noop)
+.check_compact(6, &Bitmap::create(), &prune_noop)
 .unwrap();
 backend.sync().unwrap();
@@ -354,7 +354,7 @@
 // rewind and check the roots still match
 {
 let mut pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
-pmmr.rewind(9, &Bitmap::of(&vec![11, 12, 16]), &Bitmap::create())
+pmmr.rewind(9, &Bitmap::of(&vec![11, 12, 16]))
 .unwrap();
 assert_eq!(pmmr.unpruned_size(), 10);
@@ -399,7 +399,7 @@
 {
 let mut pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, 10);
-pmmr.rewind(5, &Bitmap::create(), &Bitmap::create())
+pmmr.rewind(5, &Bitmap::create())
 .unwrap();
 assert_eq!(pmmr.root(), root1);
 }
@@ -440,7 +440,7 @@ fn pmmr_compact_single_leaves() {
 // compact
 backend
-.check_compact(2, &Bitmap::create(), &Bitmap::create(), &prune_noop)
+.check_compact(2, &Bitmap::create(), &prune_noop)
 .unwrap();
 {
@@ -453,7 +453,7 @@
 // compact
 backend
-.check_compact(2, &Bitmap::create(), &Bitmap::create(), &prune_noop)
+.check_compact(2, &Bitmap::create(), &prune_noop)
 .unwrap();
 teardown(data_dir);
@@ -484,7 +484,7 @@ fn pmmr_compact_entire_peak() {
 // compact
 backend
-.check_compact(2, &Bitmap::create(), &Bitmap::create(), &prune_noop)
+.check_compact(2, &Bitmap::create(), &prune_noop)
 .unwrap();
 // now check we have pruned up to and including the peak at pos 7
@@ -557,7 +557,7 @@ fn pmmr_compact_horizon() {
 // compact
 backend
-.check_compact(4, &Bitmap::create(), &Bitmap::of(&vec![1, 2]), &prune_noop)
+.check_compact(4, &Bitmap::of(&vec![1, 2]), &prune_noop)
 .unwrap();
 backend.sync().unwrap();
@@ -612,7 +612,7 @@
 // compact some more
 backend
-.check_compact(9, &Bitmap::create(), &Bitmap::create(), &prune_noop)
+.check_compact(9, &Bitmap::create(), &prune_noop)
 .unwrap();
 }
@@ -675,7 +675,7 @@ fn compact_twice() {
 // compact
 backend
-.check_compact(2, &Bitmap::create(), &Bitmap::create(), &prune_noop)
+.check_compact(2, &Bitmap::create(), &prune_noop)
 .unwrap();
 // recheck the root and stored data
@@ -704,7 +704,7 @@
 // compact
 backend
-.check_compact(2, &Bitmap::create(), &Bitmap::create(), &prune_noop)
+.check_compact(2, &Bitmap::create(), &prune_noop)
 .unwrap();
 // recheck the root and stored data


@@ -83,7 +83,7 @@ fn test_leaf_set_performance() {
 let from_pos = x * 1_000 + 1;
 let to_pos = from_pos + 1_000;
 let bitmap: Bitmap = (from_pos..to_pos).collect();
-leaf_set.rewind(&Bitmap::create(), &bitmap);
+leaf_set.rewind(1_000_000, &bitmap);
 }
 assert_eq!(leaf_set.len(), 1_000_000);
 println!(