Mirror of https://github.com/mimblewimble/grin.git (synced 2025-02-01 17:01:09 +03:00)
Simplify block marker metadata (#985)
* wip BlockMarker struct, get rid of PMMRMetadata
* use rewind to init the txhashset correctly on startup; we do not need to track pos via metadata (we have block markers), and we do not need to open the txhashset with a specific pos (we have rewind)
* better logging on init
* keep rewinding and validating on init, to find a good block
* use validate_roots on chain init
Parent: 55f6e3e63f
Commit: e22d025dc8
11 changed files with 278 additions and 412 deletions
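In short: the per-block PMMR file metadata goes away, each block instead gets a small BlockMarker recording where the output and kernel MMRs end, and startup no longer trusts stored file positions; the chain rewinds to its head, validates the roots, and steps back one block at a time until it finds a good state. A condensed, self-contained sketch of that recovery loop follows; the store/extension types here are illustrative stand-ins, not the real chain API.

    /// The positions that define the MMR sizes at a block (mirrors the
    /// BlockMarker introduced in this commit).
    #[derive(Debug, Clone, Default)]
    struct BlockMarker {
        output_pos: u64,
        kernel_pos: u64,
    }

    /// Illustrative stand-in for the chain store / txhashset pair.
    trait ChainState {
        fn head(&self) -> Option<u64>; // height of the current head, if any
        fn rewind_and_validate(&mut self, height: u64) -> Result<(), String>;
        fn step_back_head(&mut self); // move head to the previous block
    }

    /// Keep rewinding and validating until we find a good block, as the
    /// commit message describes.
    fn init_from_head(state: &mut impl ChainState) -> Result<(), String> {
        while let Some(height) = state.head() {
            match state.rewind_and_validate(height) {
                Ok(()) => return Ok(()),
                Err(_) => {
                    // MMR backend files may be corrupt from an unclean
                    // shutdown; drop this block and try its parent.
                    state.step_back_head();
                }
            }
        }
        Err("no valid head found".to_owned())
    }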
@@ -166,56 +166,71 @@ impl Chain {
 		// check if we have a head in store, otherwise the genesis block is it
 		let head = store.head();
 
-		let txhashset_md = match head {
-			Ok(h) => {
-				// Add the height to the metadata for the use of the rewind log, as this isn't
-				// stored
-				if let Ok(mut ts) = store.get_block_pmmr_file_metadata(&h.last_block_h) {
-					ts.output_file_md.block_height = h.height;
-					ts.rproof_file_md.block_height = h.height;
-					ts.kernel_file_md.block_height = h.height;
-					Some(ts)
-				} else {
-					debug!(LOGGER, "metadata not found for {} @ {}", h.height, h.hash());
-					None
+		// open the txhashset, creating a new one if necessary
+		let mut txhashset = txhashset::TxHashSet::open(db_root.clone(), store.clone())?;
+
+		match head {
+			Ok(head) => {
+				let mut head = head;
+				loop {
+					// Use current chain tip if we have one.
+					// Note: We are rewinding and validating against a writeable extension.
+					// If validation is successful we will truncate the backend files
+					// to match the provided block header.
+					let header = store.get_block_header(&head.last_block_h)?;
+					let res = txhashset::extending(&mut txhashset, |extension| {
+						debug!(
+							LOGGER,
+							"chain: init: rewinding and validating before we start... {} at {}",
+							header.hash(),
+							header.height,
+						);
+
+						extension.rewind(&header)?;
+						extension.validate_roots(&header)?;
+						Ok(())
+					});
+					if res.is_ok() {
+						break;
+					} else {
+						// We may have corrupted the MMR backend files
+						// last time we stopped the node.
+						// If this appears to be the case
+						// revert the head to the previous header and try again
+
+						let _ = store.delete_block(&header.hash());
+						let prev_header = store.get_block_header(&head.prev_block_h)?;
+						let _ = store.setup_height(&prev_header, &head)?;
+						head = Tip::from_block(&prev_header);
+						store.save_head(&head)?;
+					}
 				}
 			}
-			Err(NotFoundErr) => None,
-			Err(e) => return Err(Error::StoreErr(e, "chain init load head".to_owned())),
-		};
-
-		let mut txhashset =
-			txhashset::TxHashSet::open(db_root.clone(), store.clone(), txhashset_md)?;
-
-		let head = store.head();
-		let head = match head {
-			Ok(h) => h,
 			Err(NotFoundErr) => {
 				let tip = Tip::from_block(&genesis.header);
 				store.save_block(&genesis)?;
 				store.setup_height(&genesis.header, &tip)?;
-				if genesis.kernels.len() > 0 {
-					txhashset::extending(&mut txhashset, |extension| {
-						extension.apply_block(&genesis)
-					})?;
-				}
+				txhashset::extending(&mut txhashset, |extension| {
+					extension.apply_block(&genesis)?;
+					Ok(())
+				})?;
 
 				// saving a new tip based on genesis
 				store.save_head(&tip)?;
 				info!(
 					LOGGER,
-					"Saved genesis block: {:?}, nonce: {:?}, pow: {:?}",
+					"chain: init: saved genesis block: {:?}, nonce: {:?}, pow: {:?}",
 					genesis.hash(),
 					genesis.header.nonce,
 					genesis.header.pow,
 				);
-				pipe::save_pmmr_metadata(&tip, &txhashset, store.clone())?;
-				tip
 			}
 			Err(e) => return Err(Error::StoreErr(e, "chain init load head".to_owned())),
 		};
 
+		// Now reload the chain head (either existing head or genesis from above)
+		let head = store.head()?;
+
 		// Initialize header_head and sync_head as necessary for chain init.
 		store.init_head()?;
@@ -224,7 +239,7 @@ impl Chain {
 			"Chain init: {} @ {} [{}]",
 			head.total_difficulty.into_num(),
 			head.height,
-			head.last_block_h
+			head.last_block_h,
 		);
 
 		Ok(Chain {
@@ -434,9 +449,10 @@ impl Chain {
 
 		// Now create an extension from the txhashset and validate
 		// against the latest block header.
-		// We will rewind the extension internally to the pos for
-		// the block header to ensure the view is consistent.
+		// Rewind the extension to the specified header to ensure the view is
+		// consistent.
 		txhashset::extending_readonly(&mut txhashset, |extension| {
+			extension.rewind(&header)?;
 			extension.validate(&header, skip_rproofs)
 		})
 	}
@@ -502,18 +518,14 @@ impl Chain {
 	/// at the provided block hash.
 	pub fn txhashset_read(&self, h: Hash) -> Result<(u64, u64, File), Error> {
 		// get the indexes for the block
-		let out_index: u64;
-		let kernel_index: u64;
-		{
+		let marker = {
 			let txhashset = self.txhashset.read().unwrap();
-			let (oi, ki) = txhashset.indexes_at(&h)?;
-			out_index = oi;
-			kernel_index = ki;
-		}
+			txhashset.indexes_at(&h)?
+		};
 
 		// prepares the zip and return the corresponding Read
 		let txhashset_reader = txhashset::zip_read(self.db_root.clone())?;
-		Ok((out_index, kernel_index, txhashset_reader))
+		Ok((marker.output_pos, marker.kernel_pos, txhashset_reader))
 	}
 
 	/// Writes a reading view on a txhashset state that's been provided to us.
@@ -539,16 +551,22 @@ impl Chain {
 
 		// write the block marker so we can safely rewind to
 		// the pos for that block when we validate the extension below
-		self.store
-			.save_block_marker(&h, &(rewind_to_output, rewind_to_kernel))?;
+		let marker = BlockMarker {
+			output_pos: rewind_to_output,
+			kernel_pos: rewind_to_kernel,
+		};
+		self.store.save_block_marker(&h, &marker)?;
 
 		debug!(
 			LOGGER,
 			"Going to validate new txhashset, might take some time..."
 		);
-		let mut txhashset =
-			txhashset::TxHashSet::open(self.db_root.clone(), self.store.clone(), None)?;
+		let mut txhashset = txhashset::TxHashSet::open(self.db_root.clone(), self.store.clone())?;
 
+		// Note: we are validating against a writeable extension.
 		txhashset::extending(&mut txhashset, |extension| {
+			extension.rewind(&header)?;
 			extension.validate(&header, false)?;
 			extension.rebuild_index()?;
 			Ok(())
@@ -621,7 +639,6 @@ impl Chain {
 			match self.store.get_block(&current.hash()) {
 				Ok(b) => {
 					self.store.delete_block(&b.hash())?;
-					self.store.delete_block_pmmr_file_metadata(&b.hash())?;
 					self.store.delete_block_marker(&b.hash())?;
 				}
 				Err(NotFoundErr) => {
@@ -733,6 +750,13 @@ impl Chain {
 			.map_err(|e| Error::StoreErr(e, "chain get header".to_owned()))
 	}
 
+	/// Get the block marker for the specified block hash.
+	pub fn get_block_marker(&self, bh: &Hash) -> Result<BlockMarker, Error> {
+		self.store
+			.get_block_marker(bh)
+			.map_err(|e| Error::StoreErr(e, "chain get block marker".to_owned()))
+	}
+
 	/// Gets the block header at the provided height
 	pub fn get_header_by_height(&self, height: u64) -> Result<BlockHeader, Error> {
 		self.store
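With the accessor above in place, callers can fetch the marker for any block by hash. A hypothetical call site, with imports and the surrounding context elided:

    // Sketch only: Chain, Hash and Error come from the chain crate.
    fn print_marker(chain: &Chain, block_hash: Hash) -> Result<(), Error> {
        let marker = chain.get_block_marker(&block_hash)?;
        println!(
            "outputs end at {}, kernels end at {}",
            marker.output_pos, marker.kernel_pos
        );
        Ok(())
    }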
@@ -778,16 +802,6 @@ impl Chain {
 			.map_err(|e| Error::StoreErr(e, "chain block exists".to_owned()))
 	}
 
-	/// Retrieve the file index metadata for a given block
-	pub fn get_block_pmmr_file_metadata(
-		&self,
-		h: &Hash,
-	) -> Result<PMMRFileMetadataCollection, Error> {
-		self.store
-			.get_block_pmmr_file_metadata(h)
-			.map_err(|e| Error::StoreErr(e, "retrieve block pmmr metadata".to_owned()))
-	}
-
 	/// Rebuilds height index. Reachable as endpoint POST /chain/height-index
 	pub fn rebuild_header_by_height(&self) -> Result<(), Error> {
 		let head = self.head_header()?;
@@ -112,27 +112,7 @@ pub fn process_block(b: &Block, mut ctx: BlockContext) -> Result<Option<Tip>, Error>
 		Ok(h)
 	});
 
-	match result {
-		Ok(t) => {
-			save_pmmr_metadata(&Tip::from_block(&b.header), &txhashset, ctx.store.clone())?;
-			Ok(t)
-		}
-		Err(e) => Err(e),
-	}
-}
-
-/// Save pmmr index location for a given block
-pub fn save_pmmr_metadata(
-	t: &Tip,
-	txhashset: &txhashset::TxHashSet,
-	store: Arc<ChainStore>,
-) -> Result<(), Error> {
-	// Save pmmr file metadata for this block
-	let block_file_md = txhashset.last_file_metadata();
-	store
-		.save_block_pmmr_file_metadata(&t.last_block_h, &block_file_md)
-		.map_err(|e| Error::StoreErr(e, "saving pmmr file metadata".to_owned()))?;
-	Ok(())
+	result
 }
 
 /// Process the block header.
@@ -37,7 +37,6 @@ const SYNC_HEAD_PREFIX: u8 = 's' as u8;
 const HEADER_HEIGHT_PREFIX: u8 = '8' as u8;
 const COMMIT_POS_PREFIX: u8 = 'c' as u8;
 const BLOCK_MARKER_PREFIX: u8 = 'm' as u8;
-const BLOCK_PMMR_FILE_METADATA_PREFIX: u8 = 'p' as u8;
 
 /// An implementation of the ChainStore trait backed by a simple key-value
 /// store.

@@ -163,7 +162,17 @@ impl ChainStore for ChainKVStore {
 		self.db.delete(&to_key(BLOCK_PREFIX, &mut bh.to_vec())[..])
 	}
 
+	// We are on the current chain if -
+	// * the header by height index matches the header, and
+	// * we are not ahead of the current head
 	fn is_on_current_chain(&self, header: &BlockHeader) -> Result<(), Error> {
+		let head = self.head()?;
+
+		// check we are not out ahead of the current head
+		if header.height > head.height {
+			return Err(Error::NotFoundErr);
+		}
+
 		let header_at_height = self.get_header_by_height(header.height)?;
 		if header.hash() == header_at_height.hash() {
 			Ok(())
@@ -212,12 +221,12 @@ impl ChainStore for ChainKVStore {
 			.delete(&to_key(COMMIT_POS_PREFIX, &mut commit.to_vec()))
 	}
 
-	fn save_block_marker(&self, bh: &Hash, marker: &(u64, u64)) -> Result<(), Error> {
+	fn save_block_marker(&self, bh: &Hash, marker: &BlockMarker) -> Result<(), Error> {
 		self.db
 			.put_ser(&to_key(BLOCK_MARKER_PREFIX, &mut bh.to_vec())[..], &marker)
 	}
 
-	fn get_block_marker(&self, bh: &Hash) -> Result<(u64, u64), Error> {
+	fn get_block_marker(&self, bh: &Hash) -> Result<BlockMarker, Error> {
 		option_to_not_found(
 			self.db
 				.get_ser(&to_key(BLOCK_MARKER_PREFIX, &mut bh.to_vec())),
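The marker lives in the key-value store under a one-byte prefix ('m') plus the block hash, serialized via put_ser. A minimal sketch of that keying scheme, for intuition only; grin_store's to_key may also insert a separator byte between prefix and key:

    const BLOCK_MARKER_PREFIX: u8 = b'm';

    // Build a prefixed key: one tag byte followed by the block hash bytes.
    fn make_key(prefix: u8, hash: &[u8]) -> Vec<u8> {
        let mut key = Vec::with_capacity(1 + hash.len());
        key.push(prefix);
        key.extend_from_slice(hash);
        key
    }

    // make_key(BLOCK_MARKER_PREFIX, block_hash_bytes) then maps to a
    // serialized BlockMarker value in the store.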
@@ -229,29 +238,6 @@ impl ChainStore for ChainKVStore {
 			.delete(&to_key(BLOCK_MARKER_PREFIX, &mut bh.to_vec()))
 	}
 
-	fn save_block_pmmr_file_metadata(
-		&self,
-		h: &Hash,
-		md: &PMMRFileMetadataCollection,
-	) -> Result<(), Error> {
-		self.db.put_ser(
-			&to_key(BLOCK_PMMR_FILE_METADATA_PREFIX, &mut h.to_vec())[..],
-			&md,
-		)
-	}
-
-	fn get_block_pmmr_file_metadata(&self, h: &Hash) -> Result<PMMRFileMetadataCollection, Error> {
-		option_to_not_found(
-			self.db
-				.get_ser(&to_key(BLOCK_PMMR_FILE_METADATA_PREFIX, &mut h.to_vec())),
-		)
-	}
-
-	fn delete_block_pmmr_file_metadata(&self, h: &Hash) -> Result<(), Error> {
-		self.db
-			.delete(&to_key(BLOCK_PMMR_FILE_METADATA_PREFIX, &mut h.to_vec())[..])
-	}
-
 	/// Maintain consistency of the "header_by_height" index by traversing back
 	/// through the current chain and updating "header_by_height" until we reach
 	/// a block_header that is consistent with its height (everything prior to
@@ -33,10 +33,10 @@ use core::core::hash::{Hash, Hashed};
 use core::ser::{PMMRIndexHashable, PMMRable};
 
 use grin_store;
-use grin_store::pmmr::{PMMRBackend, PMMRFileMetadata};
+use grin_store::pmmr::PMMRBackend;
 use grin_store::types::prune_noop;
 use keychain::BlindingFactor;
-use types::{ChainStore, Error, PMMRFileMetadataCollection, TxHashSetRoots};
+use types::{BlockMarker, ChainStore, Error, TxHashSetRoots};
 use util::{zip, LOGGER};
 
 const TXHASHSET_SUBDIR: &'static str = "txhashset";

@@ -57,25 +57,16 @@ impl<T> PMMRHandle<T>
 where
 	T: PMMRable + ::std::fmt::Debug,
 {
-	fn new(
-		root_dir: String,
-		file_name: &str,
-		index_md: Option<PMMRFileMetadata>,
-	) -> Result<PMMRHandle<T>, Error> {
+	fn new(root_dir: String, file_name: &str) -> Result<PMMRHandle<T>, Error> {
 		let path = Path::new(&root_dir).join(TXHASHSET_SUBDIR).join(file_name);
 		fs::create_dir_all(path.clone())?;
-		let be = PMMRBackend::new(path.to_str().unwrap().to_string(), index_md)?;
+		let be = PMMRBackend::new(path.to_str().unwrap().to_string())?;
 		let sz = be.unpruned_size()?;
 		Ok(PMMRHandle {
 			backend: be,
 			last_pos: sz,
 		})
 	}
-
-	/// Return last written positions of hash file and data file
-	pub fn last_file_positions(&self) -> PMMRFileMetadata {
-		self.backend.last_file_positions()
-	}
 }
 
 /// An easy to manipulate structure holding the 3 sum trees necessary to
@@ -99,11 +90,7 @@ pub struct TxHashSet {
 
 impl TxHashSet {
 	/// Open an existing or new set of backends for the TxHashSet
-	pub fn open(
-		root_dir: String,
-		commit_index: Arc<ChainStore>,
-		last_file_positions: Option<PMMRFileMetadataCollection>,
-	) -> Result<TxHashSet, Error> {
+	pub fn open(root_dir: String, commit_index: Arc<ChainStore>) -> Result<TxHashSet, Error> {
 		let output_file_path: PathBuf = [&root_dir, TXHASHSET_SUBDIR, OUTPUT_SUBDIR]
 			.iter()
 			.collect();

@@ -119,21 +106,11 @@ impl TxHashSet {
 			.collect();
 		fs::create_dir_all(kernel_file_path.clone())?;
 
-		let mut output_md = None;
-		let mut rproof_md = None;
-		let mut kernel_md = None;
-
-		if let Some(p) = last_file_positions {
-			output_md = Some(p.output_file_md);
-			rproof_md = Some(p.rproof_file_md);
-			kernel_md = Some(p.kernel_file_md);
-		}
-
 		Ok(TxHashSet {
-			output_pmmr_h: PMMRHandle::new(root_dir.clone(), OUTPUT_SUBDIR, output_md)?,
-			rproof_pmmr_h: PMMRHandle::new(root_dir.clone(), RANGE_PROOF_SUBDIR, rproof_md)?,
-			kernel_pmmr_h: PMMRHandle::new(root_dir.clone(), KERNEL_SUBDIR, kernel_md)?,
-			commit_index: commit_index,
+			output_pmmr_h: PMMRHandle::new(root_dir.clone(), OUTPUT_SUBDIR)?,
+			rproof_pmmr_h: PMMRHandle::new(root_dir.clone(), RANGE_PROOF_SUBDIR)?,
+			kernel_pmmr_h: PMMRHandle::new(root_dir.clone(), KERNEL_SUBDIR)?,
+			commit_index,
 		})
 	}
 
@@ -212,19 +189,10 @@ impl TxHashSet {
 	}
 
 	/// Output and kernel MMR indexes at the end of the provided block
-	pub fn indexes_at(&self, bh: &Hash) -> Result<(u64, u64), Error> {
+	pub fn indexes_at(&self, bh: &Hash) -> Result<BlockMarker, Error> {
 		self.commit_index.get_block_marker(bh).map_err(&From::from)
 	}
 
-	/// Last file positions of Output set.. hash file,data file
-	pub fn last_file_metadata(&self) -> PMMRFileMetadataCollection {
-		PMMRFileMetadataCollection::new(
-			self.output_pmmr_h.last_file_positions(),
-			self.rproof_pmmr_h.last_file_positions(),
-			self.kernel_pmmr_h.last_file_positions(),
-		)
-	}
-
 	/// Get sum tree roots
 	/// TODO: Return data instead of hashes
 	pub fn roots(&mut self) -> (Hash, Hash, Hash) {
|
||||||
let sizes: (u64, u64, u64);
|
let sizes: (u64, u64, u64);
|
||||||
let res: Result<T, Error>;
|
let res: Result<T, Error>;
|
||||||
let rollback: bool;
|
let rollback: bool;
|
||||||
|
|
||||||
{
|
{
|
||||||
let commit_index = trees.commit_index.clone();
|
let commit_index = trees.commit_index.clone();
|
||||||
|
|
||||||
|
@ -327,6 +296,7 @@ where
|
||||||
}
|
}
|
||||||
sizes = extension.sizes();
|
sizes = extension.sizes();
|
||||||
}
|
}
|
||||||
|
|
||||||
match res {
|
match res {
|
||||||
Err(e) => {
|
Err(e) => {
|
||||||
debug!(LOGGER, "Error returned, discarding txhashset extension.");
|
debug!(LOGGER, "Error returned, discarding txhashset extension.");
|
||||||
|
@ -367,7 +337,7 @@ pub struct Extension<'a> {
|
||||||
|
|
||||||
commit_index: Arc<ChainStore>,
|
commit_index: Arc<ChainStore>,
|
||||||
new_output_commits: HashMap<Commitment, u64>,
|
new_output_commits: HashMap<Commitment, u64>,
|
||||||
new_block_markers: HashMap<Hash, (u64, u64)>,
|
new_block_markers: HashMap<Hash, BlockMarker>,
|
||||||
rollback: bool,
|
rollback: bool,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -427,11 +397,11 @@ impl<'a> Extension<'a> {
|
||||||
}
|
}
|
||||||
|
|
||||||
// finally, recording the PMMR positions after this block for future rewind
|
// finally, recording the PMMR positions after this block for future rewind
|
||||||
let last_output_pos = self.output_pmmr.unpruned_size();
|
let marker = BlockMarker {
|
||||||
let last_kernel_pos = self.kernel_pmmr.unpruned_size();
|
output_pos: self.output_pmmr.unpruned_size(),
|
||||||
self.new_block_markers
|
kernel_pos: self.kernel_pmmr.unpruned_size(),
|
||||||
.insert(b.hash(), (last_output_pos, last_kernel_pos));
|
};
|
||||||
|
self.new_block_markers.insert(b.hash(), marker);
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -440,8 +410,8 @@ impl<'a> Extension<'a> {
|
||||||
for (commit, pos) in &self.new_output_commits {
|
for (commit, pos) in &self.new_output_commits {
|
||||||
self.commit_index.save_output_pos(commit, *pos)?;
|
self.commit_index.save_output_pos(commit, *pos)?;
|
||||||
}
|
}
|
||||||
for (bh, tag) in &self.new_block_markers {
|
for (bh, marker) in &self.new_block_markers {
|
||||||
self.commit_index.save_block_marker(bh, tag)?;
|
self.commit_index.save_block_marker(bh, marker)?;
|
||||||
}
|
}
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
@ -566,35 +536,28 @@ impl<'a> Extension<'a> {
|
||||||
pub fn rewind(&mut self, block_header: &BlockHeader) -> Result<(), Error> {
|
pub fn rewind(&mut self, block_header: &BlockHeader) -> Result<(), Error> {
|
||||||
let hash = block_header.hash();
|
let hash = block_header.hash();
|
||||||
let height = block_header.height;
|
let height = block_header.height;
|
||||||
debug!(LOGGER, "Rewind to header at {} [{}]", height, hash); // keep this
|
debug!(LOGGER, "Rewind to header {} @ {}", height, hash);
|
||||||
|
|
||||||
// rewind each MMR
|
// rewind our MMRs to the appropriate pos
|
||||||
let (out_pos_rew, kern_pos_rew) = self.commit_index.get_block_marker(&hash)?;
|
// based on block height and block marker
|
||||||
self.rewind_pos(height, out_pos_rew, kern_pos_rew)?;
|
let marker = self.commit_index.get_block_marker(&hash)?;
|
||||||
|
self.rewind_to_marker(height, &marker)?;
|
||||||
Ok(())
|
Ok(())
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Rewinds the MMRs to the provided positions, given the output and
|
/// Rewinds the MMRs to the provided positions, given the output and
|
||||||
/// kernel we want to rewind to.
|
/// kernel we want to rewind to.
|
||||||
fn rewind_pos(
|
fn rewind_to_marker(&mut self, height: u64, marker: &BlockMarker) -> Result<(), Error> {
|
||||||
&mut self,
|
debug!(LOGGER, "Rewind txhashset to {}, {:?}", height, marker);
|
||||||
height: u64,
|
|
||||||
out_pos_rew: u64,
|
|
||||||
kern_pos_rew: u64,
|
|
||||||
) -> Result<(), Error> {
|
|
||||||
debug!(
|
|
||||||
LOGGER,
|
|
||||||
"Rewind txhashset to output pos: {}, kernel pos: {}", out_pos_rew, kern_pos_rew,
|
|
||||||
);
|
|
||||||
|
|
||||||
self.output_pmmr
|
self.output_pmmr
|
||||||
.rewind(out_pos_rew, height as u32)
|
.rewind(marker.output_pos, height as u32)
|
||||||
.map_err(&Error::TxHashSetErr)?;
|
.map_err(&Error::TxHashSetErr)?;
|
||||||
self.rproof_pmmr
|
self.rproof_pmmr
|
||||||
.rewind(out_pos_rew, height as u32)
|
.rewind(marker.output_pos, height as u32)
|
||||||
.map_err(&Error::TxHashSetErr)?;
|
.map_err(&Error::TxHashSetErr)?;
|
||||||
self.kernel_pmmr
|
self.kernel_pmmr
|
||||||
.rewind(kern_pos_rew, height as u32)
|
.rewind(marker.kernel_pos, height as u32)
|
||||||
.map_err(&Error::TxHashSetErr)?;
|
.map_err(&Error::TxHashSetErr)?;
|
||||||
|
|
||||||
Ok(())
|
Ok(())
|
||||||
|
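Worth noting in rewind_to_marker above: the output and rangeproof MMRs rewind to the same position, because exactly one rangeproof is appended per output, so the two MMRs grow in lockstep; only the kernel MMR carries its own position. A small sketch of that fan-out, with a stand-in Pmmr type rather than the real backend:

    struct BlockMarker {
        output_pos: u64,
        kernel_pos: u64,
    }

    struct Pmmr; // stand-in for the real PMMR handle

    impl Pmmr {
        fn rewind(&mut self, pos: u64, index: u32) -> Result<(), String> {
            let _ = (pos, index); // the real impl truncates backend files here
            Ok(())
        }
    }

    fn rewind_all(
        output: &mut Pmmr,
        rproof: &mut Pmmr,
        kernel: &mut Pmmr,
        marker: &BlockMarker,
        height: u64,
    ) -> Result<(), String> {
        output.rewind(marker.output_pos, height as u32)?;
        // rangeproofs share the output position: one proof per output
        rproof.rewind(marker.output_pos, height as u32)?;
        kernel.rewind(marker.kernel_pos, height as u32)?;
        Ok(())
    }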
@@ -618,15 +581,26 @@ impl<'a> Extension<'a> {
 		}
 	}
 
-	/// Validate the txhashset state against the provided block header.
-	/// Rewinds to that pos for the header first so we see a consistent
-	/// view of the world.
-	/// Note: this is an expensive operation and sets force_rollback
-	/// so the extension is read-only.
-	pub fn validate(&mut self, header: &BlockHeader, skip_rproofs: bool) -> Result<(), Error> {
-		// rewind to the provided header for a consistent view
-		&self.rewind(header)?;
+	/// Validate the various MMR roots against the block header.
+	pub fn validate_roots(&self, header: &BlockHeader) -> Result<(), Error> {
+		// If we are validating the genesis block then
+		// we have no outputs or kernels.
+		// So we are done here.
+		if header.height == 0 {
+			return Ok(());
+		}
 
+		let roots = self.roots();
+		if roots.output_root != header.output_root || roots.rproof_root != header.range_proof_root
+			|| roots.kernel_root != header.kernel_root
+		{
+			return Err(Error::InvalidRoot);
+		}
+		Ok(())
+	}
+
+	/// Validate the txhashset state against the provided block header.
+	pub fn validate(&mut self, header: &BlockHeader, skip_rproofs: bool) -> Result<(), Error> {
 		// validate all hashes and sums within the trees
 		if let Err(e) = self.output_pmmr.validate() {
 			return Err(Error::InvalidTxHashSet(e));

@@ -638,12 +612,10 @@ impl<'a> Extension<'a> {
 			return Err(Error::InvalidTxHashSet(e));
 		}
 
-		// validate the tree roots against the block header
-		let roots = self.roots();
-		if roots.output_root != header.output_root || roots.rproof_root != header.range_proof_root
-			|| roots.kernel_root != header.kernel_root
-		{
-			return Err(Error::InvalidRoot);
+		self.validate_roots(header)?;
+
+		if header.height == 0 {
+			return Ok(());
 		}
 
 		// the real magicking: the sum of all kernel excess should equal the sum
@@ -25,7 +25,6 @@ use core::core::{block, transaction, Block, BlockHeader};
 use core::ser::{self, Readable, Reader, Writeable, Writer};
 use grin_store as store;
 use grin_store;
-use grin_store::pmmr::PMMRFileMetadata;
 use keychain;
 
 bitflags! {

@@ -321,31 +320,14 @@ pub trait ChainStore: Send + Sync {
 
 	/// Saves a marker associated with a block recording the MMR positions of
 	/// its last elements.
-	fn save_block_marker(&self, bh: &Hash, marker: &(u64, u64)) -> Result<(), store::Error>;
+	fn save_block_marker(&self, bh: &Hash, marker: &BlockMarker) -> Result<(), store::Error>;
 
 	/// Retrieves a block marker from a block hash.
-	fn get_block_marker(&self, bh: &Hash) -> Result<(u64, u64), store::Error>;
+	fn get_block_marker(&self, bh: &Hash) -> Result<BlockMarker, store::Error>;
 
 	/// Deletes a block marker associated with the provided hash
 	fn delete_block_marker(&self, bh: &Hash) -> Result<(), store::Error>;
 
-	/// Saves information about the last written PMMR file positions for each
-	/// committed block
-	fn save_block_pmmr_file_metadata(
-		&self,
-		h: &Hash,
-		md: &PMMRFileMetadataCollection,
-	) -> Result<(), store::Error>;
-
-	/// Retrieves stored pmmr file metadata information for a given block
-	fn get_block_pmmr_file_metadata(
-		&self,
-		h: &Hash,
-	) -> Result<PMMRFileMetadataCollection, store::Error>;
-
-	/// Delete stored pmmr file metadata information for a given block
-	fn delete_block_pmmr_file_metadata(&self, h: &Hash) -> Result<(), store::Error>;
-
 	/// Saves the provided block header at the corresponding height. Also check
 	/// the consistency of the height chain in store by assuring previous
 	/// headers are also at their respective heights.
|
@ -355,61 +337,6 @@ pub trait ChainStore: Send + Sync {
|
||||||
fn build_by_height_index(&self, header: &BlockHeader, force: bool) -> Result<(), store::Error>;
|
fn build_by_height_index(&self, header: &BlockHeader, force: bool) -> Result<(), store::Error>;
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Single serializable struct to hold metadata about all PMMR file position
|
|
||||||
/// for a given block
|
|
||||||
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
|
|
||||||
pub struct PMMRFileMetadataCollection {
|
|
||||||
/// file metadata for the output file
|
|
||||||
pub output_file_md: PMMRFileMetadata,
|
|
||||||
/// file metadata for the rangeproof file
|
|
||||||
pub rproof_file_md: PMMRFileMetadata,
|
|
||||||
/// file metadata for the kernel file
|
|
||||||
pub kernel_file_md: PMMRFileMetadata,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Writeable for PMMRFileMetadataCollection {
|
|
||||||
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
|
|
||||||
self.output_file_md.write(writer)?;
|
|
||||||
self.rproof_file_md.write(writer)?;
|
|
||||||
self.kernel_file_md.write(writer)?;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Readable for PMMRFileMetadataCollection {
|
|
||||||
fn read(reader: &mut Reader) -> Result<PMMRFileMetadataCollection, ser::Error> {
|
|
||||||
Ok(PMMRFileMetadataCollection {
|
|
||||||
output_file_md: PMMRFileMetadata::read(reader)?,
|
|
||||||
rproof_file_md: PMMRFileMetadata::read(reader)?,
|
|
||||||
kernel_file_md: PMMRFileMetadata::read(reader)?,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl PMMRFileMetadataCollection {
|
|
||||||
/// Return empty with all file positions = 0
|
|
||||||
pub fn empty() -> PMMRFileMetadataCollection {
|
|
||||||
PMMRFileMetadataCollection {
|
|
||||||
output_file_md: PMMRFileMetadata::empty(),
|
|
||||||
rproof_file_md: PMMRFileMetadata::empty(),
|
|
||||||
kernel_file_md: PMMRFileMetadata::empty(),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Helper to create a new collection
|
|
||||||
pub fn new(
|
|
||||||
output_md: PMMRFileMetadata,
|
|
||||||
rproof_md: PMMRFileMetadata,
|
|
||||||
kernel_md: PMMRFileMetadata,
|
|
||||||
) -> PMMRFileMetadataCollection {
|
|
||||||
PMMRFileMetadataCollection {
|
|
||||||
output_file_md: output_md,
|
|
||||||
rproof_file_md: rproof_md,
|
|
||||||
kernel_file_md: kernel_md,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Bridge between the chain pipeline and the rest of the system. Handles
|
/// Bridge between the chain pipeline and the rest of the system. Handles
|
||||||
/// downstream processing of valid blocks by the rest of the system, most
|
/// downstream processing of valid blocks by the rest of the system, most
|
||||||
/// importantly the broadcasting of blocks to our peers.
|
/// importantly the broadcasting of blocks to our peers.
|
||||||
|
@@ -421,6 +348,44 @@ pub trait ChainAdapter {
 
 /// Dummy adapter used as a placeholder for real implementations
 pub struct NoopAdapter {}
 
 impl ChainAdapter for NoopAdapter {
 	fn block_accepted(&self, _: &Block, _: Options) {}
 }
+
+/// The output and kernel positions that define the size of the MMRs for a
+/// particular block.
+#[derive(Debug, Clone)]
+pub struct BlockMarker {
+	/// The output (and rangeproof) MMR position of the final output in the
+	/// block
+	pub output_pos: u64,
+	/// The kernel position of the final kernel in the block
+	pub kernel_pos: u64,
+}
+
+impl Writeable for BlockMarker {
+	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
+		writer.write_u64(self.output_pos)?;
+		writer.write_u64(self.kernel_pos)?;
+		Ok(())
+	}
+}
+
+impl Readable for BlockMarker {
+	fn read(reader: &mut Reader) -> Result<BlockMarker, ser::Error> {
+		Ok(BlockMarker {
+			output_pos: reader.read_u64()?,
+			kernel_pos: reader.read_u64()?,
+		})
+	}
+}
+
+impl Default for BlockMarker {
+	fn default() -> BlockMarker {
+		BlockMarker {
+			output_pos: 0,
+			kernel_pos: 0,
+		}
+	}
+}
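The Writeable/Readable impls above give the marker a fixed 16-byte encoding: output_pos then kernel_pos, each a u64 written via write_u64 (big-endian in grin's serialization, if memory serves; treat the byte order here as an assumption). A self-contained round-trip sketch using only std:

    fn marker_to_bytes(output_pos: u64, kernel_pos: u64) -> [u8; 16] {
        let mut buf = [0u8; 16];
        buf[..8].copy_from_slice(&output_pos.to_be_bytes());
        buf[8..].copy_from_slice(&kernel_pos.to_be_bytes());
        buf
    }

    fn marker_from_bytes(buf: &[u8; 16]) -> (u64, u64) {
        let mut a = [0u8; 8];
        let mut b = [0u8; 8];
        a.copy_from_slice(&buf[..8]);
        b.copy_from_slice(&buf[8..]);
        (u64::from_be_bytes(a), u64::from_be_bytes(b))
    }

    fn main() {
        let bytes = marker_to_bytes(42, 7);
        assert_eq!(marker_from_bytes(&bytes), (42, 7)); // round trip holds
    }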
@@ -94,13 +94,13 @@ fn data_files() {
 
 		let head = Tip::from_block(&b.header);
 
-		// Check we have indexes for the last block and the block previous
+		// Check we have block markers for the last block and the block previous
 		let cur_pmmr_md = chain
-			.get_block_pmmr_file_metadata(&head.last_block_h)
-			.expect("block pmmr file data doesn't exist");
+			.get_block_marker(&head.last_block_h)
+			.expect("block marker does not exist");
 		chain
-			.get_block_pmmr_file_metadata(&head.prev_block_h)
-			.expect("previous block pmmr file data doesn't exist");
+			.get_block_marker(&head.prev_block_h)
+			.expect("prev block marker does not exist");
 
 		println!("Cur_pmmr_md: {:?}", cur_pmmr_md);
 		chain.validate(false).unwrap();
@@ -799,6 +799,10 @@ impl PruneList {
 /// side of the range, and navigates toward lower siblings toward the right
 /// of the range.
 pub fn peaks(num: u64) -> Vec<u64> {
+	if num == 0 {
+		return vec![];
+	}
+
 	// detecting an invalid mountain range, when siblings exist but no parent
 	// exists
 	if bintree_postorder_height(num + 1) > bintree_postorder_height(num) {

@@ -838,6 +842,10 @@ pub fn peaks(num: u64) -> Vec<u64> {
 /// The number of leaves nodes in a MMR of the provided size. Uses peaks to
 /// get the positions of all full binary trees and uses the height of these
 pub fn n_leaves(mut sz: u64) -> u64 {
+	if sz == 0 {
+		return 0;
+	}
+
 	while bintree_postorder_height(sz + 1) > 0 {
 		sz += 1;
 	}
@@ -1140,16 +1148,6 @@ mod test {
 		}
 	}
 
-	#[test]
-	fn test_leaf_index() {
-		assert_eq!(n_leaves(1), 1);
-		assert_eq!(n_leaves(2), 2);
-		assert_eq!(n_leaves(4), 3);
-		assert_eq!(n_leaves(5), 4);
-		assert_eq!(n_leaves(8), 5);
-		assert_eq!(n_leaves(9), 6);
-	}
-
 	#[test]
 	fn some_all_ones() {
 		for n in vec![1, 7, 255] {

@@ -1190,15 +1188,22 @@ mod test {
 		}
 	}
 
-	// Trst our n_leaves impl does the right thing for various MMR sizes
 	#[test]
-	fn various_n_leaves() {
+	fn test_n_leaves() {
+		// make sure we handle an empty MMR correctly
+		assert_eq!(n_leaves(0), 0);
+
+		// and various sizes on non-empty MMRs
 		assert_eq!(n_leaves(1), 1);
-		// 2 is not a valid size for a tree, but n_leaves rounds up to next valid tree
-		// size
 		assert_eq!(n_leaves(2), 2);
 		assert_eq!(n_leaves(3), 2);
+		assert_eq!(n_leaves(4), 3);
+		assert_eq!(n_leaves(5), 4);
+		assert_eq!(n_leaves(6), 4);
 		assert_eq!(n_leaves(7), 4);
+		assert_eq!(n_leaves(8), 5);
+		assert_eq!(n_leaves(9), 6);
+		assert_eq!(n_leaves(10), 6);
 	}
 
 	/// Find parent and sibling positions for various node positions.
@@ -1281,7 +1286,13 @@ mod test {
 	#[test]
 	fn some_peaks() {
 		// 0 0 1 0 0 1 2 0 0 1 0 0 1 2 3
 
 		let empty: Vec<u64> = vec![];
 
+		// make sure we handle an empty MMR correctly
+		assert_eq!(peaks(0), empty);
+
+		// and various non-empty MMRs
 		assert_eq!(peaks(1), [1]);
 		assert_eq!(peaks(2), empty);
 		assert_eq!(peaks(3), [3]);

@@ -1921,19 +1932,6 @@ mod test {
 		// assert_eq!(pl.get_shift(17), Some(11));
 	}
 
-	#[test]
-	fn n_size_check() {
-		assert_eq!(n_leaves(1), 1);
-		assert_eq!(n_leaves(2), 2);
-		assert_eq!(n_leaves(3), 2);
-		assert_eq!(n_leaves(4), 3);
-		assert_eq!(n_leaves(5), 4);
-		assert_eq!(n_leaves(7), 4);
-		assert_eq!(n_leaves(8), 5);
-		assert_eq!(n_leaves(9), 6);
-		assert_eq!(n_leaves(10), 6);
-	}
-
 	#[test]
 	fn check_all_ones() {
 		for i in 0..1000000 {
@@ -205,8 +205,10 @@ fn body_sync(peers: Arc<Peers>, chain: Arc<chain::Chain>) {
 	);
 
 	for hash in hashes_to_get.clone() {
-		// TODO - Is there a threshold where we sync from most_work_peer (not
-		// more_work_peer)?
+		// TODO - Is there a threshold where we sync from most_work_peer
+		// (not more_work_peer)?
+		// TODO - right now we *only* sync blocks from a full archival node
+		// even if we are requesting recent blocks (i.e. during a fast sync)
 		let peer = peers.more_work_archival_peer();
 		if let Some(peer) = peer {
 			if let Ok(peer) = peer.try_read() {
@@ -18,10 +18,11 @@ use std::io;
 use std::marker;
 
 use core::core::pmmr::{self, family, Backend};
-use core::ser::{self, PMMRable, Readable, Reader, Writeable, Writer};
 use core::core::hash::Hash;
-use util::LOGGER;
+use core::ser;
+use core::ser::PMMRable;
 use types::*;
+use util::LOGGER;
 
 const PMMR_HASH_FILE: &'static str = "pmmr_hash.bin";
 const PMMR_DATA_FILE: &'static str = "pmmr_data.bin";
@@ -31,48 +32,6 @@ const PMMR_PRUNED_FILE: &'static str = "pmmr_pruned.bin";
 /// Maximum number of nodes in the remove log before it gets flushed
 pub const RM_LOG_MAX_NODES: usize = 10_000;
 
-/// Metadata for the PMMR backend's AppendOnlyFile, which can be serialized and
-/// stored
-#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
-pub struct PMMRFileMetadata {
-	/// The block height represented by these indices in the file
-	pub block_height: u64,
-	/// last written index of the hash file
-	pub last_hash_file_pos: u64,
-	/// last written index of the data file
-	pub last_data_file_pos: u64,
-}
-
-impl Writeable for PMMRFileMetadata {
-	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
-		writer.write_u64(self.block_height)?;
-		writer.write_u64(self.last_hash_file_pos)?;
-		writer.write_u64(self.last_data_file_pos)?;
-		Ok(())
-	}
-}
-
-impl Readable for PMMRFileMetadata {
-	fn read(reader: &mut Reader) -> Result<PMMRFileMetadata, ser::Error> {
-		Ok(PMMRFileMetadata {
-			block_height: reader.read_u64()?,
-			last_hash_file_pos: reader.read_u64()?,
-			last_data_file_pos: reader.read_u64()?,
-		})
-	}
-}
-
-impl PMMRFileMetadata {
-	/// Return fields with all positions = 0
-	pub fn empty() -> PMMRFileMetadata {
-		PMMRFileMetadata {
-			block_height: 0,
-			last_hash_file_pos: 0,
-			last_data_file_pos: 0,
-		}
-	}
-}
-
 /// PMMR persistent backend implementation. Relies on multiple facilities to
 /// handle writing, reading and pruning.
 ///
@@ -184,22 +143,27 @@ where
 		}
 	}
 
+	/// Rewind the PMMR backend to the given position.
+	/// Use the index to rewind the rm_log correctly (based on block height).
 	fn rewind(&mut self, position: u64, index: u32) -> Result<(), String> {
+		// Rewind the rm_log based on index (block height)
 		self.rm_log
 			.rewind(index)
 			.map_err(|e| format!("Could not truncate remove log: {}", e))?;
 
-		// Hash file
+		// Rewind the hash file accounting for pruned/compacted pos
 		let shift = self.pruned_nodes.get_shift(position).unwrap_or(0);
-		let record_len = 32;
-		let file_pos = (position - shift) * (record_len as u64);
+		let record_len = 32 as u64;
+		let file_pos = (position - shift) * record_len;
 		self.hash_file.rewind(file_pos);
 
-		// Data file
+		// Rewind the data file accounting for pruned/compacted pos
 		let leaf_shift = self.pruned_nodes.get_leaf_shift(position).unwrap_or(0);
 		let flatfile_pos = pmmr::n_leaves(position);
-		let file_pos = (flatfile_pos - leaf_shift) * T::len() as u64;
-		self.data_file.rewind(file_pos as u64);
+		let record_len = T::len() as u64;
+		let file_pos = (flatfile_pos - leaf_shift) * record_len;
+		self.data_file.rewind(file_pos);
 
 		Ok(())
 	}
 
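The offsets in rewind above follow one formula per file: subtract whatever pruning/compaction already removed from the MMR position, then multiply by the record size (32 bytes per hash; T::len() bytes per data element, counted in leaves). As a compact sketch of that arithmetic:

    // Byte offset to truncate the hash file to when rewinding to `position`;
    // `shift` comes from the prune list (nodes already compacted away).
    fn hash_file_offset(position: u64, shift: u64) -> u64 {
        let record_len = 32u64; // one 32-byte hash per node
        (position - shift) * record_len
    }

    // The data file holds leaves only: convert the MMR position to a leaf
    // count first (pmmr::n_leaves in the real code), then subtract the
    // leaf shift.
    fn data_file_offset(leaves_at_pos: u64, leaf_shift: u64, elem_len: u64) -> u64 {
        (leaves_at_pos - leaf_shift) * elem_len
    }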
@@ -218,11 +182,12 @@ where
 	fn dump_stats(&self) {
 		debug!(
 			LOGGER,
-			"pmmr backend: unpruned - {}, hashes - {}, data - {}, rm_log - {:?}",
+			"pmmr backend: unpruned: {}, hashes: {}, data: {}, rm_log: {}, prune_list: {}",
 			self.unpruned_size().unwrap_or(0),
 			self.hash_size().unwrap_or(0),
 			self.data_size().unwrap_or(0),
-			self.rm_log.removed
+			self.rm_log.removed.len(),
+			self.pruned_nodes.pruned_nodes.len(),
 		);
 	}
 }
@@ -231,32 +196,23 @@ impl<T> PMMRBackend<T>
 where
 	T: PMMRable + ::std::fmt::Debug,
 {
-	/// Instantiates a new PMMR backend that will use the provided directly to
-	/// store its files.
-	pub fn new(data_dir: String, file_md: Option<PMMRFileMetadata>) -> io::Result<PMMRBackend<T>> {
-		let (height, hash_to_pos, data_to_pos) = match file_md {
-			Some(m) => (
-				m.block_height as u32,
-				m.last_hash_file_pos,
-				m.last_data_file_pos,
-			),
-			None => (0, 0, 0),
-		};
-		let hash_file =
-			AppendOnlyFile::open(format!("{}/{}", data_dir, PMMR_HASH_FILE), hash_to_pos)?;
-		let rm_log = RemoveLog::open(format!("{}/{}", data_dir, PMMR_RM_LOG_FILE), height)?;
+	/// Instantiates a new PMMR backend.
+	/// Use the provided dir to store its files.
+	pub fn new(data_dir: String) -> io::Result<PMMRBackend<T>> {
 		let prune_list = read_ordered_vec(format!("{}/{}", data_dir, PMMR_PRUNED_FILE), 8)?;
-		let data_file =
-			AppendOnlyFile::open(format!("{}/{}", data_dir, PMMR_DATA_FILE), data_to_pos)?;
+		let pruned_nodes = pmmr::PruneList {
+			pruned_nodes: prune_list,
+		};
+		let rm_log = RemoveLog::open(format!("{}/{}", data_dir, PMMR_RM_LOG_FILE))?;
+		let hash_file = AppendOnlyFile::open(format!("{}/{}", data_dir, PMMR_HASH_FILE))?;
+		let data_file = AppendOnlyFile::open(format!("{}/{}", data_dir, PMMR_DATA_FILE))?;
 
 		Ok(PMMRBackend {
-			data_dir: data_dir,
-			hash_file: hash_file,
-			data_file: data_file,
-			rm_log: rm_log,
-			pruned_nodes: pmmr::PruneList {
-				pruned_nodes: prune_list,
-			},
+			data_dir,
+			hash_file,
+			data_file,
+			rm_log,
+			pruned_nodes,
 			_marker: marker::PhantomData,
 		})
 	}
@@ -317,13 +273,13 @@ where
 	}
 
 	/// Return last written buffer positions for the hash file and the data file
-	pub fn last_file_positions(&self) -> PMMRFileMetadata {
-		PMMRFileMetadata {
-			block_height: 0,
-			last_hash_file_pos: self.hash_file.last_buffer_pos() as u64,
-			last_data_file_pos: self.data_file.last_buffer_pos() as u64,
-		}
-	}
+	// pub fn last_file_positions(&self) -> PMMRFileMetadata {
+	// 	PMMRFileMetadata {
+	// 		block_height: 0,
+	// 		last_hash_file_pos: self.hash_file.last_buffer_pos() as u64,
+	// 		last_data_file_pos: self.data_file.last_buffer_pos() as u64,
+	// 	}
+	// }
 
 	/// Checks the length of the remove log to see if it should get compacted.
 	/// If so, the remove log is flushed into the pruned list, which itself gets
|
@ -417,14 +373,14 @@ where
|
||||||
tmp_prune_file_hash.clone(),
|
tmp_prune_file_hash.clone(),
|
||||||
format!("{}/{}", self.data_dir, PMMR_HASH_FILE),
|
format!("{}/{}", self.data_dir, PMMR_HASH_FILE),
|
||||||
)?;
|
)?;
|
||||||
self.hash_file = AppendOnlyFile::open(format!("{}/{}", self.data_dir, PMMR_HASH_FILE), 0)?;
|
self.hash_file = AppendOnlyFile::open(format!("{}/{}", self.data_dir, PMMR_HASH_FILE))?;
|
||||||
|
|
||||||
// 5. Rename the compact copy of the data file and reopen it.
|
// 5. Rename the compact copy of the data file and reopen it.
|
||||||
fs::rename(
|
fs::rename(
|
||||||
tmp_prune_file_data.clone(),
|
tmp_prune_file_data.clone(),
|
||||||
format!("{}/{}", self.data_dir, PMMR_DATA_FILE),
|
format!("{}/{}", self.data_dir, PMMR_DATA_FILE),
|
||||||
)?;
|
)?;
|
||||||
self.data_file = AppendOnlyFile::open(format!("{}/{}", self.data_dir, PMMR_DATA_FILE), 0)?;
|
self.data_file = AppendOnlyFile::open(format!("{}/{}", self.data_dir, PMMR_DATA_FILE))?;
|
||||||
|
|
||||||
// 6. Truncate the rm log based on pos removed.
|
// 6. Truncate the rm log based on pos removed.
|
||||||
// Excluding roots which remain in rm log.
|
// Excluding roots which remain in rm log.
|
||||||
|
|
|
@@ -49,9 +49,8 @@ pub struct AppendOnlyFile {
 }
 
 impl AppendOnlyFile {
-	/// Open a file (existing or not) as append-only, backed by a mmap. Sets
-	/// the last written pos to to_pos if > 0, otherwise the end of the file
-	pub fn open(path: String, to_pos: u64) -> io::Result<AppendOnlyFile> {
+	/// Open a file (existing or not) as append-only, backed by a mmap.
+	pub fn open(path: String) -> io::Result<AppendOnlyFile> {
 		let file = OpenOptions::new()
 			.read(true)
 			.append(true)
|
@ -65,13 +64,10 @@ impl AppendOnlyFile {
|
||||||
buffer: vec![],
|
buffer: vec![],
|
||||||
buffer_start_bak: 0,
|
buffer_start_bak: 0,
|
||||||
};
|
};
|
||||||
|
// if we have a non-empty file then mmap it.
|
||||||
if let Ok(sz) = aof.size() {
|
if let Ok(sz) = aof.size() {
|
||||||
let mut buf_start = sz;
|
if sz > 0 {
|
||||||
if to_pos > 0 && to_pos <= buf_start {
|
aof.buffer_start = sz as usize;
|
||||||
buf_start = to_pos;
|
|
||||||
}
|
|
||||||
if buf_start > 0 {
|
|
||||||
aof.buffer_start = buf_start as usize;
|
|
||||||
aof.mmap = Some(unsafe { memmap::Mmap::map(&aof.file)? });
|
aof.mmap = Some(unsafe { memmap::Mmap::map(&aof.file)? });
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@@ -149,6 +145,11 @@ impl AppendOnlyFile {
 			return vec![];
 		}
 		let mmap = self.mmap.as_ref().unwrap();
 
+		if mmap.len() < (offset + length) {
+			return vec![];
+		}
+
 		(&mmap[offset..(offset + length)]).to_vec()
 	}
 
@@ -245,21 +246,16 @@ pub struct RemoveLog {
 }
 
 impl RemoveLog {
-	/// Open the remove log file. The content of the file will be read in memory
-	/// for fast checking.
-	pub fn open(path: String, rewind_to_index: u32) -> io::Result<RemoveLog> {
+	/// Open the remove log file.
+	/// The content of the file will be read in memory for fast checking.
+	pub fn open(path: String) -> io::Result<RemoveLog> {
 		let removed = read_ordered_vec(path.clone(), 12)?;
-		let mut rl = RemoveLog {
+		Ok(RemoveLog {
 			path: path,
 			removed: removed,
 			removed_tmp: vec![],
 			removed_bak: vec![],
-		};
-		if rewind_to_index > 0 {
-			rl.rewind(rewind_to_index)?;
-			rl.flush()?;
-		}
-		Ok(rl)
+		})
 	}
 
 	/// Rewinds the remove log back to the provided index.
@@ -26,7 +26,7 @@ use store::types::prune_noop;
 #[test]
 fn pmmr_append() {
 	let (data_dir, elems) = setup("append");
-	let mut backend = store::pmmr::PMMRBackend::new(data_dir.to_string(), None).unwrap();
+	let mut backend = store::pmmr::PMMRBackend::new(data_dir.to_string()).unwrap();
 
 	// adding first set of 4 elements and sync
 	let mut mmr_size = load(0, &elems[0..4], &mut backend);

@@ -76,7 +76,7 @@ fn pmmr_compact_leaf_sibling() {
 	let (data_dir, elems) = setup("compact_leaf_sibling");
 
 	// setup the mmr store with all elements
-	let mut backend = store::pmmr::PMMRBackend::new(data_dir.to_string(), None).unwrap();
+	let mut backend = store::pmmr::PMMRBackend::new(data_dir.to_string()).unwrap();
 	let mmr_size = load(0, &elems[..], &mut backend);
 	backend.sync().unwrap();
 

@@ -146,7 +146,7 @@ fn pmmr_prune_compact() {
 	let (data_dir, elems) = setup("prune_compact");
 
 	// setup the mmr store with all elements
-	let mut backend = store::pmmr::PMMRBackend::new(data_dir.to_string(), None).unwrap();
+	let mut backend = store::pmmr::PMMRBackend::new(data_dir.to_string()).unwrap();
 	let mmr_size = load(0, &elems[..], &mut backend);
 	backend.sync().unwrap();
 

@@ -194,7 +194,7 @@ fn pmmr_reload() {
 	let (data_dir, elems) = setup("reload");
 
 	// set everything up with an initial backend
-	let mut backend = store::pmmr::PMMRBackend::new(data_dir.to_string(), None).unwrap();
+	let mut backend = store::pmmr::PMMRBackend::new(data_dir.to_string()).unwrap();
 
 	let mmr_size = load(0, &elems[..], &mut backend);
 

@@ -248,7 +248,7 @@ fn pmmr_reload() {
 	// create a new backend referencing the data files
 	// and check everything still works as expected
 	{
-		let mut backend = store::pmmr::PMMRBackend::new(data_dir.to_string(), None).unwrap();
+		let mut backend = store::pmmr::PMMRBackend::new(data_dir.to_string()).unwrap();
 		assert_eq!(backend.unpruned_size().unwrap(), mmr_size);
 		{
 			let pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);

@@ -286,7 +286,7 @@ fn pmmr_reload() {
 #[test]
 fn pmmr_rewind() {
 	let (data_dir, elems) = setup("rewind");
-	let mut backend = store::pmmr::PMMRBackend::new(data_dir.clone(), None).unwrap();
+	let mut backend = store::pmmr::PMMRBackend::new(data_dir.clone()).unwrap();
 
 	// adding elements and keeping the corresponding root
 	let mut mmr_size = load(0, &elems[0..4], &mut backend);

@@ -371,7 +371,7 @@ fn pmmr_rewind() {
 #[test]
 fn pmmr_compact_single_leaves() {
 	let (data_dir, elems) = setup("compact_single_leaves");
-	let mut backend = store::pmmr::PMMRBackend::new(data_dir.clone(), None).unwrap();
+	let mut backend = store::pmmr::PMMRBackend::new(data_dir.clone()).unwrap();
 	let mmr_size = load(0, &elems[0..5], &mut backend);
 	backend.sync().unwrap();
 

@@ -403,7 +403,7 @@ fn pmmr_compact_single_leaves() {
 #[test]
 fn pmmr_compact_entire_peak() {
 	let (data_dir, elems) = setup("compact_entire_peak");
-	let mut backend = store::pmmr::PMMRBackend::new(data_dir.clone(), None).unwrap();
+	let mut backend = store::pmmr::PMMRBackend::new(data_dir.clone()).unwrap();
 	let mmr_size = load(0, &elems[0..5], &mut backend);
 	backend.sync().unwrap();
 

@@ -442,7 +442,7 @@ fn pmmr_compact_entire_peak() {
 #[test]
 fn pmmr_compact_horizon() {
 	let (data_dir, elems) = setup("compact_horizon");
-	let mut backend = store::pmmr::PMMRBackend::new(data_dir.clone(), None).unwrap();
+	let mut backend = store::pmmr::PMMRBackend::new(data_dir.clone()).unwrap();
 	let mmr_size = load(0, &elems[..], &mut backend);
 	backend.sync().unwrap();
 

@@ -517,8 +517,7 @@ fn pmmr_compact_horizon() {
 	// recheck stored data
 	{
 		// recreate backend
-		let backend =
-			store::pmmr::PMMRBackend::<TestElem>::new(data_dir.to_string(), None).unwrap();
+		let backend = store::pmmr::PMMRBackend::<TestElem>::new(data_dir.to_string()).unwrap();
 
 		assert_eq!(backend.data_size().unwrap(), 17);
 		assert_eq!(backend.hash_size().unwrap(), 33);

@@ -532,8 +531,7 @@ fn pmmr_compact_horizon() {
 	}
 
 	{
-		let mut backend =
-			store::pmmr::PMMRBackend::<TestElem>::new(data_dir.to_string(), None).unwrap();
+		let mut backend = store::pmmr::PMMRBackend::<TestElem>::new(data_dir.to_string()).unwrap();
 
 		{
 			let mut pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);

@@ -549,8 +547,7 @@ fn pmmr_compact_horizon() {
 	// recheck stored data
 	{
 		// recreate backend
-		let backend =
-			store::pmmr::PMMRBackend::<TestElem>::new(data_dir.to_string(), None).unwrap();
+		let backend = store::pmmr::PMMRBackend::<TestElem>::new(data_dir.to_string()).unwrap();
 
 		// 0010012001001230
 