add segmenter for generating segments from txhashset with consistent rewind (#3482)

* add segmenter for generating segments from txhashset with consistent rewind

* rework segmenter to take a txhashset wrapped in rwlock
rework our rewindable pmmr so we can convert to readonly easily

* placeholder code for rewinding readonly txhashset extension to build a rangeproof segment

* segment creation for outputs/rangeproofs/kernels/bitmaps

* placeholder segment impl

* commit

* rework segmenter to use a cached bitmap (rewind is expensive)

* cache segmenter instance based on current archive header

* integrate the real segment and segment identifier with our segmenter

* exercise the segmenter code on chain init

* wrap accumulator in an arc, no need to clone each time
Antioch Peverell 2020-11-23 19:07:07 +00:00 committed by GitHub
parent 5282ecb12f
commit cba3137338
11 changed files with 335 additions and 76 deletions
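
For orientation, here is a minimal sketch (not part of this commit) of how a peer-facing handler might drive the new API once PIBD segment requests start arriving. The handler name and request plumbing are assumed; only Chain::segmenter(), Segmenter::kernel_segment() and SegmentIdentifier come from the code below.

// Hypothetical request handler; assumes the grin_chain types used below are in scope.
fn handle_kernel_segment_request(
    chain: &Chain,
    id: SegmentIdentifier,
) -> Result<Segment<TxKernel>, Error> {
    // The segmenter is cached per archive period, so repeated requests
    // only pay the expensive rewind once every ~12 hours.
    let segmenter = chain.segmenter()?;
    segmenter.kernel_segment(id)
}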


@@ -20,7 +20,7 @@ use crate::core::core::merkle_proof::MerkleProof;
use crate::core::core::verifier_cache::VerifierCache;
use crate::core::core::{
Block, BlockHeader, BlockSums, Committed, Inputs, KernelFeatures, Output, OutputIdentifier,
-Transaction, TxKernel,
+SegmentIdentifier, Transaction, TxKernel,
};
use crate::core::global;
use crate::core::pow;
@@ -29,12 +29,13 @@ use crate::error::{Error, ErrorKind};
use crate::pipe;
use crate::store;
use crate::txhashset;
-use crate::txhashset::{PMMRHandle, TxHashSet};
+use crate::txhashset::{PMMRHandle, Segmenter, TxHashSet};
use crate::types::{
BlockStatus, ChainAdapter, CommitPos, NoStatus, Options, Tip, TxHashsetWriteStatus,
};
use crate::util::secp::pedersen::{Commitment, RangeProof};
-use crate::{util::RwLock, ChainStore};
+use crate::util::RwLock;
+use crate::ChainStore;
use grin_core::ser;
use grin_store::Error::NotFoundErr;
use std::fs::{self, File};
@@ -152,6 +153,7 @@ pub struct Chain {
header_pmmr: Arc<RwLock<txhashset::PMMRHandle<BlockHeader>>>,
sync_pmmr: Arc<RwLock<txhashset::PMMRHandle<BlockHeader>>>,
verifier_cache: Arc<RwLock<dyn VerifierCache>>,
pibd_segmenter: Arc<RwLock<Option<Segmenter>>>,
// POW verification function
pow_verifier: fn(&BlockHeader) -> Result<(), pow::Error>,
archive_mode: bool,
@@ -217,6 +219,7 @@ impl Chain {
txhashset: Arc::new(RwLock::new(txhashset)),
header_pmmr: Arc::new(RwLock::new(header_pmmr)),
sync_pmmr: Arc::new(RwLock::new(sync_pmmr)),
pibd_segmenter: Arc::new(RwLock::new(None)),
pow_verifier,
verifier_cache,
archive_mode,
@@ -225,6 +228,22 @@
chain.log_heads()?;
// Temporarily exercising the initialization process.
// Note: This is *really* slow because we are starting from cold.
//
// This is not required as we will lazily initialize our segmenter as required
// once we start receiving PIBD segment requests.
// In reality we will do this based on PIBD segment requests.
// Initialization (once per 12 hour period) will not be this slow once lmdb and PMMRs
// are warmed up.
{
let segmenter = chain.segmenter()?;
let _ = segmenter.kernel_segment(SegmentIdentifier { height: 9, idx: 0 });
let _ = segmenter.bitmap_segment(SegmentIdentifier { height: 9, idx: 0 });
let _ = segmenter.output_segment(SegmentIdentifier { height: 11, idx: 0 });
let _ = segmenter.rangeproof_segment(SegmentIdentifier { height: 7, idx: 0 });
}
Ok(chain)
}
@@ -815,6 +834,64 @@ impl Chain {
})
}
/// The segmenter is responsible for generating PIBD segments.
/// We cache a segmenter instance based on the current archive period (new period every 12 hours).
/// This allows us to efficiently generate bitmap segments for the current archive period.
///
/// It is a relatively expensive operation to initialize and cache a new segmenter instance
/// as this involves rewinding the txhashset by approx 720 blocks (12 hours).
///
/// Caller is responsible for only doing this when required.
/// Caller should verify a peer segment request is valid before calling this for example.
///
pub fn segmenter(&self) -> Result<Segmenter, Error> {
// The archive header corresponds to the data we will segment.
let ref archive_header = self.txhashset_archive_header()?;
// Use our cached segmenter if we have one and the associated header matches.
if let Some(x) = self.pibd_segmenter.read().as_ref() {
if x.header() == archive_header {
return Ok(x.clone());
}
}
// We have no cached segmenter or the cached segmenter is no longer useful.
// Initialize a new segmenter, cache it and return it.
let segmenter = self.init_segmenter(archive_header)?;
let mut cache = self.pibd_segmenter.write();
*cache = Some(segmenter.clone());
return Ok(segmenter);
}
/// This is an expensive rewind to recreate bitmap state but we only need to do this once.
/// Caller is responsible for "caching" the segmenter (per archive period) for reuse.
fn init_segmenter(&self, header: &BlockHeader) -> Result<Segmenter, Error> {
let now = Instant::now();
debug!(
"init_segmenter: initializing new segmenter for {} at {}",
header.hash(),
header.height
);
let mut header_pmmr = self.header_pmmr.write();
let mut txhashset = self.txhashset.write();
let bitmap_snapshot =
txhashset::extending_readonly(&mut header_pmmr, &mut txhashset, |ext, batch| {
ext.extension.rewind(header, batch)?;
Ok(ext.extension.bitmap_accumulator())
})?;
debug!("init_segmenter: done, took {}ms", now.elapsed().as_millis());
Ok(Segmenter::new(
self.txhashset(),
Arc::new(bitmap_snapshot),
header.clone(),
))
}
/// To support the ability to download the txhashset from multiple peers in parallel,
/// the peers must all agree on the exact binary representation of the txhashset.
/// This means compacting and rewinding to the exact same header.


@@ -13,6 +13,7 @@
// limitations under the License.
//! Error types for chain
use crate::core::core::pmmr::segment;
use crate::core::core::{block, committed, transaction};
use crate::core::ser;
use crate::keychain;
@@ -149,6 +150,9 @@
/// Error during chain sync
#[fail(display = "Sync error")]
SyncError(String),
/// PIBD segment related error
#[fail(display = "Segment error")]
SegmentError(segment::SegmentError),
}
impl Display for Error {
@@ -273,6 +277,14 @@
}
}
impl From<segment::SegmentError> for Error {
fn from(error: segment::SegmentError) -> Error {
Error {
inner: Context::new(ErrorKind::SegmentError(error)),
}
}
}
impl From<secp::Error> for Error {
fn from(e: secp::Error) -> Error {
Error {


@@ -17,10 +17,12 @@
mod bitmap_accumulator;
mod rewindable_kernel_view;
mod segmenter;
mod txhashset;
mod utxo_view;
pub use self::bitmap_accumulator::*;
pub use self::rewindable_kernel_view::*;
pub use self::segmenter::*;
pub use self::txhashset::*;
pub use self::utxo_view::*;


@@ -50,7 +50,7 @@ impl BitmapAccumulator {
/// Create a new empty bitmap accumulator.
pub fn new() -> BitmapAccumulator {
BitmapAccumulator {
-backend: VecBackend::new_hash_only(),
+backend: VecBackend::new(),
}
}
@@ -176,9 +176,12 @@
/// The root hash of the bitmap accumulator MMR.
pub fn root(&self) -> Hash {
self.readonly_pmmr().root().expect("no root, invalid tree")
}
/// Readonly access to our internal data.
pub fn readonly_pmmr(&self) -> ReadonlyPMMR<BitmapChunk, VecBackend<BitmapChunk>> {
ReadonlyPMMR::at(&self.backend, self.backend.size())
-.root()
-.expect("no root, invalid tree")
}
}


@@ -14,7 +14,7 @@
//! Lightweight readonly view into kernel MMR for convenience.
-use crate::core::core::pmmr::RewindablePMMR;
+use crate::core::core::pmmr::{ReadablePMMR, ReadonlyPMMR, RewindablePMMR};
use crate::core::core::{BlockHeader, TxKernel};
use crate::error::{Error, ErrorKind};
use grin_store::pmmr::PMMRBackend;
@@ -54,7 +54,10 @@ impl<'a> RewindableKernelView<'a> {
/// fast sync where a reorg past the horizon could allow a whole rewrite of
/// the kernel set.
pub fn validate_root(&self) -> Result<(), Error> {
-let root = self.pmmr.root().map_err(|_| ErrorKind::InvalidRoot)?;
+let root = self
+.readonly_pmmr()
+.root()
+.map_err(|_| ErrorKind::InvalidRoot)?;
if root != self.header.kernel_root {
return Err(ErrorKind::InvalidTxHashSet(format!(
"Kernel root at {} does not match",
@@ -64,4 +67,9 @@
}
Ok(())
}
/// Readonly view of our internal data.
pub fn readonly_pmmr(&self) -> ReadonlyPMMR<TxKernel, PMMRBackend<TxKernel>> {
self.pmmr.as_readonly()
}
}


@@ -0,0 +1,149 @@
// Copyright 2020 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Generation of the various necessary segments requested during PIBD.
use std::{sync::Arc, time::Instant};
use crate::core::core::hash::Hash;
use crate::core::core::pmmr::ReadablePMMR;
use crate::core::core::{BlockHeader, OutputIdentifier, Segment, SegmentIdentifier, TxKernel};
use crate::error::{Error, ErrorKind};
use crate::txhashset::{BitmapAccumulator, BitmapChunk, TxHashSet};
use crate::util::secp::pedersen::RangeProof;
use crate::util::RwLock;
/// Segmenter for generating PIBD segments.
#[derive(Clone)]
pub struct Segmenter {
txhashset: Arc<RwLock<TxHashSet>>,
bitmap_snapshot: Arc<BitmapAccumulator>,
header: BlockHeader,
}
impl Segmenter {
/// Create a new segmenter based on the provided txhashset.
pub fn new(
txhashset: Arc<RwLock<TxHashSet>>,
bitmap_snapshot: Arc<BitmapAccumulator>,
header: BlockHeader,
) -> Segmenter {
Segmenter {
txhashset,
bitmap_snapshot,
header,
}
}
/// Header associated with this segmenter instance.
/// The bitmap "snapshot" corresponds to rewound state at this header.
pub fn header(&self) -> &BlockHeader {
&self.header
}
/// Create a kernel segment.
pub fn kernel_segment(&self, id: SegmentIdentifier) -> Result<Segment<TxKernel>, Error> {
let now = Instant::now();
let txhashset = self.txhashset.read();
let kernel_pmmr = txhashset.kernel_pmmr_at(&self.header);
let segment = Segment::from_pmmr(id, &kernel_pmmr, false)?;
debug!(
"kernel_segment: id: ({}, {}), leaves: {}, hashes: {}, proof hashes: {}, took {}ms",
segment.id().height,
segment.id().idx,
segment.leaf_iter().count(),
segment.hash_iter().count(),
segment.proof().size(),
now.elapsed().as_millis()
);
Ok(segment)
}
/// The root of the output PMMR based on size from the header.
fn output_root(&self) -> Result<Hash, Error> {
let txhashset = self.txhashset.read();
let pmmr = txhashset.output_pmmr_at(&self.header);
let root = pmmr.root().map_err(&ErrorKind::TxHashSetErr)?;
Ok(root)
}
/// The root of the bitmap snapshot PMMR.
fn bitmap_root(&self) -> Result<Hash, Error> {
let pmmr = self.bitmap_snapshot.readonly_pmmr();
let root = pmmr.root().map_err(&ErrorKind::TxHashSetErr)?;
Ok(root)
}
/// Create a utxo bitmap segment based on our bitmap "snapshot" and return it with
/// the corresponding output root.
pub fn bitmap_segment(
&self,
id: SegmentIdentifier,
) -> Result<(Segment<BitmapChunk>, Hash), Error> {
let now = Instant::now();
let bitmap_pmmr = self.bitmap_snapshot.readonly_pmmr();
let segment = Segment::from_pmmr(id, &bitmap_pmmr, false)?;
let output_root = self.output_root()?;
debug!(
"bitmap_segment: id: ({}, {}), leaves: {}, hashes: {}, proof hashes: {}, took {}ms",
segment.id().height,
segment.id().idx,
segment.leaf_iter().count(),
segment.hash_iter().count(),
segment.proof().size(),
now.elapsed().as_millis()
);
Ok((segment, output_root))
}
/// Create an output segment and return it with the corresponding bitmap root.
pub fn output_segment(
&self,
id: SegmentIdentifier,
) -> Result<(Segment<OutputIdentifier>, Hash), Error> {
let now = Instant::now();
let txhashset = self.txhashset.read();
let output_pmmr = txhashset.output_pmmr_at(&self.header);
let segment = Segment::from_pmmr(id, &output_pmmr, true)?;
let bitmap_root = self.bitmap_root()?;
debug!(
"output_segment: id: ({}, {}), leaves: {}, hashes: {}, proof hashes: {}, took {}ms",
segment.id().height,
segment.id().idx,
segment.leaf_iter().count(),
segment.hash_iter().count(),
segment.proof().size(),
now.elapsed().as_millis()
);
Ok((segment, bitmap_root))
}
/// Create a rangeproof segment.
pub fn rangeproof_segment(&self, id: SegmentIdentifier) -> Result<Segment<RangeProof>, Error> {
let now = Instant::now();
let txhashset = self.txhashset.read();
let pmmr = txhashset.rangeproof_pmmr_at(&self.header);
let segment = Segment::from_pmmr(id, &pmmr, true)?;
debug!(
"rangeproof_segment: id: ({}, {}), leaves: {}, hashes: {}, proof hashes: {}, took {}ms",
segment.id().height,
segment.id().idx,
segment.leaf_iter().count(),
segment.hash_iter().count(),
segment.proof().size(),
now.elapsed().as_millis()
);
Ok(segment)
}
}
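
A hypothetical usage sketch (not in this commit) of the paired accessors above: output_segment() returns the bitmap root and bitmap_segment() returns the output root, presumably so a downloading peer can check both structures against the same archive header. Only the Segmenter methods and types shown above are assumed.

// Illustrative only; the function name is not from this commit.
fn serve_output_and_bitmap(
    segmenter: &Segmenter,
    id: SegmentIdentifier,
) -> Result<(), Error> {
    // Output leaves plus the bitmap root they should be checked alongside.
    let (_output_segment, _bitmap_root) = segmenter.output_segment(id)?;
    // UTXO bitmap chunks plus the output root for the same header.
    let (_bitmap_segment, _output_root) = segmenter.bitmap_segment(id)?;
    Ok(())
}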


@@ -19,14 +19,16 @@ use crate::core::consensus::WEEK_HEIGHT;
use crate::core::core::committed::Committed;
use crate::core::core::hash::{Hash, Hashed};
use crate::core::core::merkle_proof::MerkleProof;
-use crate::core::core::pmmr::{self, Backend, ReadablePMMR, ReadonlyPMMR, RewindablePMMR, PMMR};
+use crate::core::core::pmmr::{
+self, Backend, ReadablePMMR, ReadonlyPMMR, RewindablePMMR, VecBackend, PMMR,
+};
use crate::core::core::{Block, BlockHeader, KernelFeatures, Output, OutputIdentifier, TxKernel};
use crate::core::global;
use crate::core::ser::{PMMRable, ProtocolVersion};
use crate::error::{Error, ErrorKind};
use crate::linked_list::{ListIndex, PruneableListIndex, RewindableListIndex};
use crate::store::{self, Batch, ChainStore};
-use crate::txhashset::bitmap_accumulator::BitmapAccumulator;
+use crate::txhashset::bitmap_accumulator::{BitmapAccumulator, BitmapChunk};
use crate::txhashset::{RewindableKernelView, UTXOView};
use crate::types::{CommitPos, OutputRoots, Tip, TxHashSetRoots, TxHashsetWriteStatus};
use crate::util::secp::pedersen::{Commitment, RangeProof};
@@ -301,6 +303,30 @@ impl TxHashSet {
.get_last_n_insertions(distance)
}
/// Efficient view into the kernel PMMR based on size in header.
pub fn kernel_pmmr_at(
&self,
header: &BlockHeader,
) -> ReadonlyPMMR<TxKernel, PMMRBackend<TxKernel>> {
ReadonlyPMMR::at(&self.kernel_pmmr_h.backend, header.kernel_mmr_size)
}
/// Efficient view into the output PMMR based on size in header.
pub fn output_pmmr_at(
&self,
header: &BlockHeader,
) -> ReadonlyPMMR<OutputIdentifier, PMMRBackend<OutputIdentifier>> {
ReadonlyPMMR::at(&self.output_pmmr_h.backend, header.output_mmr_size)
}
/// Efficient view into the rangeproof PMMR based on size in header.
pub fn rangeproof_pmmr_at(
&self,
header: &BlockHeader,
) -> ReadonlyPMMR<RangeProof, PMMRBackend<RangeProof>> {
ReadonlyPMMR::at(&self.rproof_pmmr_h.backend, header.output_mmr_size)
}
/// Convenience function to query the db for a header by its hash.
pub fn get_block_header(&self, hash: &Hash) -> Result<BlockHeader, Error> {
Ok(self.commit_index.get_block_header(&hash)?)
@@ -1069,11 +1095,33 @@ impl<'a> Extension<'a> {
pub fn utxo_view(&'a self, header_ext: &'a HeaderExtension<'a>) -> UTXOView<'a> {
UTXOView::new(
header_ext.pmmr.readonly_pmmr(),
-self.output_pmmr.readonly_pmmr(),
-self.rproof_pmmr.readonly_pmmr(),
+self.output_readonly_pmmr(),
+self.rproof_readonly_pmmr(),
)
}
/// Readonly view of our output data.
pub fn output_readonly_pmmr(
&self,
) -> ReadonlyPMMR<OutputIdentifier, PMMRBackend<OutputIdentifier>> {
self.output_pmmr.readonly_pmmr()
}
/// Take a snapshot of our bitmap accumulator
pub fn bitmap_accumulator(&self) -> BitmapAccumulator {
self.bitmap_accumulator.clone()
}
/// Readonly view of our bitmap accumulator data.
pub fn bitmap_readonly_pmmr(&self) -> ReadonlyPMMR<BitmapChunk, VecBackend<BitmapChunk>> {
self.bitmap_accumulator.readonly_pmmr()
}
/// Readonly view of our rangeproof data.
pub fn rproof_readonly_pmmr(&self) -> ReadonlyPMMR<RangeProof, PMMRBackend<RangeProof>> {
self.rproof_pmmr.readonly_pmmr()
}
/// Apply a new block to the current txhashset extension (output, rangeproof, kernel MMRs).
/// Returns a vec of commit_pos representing the pos and height of the outputs spent
/// by this block.
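
A small sketch of how the new *_pmmr_at views added above might be used outside the segmenter. The helper name is illustrative, and it assumes ReadablePMMR is in scope so root() is available on the readonly view, returning Result<Hash, String> as elsewhere in this diff.

fn output_root_at(txhashset: &TxHashSet, header: &BlockHeader) -> Result<Hash, String> {
    // Readonly view of the output PMMR sized to the given header.
    txhashset.output_pmmr_at(header).root()
}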


@@ -17,9 +17,8 @@
use std::marker;
-use crate::core::hash::{Hash, ZERO_HASH};
-use crate::core::pmmr::{bintree_postorder_height, is_leaf, peaks, Backend};
-use crate::ser::{PMMRIndexHashable, PMMRable};
+use crate::core::pmmr::{bintree_postorder_height, Backend, ReadonlyPMMR};
+use crate::ser::PMMRable;
/// Rewindable (but still readonly) view of a PMMR.
pub struct RewindablePMMR<'a, T, B>
@@ -49,11 +48,6 @@ where
}
}
-/// Reference to the underlying storage backend.
-pub fn backend(&'a self) -> &dyn Backend<T> {
-self.backend
-}
/// Build a new readonly PMMR pre-initialized to
/// last_pos with the provided backend.
pub fn at(backend: &'a B, last_pos: u64) -> RewindablePMMR<'_, T, B> {
@@ -74,62 +68,14 @@
while bintree_postorder_height(pos + 1) > 0 {
pos += 1;
}
self.last_pos = pos;
Ok(())
}
-/// Get the data element at provided position in the MMR.
-pub fn get_data(&self, pos: u64) -> Option<T::E> {
-if pos > self.last_pos {
-// If we are beyond the rhs of the MMR return None.
-None
-} else if is_leaf(pos) {
-// If we are a leaf then get data from the backend.
-self.backend.get_data(pos)
-} else {
-// If we are not a leaf then return None as only leaves have data.
-None
-}
-}
-/// Is the MMR empty?
-pub fn is_empty(&self) -> bool {
-self.last_pos == 0
-}
-/// Computes the root of the MMR. Find all the peaks in the current
-/// tree and "bags" them to get a single peak.
-pub fn root(&self) -> Result<Hash, String> {
-if self.is_empty() {
-return Ok(ZERO_HASH);
-}
-let mut res = None;
-for peak in self.peaks().iter().rev() {
-res = match res {
-None => Some(*peak),
-Some(rhash) => Some((*peak, rhash).hash_with_index(self.unpruned_size())),
-}
-}
-res.ok_or_else(|| "no root, invalid tree".to_owned())
-}
-/// Returns a vec of the peaks of this MMR.
-pub fn peaks(&self) -> Vec<Hash> {
-let peaks_pos = peaks(self.last_pos);
-peaks_pos
-.into_iter()
-.filter_map(|pi| {
-// here we want to get from underlying hash file
-// as the pos *may* have been "removed"
-self.backend.get_from_file(pi)
-})
-.collect()
-}
-/// Total size of the tree, including intermediary nodes and ignoring any
-/// pruning.
-pub fn unpruned_size(&self) -> u64 {
-self.last_pos
+/// Allows conversion of a "rewindable" PMMR into a "readonly" PMMR.
+/// Intended usage is to create a rewindable PMMR, rewind it,
+/// then convert to "readonly" and read from it.
+pub fn as_readonly(&self) -> ReadonlyPMMR<'a, T, B> {
+ReadonlyPMMR::at(&self.backend, self.last_pos)
}
}
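
The as_readonly() conversion above enables the rewind-then-read pattern described in its new doc comment. Below is a minimal sketch of that pattern for the kernel MMR; the helper name is illustrative, not part of this commit, and it assumes ReadablePMMR is in scope so root() can be called on the readonly view.

fn kernel_root_at(backend: &PMMRBackend<TxKernel>, kernel_mmr_size: u64) -> Result<Hash, String> {
    // Position a rewindable view at the header's kernel MMR size, then
    // convert it into a readonly PMMR and read the root through that view.
    let rewindable: RewindablePMMR<TxKernel, PMMRBackend<TxKernel>> =
        RewindablePMMR::at(backend, kernel_mmr_size);
    rewindable.as_readonly().root()
}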


@@ -132,6 +132,7 @@ impl<T> Segment<T> {
(first, last)
}
/// TODO - binary_search_by_key() here (can we assume these are sorted by pos?)
fn get_hash(&self, pos: u64) -> Result<Hash, SegmentError> {
self.hash_pos
.iter()
@@ -153,6 +154,16 @@
.zip(&self.hashes)
.map(|(&p, &h)| (p, h))
}
/// Segment proof
pub fn proof(&self) -> &SegmentProof {
&self.proof
}
/// Segment identifier
pub fn id(&self) -> SegmentIdentifier {
self.identifier
}
}
impl<T> Segment<T>
@@ -539,6 +550,11 @@ impl SegmentProof {
Ok(proof)
}
/// Size of the proof in hashes.
pub fn size(&self) -> usize {
self.hashes.len()
}
/// Reconstruct PMMR root using this proof
pub fn reconstruct_root(
&self,
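
To round out the picture, an illustrative helper (not in this commit) that uses the new accessors together, mirroring the debug logging in segmenter.rs above.

fn describe_kernel_segment(segment: &Segment<TxKernel>) -> String {
    // Summarize a segment using the accessors added in this commit.
    format!(
        "segment ({}, {}): {} leaves, {} hashes, {} proof hashes",
        segment.id().height,
        segment.id().idx,
        segment.leaf_iter().count(),
        segment.hash_iter().count(),
        segment.proof().size(),
    )
}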


@@ -15,7 +15,6 @@
use grin_core as core;
use grin_store as store;
use grin_util as util;
-use store::PrefixIterator;
use crate::core::global;
use crate::core::ser::{self, Readable, Reader, Writeable, Writer};


@@ -28,8 +28,7 @@ extern crate lazy_static;
#[macro_use]
extern crate serde_derive;
// Re-export so only has to be included once
-pub use parking_lot::Mutex;
-pub use parking_lot::{RwLock, RwLockReadGuard, RwLockWriteGuard};
+pub use parking_lot::{Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard};
// Re-export so only has to be included once
pub use secp256k1zkp as secp;