improve "check known" steps during process_block() (#1475)
* document what we do during pipe::apply_block()
* rustfmt
* wip
* rustfmt
* wip
* additional check_known_store and check_known_mmr checks in process_block
* rustfmt
* cleanup coinbase maturity check in process_block
* consolidate the "check in store" logic, add TODOs around the 50 block OldBlock logic
* rustfmt
* cleanup
This commit is contained in:
parent 86aea875de
commit 77765796ab
3 changed files with 205 additions and 102 deletions
@@ -57,8 +57,8 @@ pub struct BlockContext {
 // Check if this block is the next block *immediately*
 // after our current chain head.
-fn is_next_block(b: &Block, ctx: &mut BlockContext) -> bool {
-    b.header.previous == ctx.head.last_block_h
+fn is_next_block(header: &BlockHeader, ctx: &mut BlockContext) -> bool {
+    header.previous == ctx.head.last_block_h
 }

 /// Runs the block processing pipeline, including validation and finding a
@@ -89,40 +89,47 @@ pub fn process_block(
     let txhashset = ctx.txhashset.clone();
     let mut txhashset = txhashset.write().unwrap();

-    // update head now that we're in the lock
+    // Update head now that we are in the lock.
     ctx.head = ctx.store.head()?;

-    // Check if we have recently processed this block (via block_hashes_cache).
-    check_known(b.hash(), ctx)?;
+    // Fast in-memory checks to avoid re-processing a block we recently processed.
+    {
+        // Check if we have recently processed this block (via ctx chain head).
+        check_known_head(&b.header, ctx)?;
+
+        // Check if we have recently processed this block (via block_hashes_cache).
+        check_known_cache(&b.header, ctx)?;
+
+        // Check if this block is already known due to it being in the current set of orphan blocks.
+        check_known_orphans(&b.header, ctx)?;
+    }

     // Check our header itself is actually valid before proceeding any further.
     validate_header(&b.header, ctx)?;

     // Check if we are processing the "next" block relative to the current chain head.
-    if is_next_block(b, ctx) {
+    if is_next_block(&b.header, ctx) {
         // If this is the "next" block then either -
         // * common case where we process blocks sequentially.
         // * special case where this is the first fast sync full block
-        // Either way we can proceed.
+        // Either way we can proceed (and we know the block is new and unprocessed).
     } else {
-        // Check we actually have the previous block in the store.
-        // Note: not just the header but the full block itself.
-        // We cannot assume we can use the chain head for this
-        // as we may be dealing with a fork (with less work currently).
-        match ctx.store.block_exists(&b.header.previous) {
-            Ok(true) => {
-                // We have the previous block in the store, so we can proceed.
-            }
-            Ok(false) => {
-                // We do not have the previous block in the store.
-                // We have not yet processed the previous block so
-                // this block is an orphan (for now).
-                return Err(ErrorKind::Orphan.into());
-            }
-            Err(e) => {
-                return Err(ErrorKind::StoreErr(e, "pipe get previous".to_owned()).into());
-            }
-        }
+        // Check we have *this* block in the store.
+        // Stop if we have processed this block previously (it is in the store).
+        // This is more expensive than the earlier check_known() as we hit the store.
+        check_known_store(&b.header, ctx)?;
+
+        // Check existing MMR (via rewind) to see if this block is known to us already.
+        // This should catch old blocks before we check to see if they appear to be
+        // orphaned due to compacting/pruning on a fast-sync node.
+        // This is more expensive than check_known_store() as we rewind the txhashset.
+        // But we only incur the cost of the rewind if this is an earlier block on the same chain.
+        check_known_mmr(&b.header, ctx, &mut txhashset)?;
+
+        // At this point it looks like this is a new block that we have not yet processed.
+        // Check we have the *previous* block in the store.
+        // If we do not then treat this block as an orphan.
+        check_prev_store(&b.header, ctx)?;
     }

     // Validate the block itself.
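The consolidated checks give each rejection a distinct error kind. A minimal caller-side sketch of the possible outcomes, assuming the failure-style `e.kind()` accessor grin uses elsewhere; the handler bodies are placeholders, not grin's code:

```rust
// Sketch only: distinguishing the rejection reasons produced by the checks above.
match process_block(&b, &mut ctx) {
    Ok(_) => { /* new block accepted (it may or may not become the new head) */ }
    Err(e) => match e.kind() {
        // Duplicate detected by check_known_head/cache/orphans/store/mmr.
        ErrorKind::Unfit(msg) => debug!(LOGGER, "duplicate block: {}", msg),
        // Previous block missing from the store: park it as an orphan.
        ErrorKind::Orphan => { /* add to the orphan pool, request the parent */ }
        // Already-known block more than 50 blocks behind the head.
        ErrorKind::OldBlock => { /* candidate "abusive peer" signal */ }
        _ => { /* validation failure or store error */ }
    },
}
```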
@@ -140,7 +147,7 @@ pub fn process_block(
     // First we rewind the txhashset extension if necessary
     // to put it into a consistent state for validating the block.
     // We can skip this step if the previous header is the latest header we saw.
-    if is_next_block(b, ctx) {
+    if is_next_block(&b.header, ctx) {
         // No need to rewind if we are processing the next block.
     } else {
         // Rewind and re-apply blocks on the forked chain to
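The `rewind_and_apply_fork` call below does the heavy lifting on forks. Its implementation is not part of this diff; purely for orientation, here is a rough sketch of the shape it must have, with all internal details assumed rather than taken from the actual code:

```rust
// Assumed sketch, not grin's implementation: bring the extension onto the fork
// so the new block can be validated against the forked chain state.
fn rewind_and_apply_fork_sketch(
    b: &Block,
    store: Arc<store::ChainStore>,
    ext: &mut txhashset::Extension,
) -> Result<(), Error> {
    // Walk back from the new block's parent until we reach a header that sits
    // on the current most-work chain, collecting the fork hashes on the way.
    let mut fork_hashes = vec![];
    let mut current = store.get_block_header(&b.header.previous)?;
    while current.hash() != store.get_header_by_height(current.height)?.hash() {
        fork_hashes.push(current.hash());
        current = store.get_block_header(&current.previous)?;
    }

    // Rewind the extension to the common ancestor, then re-apply the full
    // blocks along the fork.
    ext.rewind(&current)?;
    for h in fork_hashes.iter().rev() {
        let fb = store.get_block(h)?;
        ext.apply_block(&fb)?;
    }
    Ok(())
}
```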
@@ -149,6 +156,12 @@ pub fn process_block(
         rewind_and_apply_fork(b, ctx.store.clone(), extension)?;
     }

+    // Check any coinbase being spent have matured sufficiently.
+    // This needs to be done within the context of a potentially
+    // rewound txhashset extension to reflect chain state prior
+    // to applying the new block.
+    verify_coinbase_maturity(b, &mut extension)?;
+
     // Apply the block to the txhashset state.
     // Validate the txhashset roots and sizes against the block header.
     // Block is invalid if there are any discrepancies.
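The maturity rule itself is simple; the point of the placement above is that the spend height is evaluated against the (possibly rewound) chain state rather than the raw head. A sketch of the rule, with an assumed stand-in constant since grin's actual `COINBASE_MATURITY` consensus value is not shown in this diff:

```rust
// Illustrative only: the constant below is a stand-in, not grin's value.
const COINBASE_MATURITY: u64 = 1_000;

/// A coinbase output minted at `output_height` may only be spent once
/// COINBASE_MATURITY further blocks have been built on top of it.
fn coinbase_is_mature(output_height: u64, spend_height: u64) -> bool {
    spend_height >= output_height + COINBASE_MATURITY
}
```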
@@ -156,7 +169,7 @@ pub fn process_block(

     // If applying this block does not increase the work on the chain then
     // we know we have not yet updated the chain to produce a new chain head.
-    if !block_has_more_work(b, &ctx.head) {
+    if !block_has_more_work(&b.header, &ctx.head) {
         extension.force_rollback();
     }

@@ -234,19 +247,125 @@ fn check_header_known(bh: Hash, ctx: &mut BlockContext) -> Result<(), Error> {
     Ok(())
 }

-/// Quick in-memory check to fast-reject any block we've already handled
-/// recently. Keeps duplicates from the network in check.
-fn check_known(bh: Hash, ctx: &mut BlockContext) -> Result<(), Error> {
+/// Quick in-memory check to fast-reject any block handled recently.
+/// Keeps duplicates from the network in check.
+/// Checks against the last_block_h and prev_block_h of the chain head.
+fn check_known_head(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), Error> {
+    let bh = header.hash();
     if bh == ctx.head.last_block_h || bh == ctx.head.prev_block_h {
-        return Err(ErrorKind::Unfit("already known".to_string()).into());
+        return Err(ErrorKind::Unfit("already known in head".to_string()).into());
     }
+    Ok(())
+}
+
+/// Quick in-memory check to fast-reject any block handled recently.
+/// Keeps duplicates from the network in check.
+/// Checks against the cache of recently processed block hashes.
+fn check_known_cache(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), Error> {
     let cache = ctx.block_hashes_cache.read().unwrap();
-    if cache.contains(&bh) {
+    if cache.contains(&header.hash()) {
         return Err(ErrorKind::Unfit("already known in cache".to_string()).into());
     }
-    if ctx.orphans.contains(&bh) {
-        return Err(ErrorKind::Unfit("already known in orphans".to_string()).into());
+    Ok(())
+}
+
+/// Check if this block is in the set of known orphans.
+fn check_known_orphans(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), Error> {
+    if ctx.orphans.contains(&header.hash()) {
+        Err(ErrorKind::Unfit("already known in orphans".to_string()).into())
+    } else {
+        Ok(())
     }
+}
+
+// Check if this block is in the store already.
+fn check_known_store(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), Error> {
+    match ctx.store.block_exists(&header.hash()) {
+        Ok(true) => {
+            if header.height < ctx.head.height.saturating_sub(50) {
+                // TODO - we flag this as an "abusive peer" but only in the case
+                // where we have the full block in our store.
+                // So this is not a particularly exhaustive check.
+                Err(ErrorKind::OldBlock.into())
+            } else {
+                Err(ErrorKind::Unfit("already known in store".to_string()).into())
+            }
+        }
+        Ok(false) => {
+            // Not yet processed this block, we can proceed.
+            Ok(())
+        }
+        Err(e) => {
+            return Err(ErrorKind::StoreErr(e, "pipe get this block".to_owned()).into());
+        }
+    }
+}
+
+// Check we have the *previous* block in the store.
+// Note: not just the header but the full block itself.
+// We cannot assume we can use the chain head for this
+// as we may be dealing with a fork (with less work currently).
+fn check_prev_store(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), Error> {
+    match ctx.store.block_exists(&header.previous) {
+        Ok(true) => {
+            // We have the previous block in the store, so we can proceed.
+            Ok(())
+        }
+        Ok(false) => {
+            // We do not have the previous block in the store.
+            // We have not yet processed the previous block so
+            // this block is an orphan (for now).
+            Err(ErrorKind::Orphan.into())
+        }
+        Err(e) => Err(ErrorKind::StoreErr(e, "pipe get previous".to_owned()).into()),
+    }
+}
+
+// If we are processing an "old" block then
+// we can quickly check if it already exists
+// on our current longest chain (we have already processed it).
+// First check the header matches via current height index.
+// Then peek directly into the MMRs at the appropriate pos.
+// We can avoid a full rewind in this case.
+fn check_known_mmr(
+    header: &BlockHeader,
+    ctx: &mut BlockContext,
+    write_txhashset: &mut txhashset::TxHashSet,
+) -> Result<(), Error> {
+    // No point checking the MMR if this block is not earlier in the chain.
+    if header.height > ctx.head.height {
+        return Ok(());
+    }
+
+    // Use "header by height" index to look at current most work chain.
+    // Header is not "known" if the header differs at the given height.
+    let local_header = ctx.store.get_header_by_height(header.height)?;
+    if local_header.hash() != header.hash() {
+        return Ok(());
+    }
+
+    // Rewind the txhashset to the given block and validate
+    // roots and sizes against the header.
+    // If everything matches then this is a "known" block
+    // and we do not need to spend any more effort.
+    txhashset::extending_readonly(write_txhashset, |extension| {
+        extension.rewind(header)?;
+
+        // We want to return an error here (block already known)
+        // if we *successfully* validate the MMR roots and sizes.
+        if extension.validate_roots(header).is_ok() && extension.validate_sizes(header).is_ok() {
+            // TODO - determine if block is more than 50 blocks old
+            // and return specific OldBlock error.
+            // Or pull OldBlock (abusive peer) out into separate processing step.
+            return Err(ErrorKind::Unfit("already known on most work chain".to_string()).into());
+        }

+        // If we get here then we have *not* seen this block before
+        // and we should continue processing the block.
+        Ok(())
+    })?;
+
     Ok(())
 }

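The `saturating_sub(50)` in `check_known_store` is what keeps the OldBlock horizon well defined near genesis: a plain `head.height - 50` would underflow a `u64`. This is standard library behaviour, shown here only for clarity:

```rust
// Near genesis the horizon clamps to zero, so no known block counts as "old" yet.
assert_eq!(10u64.saturating_sub(50), 0);
// Beyond height 50 the horizon trails the head by exactly 50 blocks.
assert_eq!(100u64.saturating_sub(50), 50);
```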
@@ -354,72 +473,35 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), Error> {
 }

 fn validate_block(
-    b: &Block,
+    block: &Block,
     ctx: &mut BlockContext,
     verifier_cache: Arc<RwLock<VerifierCache>>,
 ) -> Result<(), Error> {
-    if ctx.store.block_exists(&b.hash())? {
-        if b.header.height < ctx.head.height.saturating_sub(50) {
-            return Err(ErrorKind::OldBlock.into());
-        } else {
-            return Err(ErrorKind::Unfit("already known".to_string()).into());
-        }
-    }
-    let prev = ctx.store.get_block_header(&b.header.previous)?;
-    b.validate(
-        &prev.total_kernel_offset,
-        &prev.total_kernel_sum,
-        verifier_cache,
-    ).map_err(|e| ErrorKind::InvalidBlockProof(e))?;
+    let prev = ctx.store.get_block_header(&block.header.previous)?;
+    block
+        .validate(
+            &prev.total_kernel_offset,
+            &prev.total_kernel_sum,
+            verifier_cache,
+        )
+        .map_err(|e| ErrorKind::InvalidBlockProof(e))?;
+    Ok(())
+}
+
+/// Verify the block is not attempting to spend coinbase outputs
+/// before they have sufficiently matured.
+/// Note: requires a txhashset extension.
+fn verify_coinbase_maturity(block: &Block, ext: &mut txhashset::Extension) -> Result<(), Error> {
+    ext.verify_coinbase_maturity(&block.inputs(), block.header.height)?;
     Ok(())
 }

 /// Fully validate the block by applying it to the txhashset extension.
 /// Check both the txhashset roots and sizes are correct after applying the block.
-fn apply_block_to_txhashset(b: &Block, ext: &mut txhashset::Extension) -> Result<(), Error> {
-    // First check we are not attempting to spend any coinbase outputs
-    // before they have matured sufficiently.
-    // TODO - this step is ill-fitting here - where should this live?
-    ext.verify_coinbase_maturity(&b.inputs(), b.header.height)?;
-
-    // apply the new block to the MMR trees and check the new root hashes
-    ext.apply_block(&b)?;
-
-    let roots = ext.roots();
-    if roots.output_root != b.header.output_root
-        || roots.rproof_root != b.header.range_proof_root
-        || roots.kernel_root != b.header.kernel_root
-    {
-        ext.dump(false);
-
-        debug!(
-            LOGGER,
-            "validate_block_via_txhashset: output roots - {:?}, {:?}",
-            roots.output_root,
-            b.header.output_root,
-        );
-        debug!(
-            LOGGER,
-            "validate_block_via_txhashset: rproof roots - {:?}, {:?}",
-            roots.rproof_root,
-            b.header.range_proof_root,
-        );
-        debug!(
-            LOGGER,
-            "validate_block_via_txhashset: kernel roots - {:?}, {:?}",
-            roots.kernel_root,
-            b.header.kernel_root,
-        );
-
-        return Err(ErrorKind::InvalidRoot.into());
-    }
-
-    // Check the output and rangeproof MMR sizes here against the header.
-    let (output_mmr_size, _, kernel_mmr_size) = ext.sizes();
-    if b.header.output_mmr_size != output_mmr_size || b.header.kernel_mmr_size != kernel_mmr_size {
-        return Err(ErrorKind::InvalidMMRSize.into());
-    }
-
+fn apply_block_to_txhashset(block: &Block, ext: &mut txhashset::Extension) -> Result<(), Error> {
+    ext.apply_block(block)?;
+    ext.validate_roots(&block.header)?;
+    ext.validate_sizes(&block.header)?;
     Ok(())
 }

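With the coinbase check pulled out into its own helper, `apply_block_to_txhashset` reduces to apply plus the two validators. A minimal sketch of exercising it without committing any state, reusing the `extending_readonly` pattern from `check_known_mmr` above (variable names assumed):

```rust
// Sketch: apply a candidate block against a throwaway (rolled back) extension.
txhashset::extending_readonly(&mut txhashset, |extension| {
    // apply_block + validate_roots + validate_sizes, per the new decomposition.
    apply_block_to_txhashset(&block, extension)?;
    Ok(())
})?;
```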
@@ -448,7 +530,7 @@ fn add_block_header(bh: &BlockHeader, batch: &mut store::Batch) -> Result<(), Error> {
 fn update_head(b: &Block, ctx: &BlockContext, batch: &store::Batch) -> Result<Option<Tip>, Error> {
     // if we made a fork with more work than the head (which should also be true
     // when extending the head), update it
-    if block_has_more_work(b, &ctx.head) {
+    if block_has_more_work(&b.header, &ctx.head) {
         // update the block height index
         batch
             .setup_height(&b.header, &ctx.head)
@@ -480,8 +562,8 @@ fn update_head(b: &Block, ctx: &BlockContext, batch: &store::Batch) -> Result<Option<Tip>, Error> {
 }

 // Whether the provided block totals more work than the chain tip
-fn block_has_more_work(b: &Block, tip: &Tip) -> bool {
-    let block_tip = Tip::from_block(&b.header);
+fn block_has_more_work(header: &BlockHeader, tip: &Tip) -> bool {
+    let block_tip = Tip::from_block(header);
     block_tip.total_difficulty > tip.total_difficulty
 }

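Note the strict inequality in `block_has_more_work`: a fork with merely equal total difficulty never displaces the current head, which keeps head selection stable between competing tips. Restated as a two-line sketch using the types from the diff:

```rust
// A fork takes over only with strictly more cumulative work than the head.
let block_tip = Tip::from_block(&b.header);
let takes_over = block_tip.total_difficulty > ctx.head.total_difficulty;
```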
@@ -837,9 +837,26 @@ impl<'a> Extension<'a> {
         || roots.rproof_root != header.range_proof_root
         || roots.kernel_root != header.kernel_root
     {
-        return Err(ErrorKind::InvalidRoot.into());
+        Err(ErrorKind::InvalidRoot.into())
+    } else {
+        Ok(())
     }
-    Ok(())
+}
+
+/// Validate the output and kernel MMR sizes against the block header.
+pub fn validate_sizes(&self, header: &BlockHeader) -> Result<(), Error> {
+    // If we are validating the genesis block then we have no outputs or
+    // kernels. So we are done here.
+    if header.height == 0 {
+        return Ok(());
+    }
+
+    let (output_mmr_size, _, kernel_mmr_size) = self.sizes();
+    if output_mmr_size != header.output_mmr_size || kernel_mmr_size != header.kernel_mmr_size {
+        Err(ErrorKind::InvalidMMRSize.into())
+    } else {
+        Ok(())
+    }
 }

 fn validate_mmrs(&self) -> Result<(), Error> {
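The sizes `validate_sizes` compares are MMR positions, not leaf counts: every leaf insertion also appends whatever parent nodes its position completes. For n leaves the total is the standard MMR identity 2n - popcount(n), sketched below (the helper name is ours, not grin's):

```rust
/// Expected MMR size (total positions) for a given number of leaves.
/// Each of the n leaves occupies a position, and n - popcount(n) parent
/// positions are created along the way: 2n - popcount(n) in total.
fn expected_mmr_size(n_leaves: u64) -> u64 {
    2 * n_leaves - u64::from(n_leaves.count_ones())
}

assert_eq!(expected_mmr_size(1), 1);
assert_eq!(expected_mmr_size(2), 3); // two leaves plus their parent
assert_eq!(expected_mmr_size(3), 4);
assert_eq!(expected_mmr_size(4), 7); // a perfect tree of four leaves
```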
@@ -874,6 +891,7 @@ impl<'a> Extension<'a> {
 ) -> Result<((Commitment, Commitment)), Error> {
     self.validate_mmrs()?;
     self.validate_roots(header)?;
+    self.validate_sizes(header)?;

     if header.height == 0 {
         let zero_commit = secp_static::commit_to_zero_value();
@@ -352,7 +352,8 @@ where
 }

 /// Helper function to get the last N nodes inserted, i.e. the last
-/// n nodes along the bottom of the tree
+/// n nodes along the bottom of the tree.
+/// May return fewer than n items if the MMR has been pruned/compacted.
 pub fn get_last_n_insertions(&self, n: u64) -> Vec<(Hash, T)> {
     let mut return_vec = vec![];
     let mut last_leaf = self.last_pos;
@@ -361,10 +362,12 @@ where
             break;
         }
         last_leaf = bintree_rightmost(last_leaf);
-        return_vec.push((
-            self.backend.get_hash(last_leaf).unwrap(),
-            self.backend.get_data(last_leaf).unwrap(),
-        ));
+        if let Some(hash) = self.backend.get_hash(last_leaf) {
+            if let Some(data) = self.backend.get_data(last_leaf) {
+                return_vec.push((hash, data));
+            }
+        }
         last_leaf -= 1;
     }
     return_vec

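On a pruned or compacted backend, `get_hash`/`get_data` legitimately return `None`, so the old `unwrap()` calls could panic; the new code skips such positions instead. The visible consequence for callers, as a small sketch (variable names assumed):

```rust
// After pruning, the helper may return fewer than the n requested items.
let last = pmmr.get_last_n_insertions(10);
assert!(last.len() <= 10);
```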