Mirror of https://github.com/mimblewimble/grin.git (synced 2025-01-20 19:11:08 +03:00)
Refactor to pass batch around explicitly (#3176)
* Refactor to pass batch around explicitly rather than keeping it in the extension. We would like to be able to pass an extension around and potentially clone it.
* Cleanup, pass header around where it makes sense.
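The core of the refactor, reduced to a minimal runnable sketch (the names below are simplified stand-ins, not the actual chain crate types): the `extending`-style helpers still own the db batch for the duration of the unit of work, but instead of stashing a reference to it inside the extension they hand it to the caller's closure as a second argument.

```rust
// Minimal, self-contained sketch of the pattern; illustrative names only.
struct Db {
    head: u64,
}
struct Batch<'a> {
    db: &'a Db,
}
impl<'a> Batch<'a> {
    fn head(&self) -> u64 {
        self.db.head
    }
}

// Before: struct Extension<'a> { batch: &'a Batch<'a>, /* MMRs... */ }
// After: the extension holds no batch at all.
#[derive(Default)]
struct Extension {
    size: u64, // stand-in for the MMR state
}

fn extending<F, T>(db: &Db, inner: F) -> T
where
    F: FnOnce(&mut Extension, &Batch<'_>) -> T,
{
    let batch = Batch { db }; // batch lives only for this unit of work
    let mut ext = Extension::default();
    inner(&mut ext, &batch) // both are passed explicitly to the closure
}

fn main() {
    let db = Db { head: 41 };
    let new_size = extending(&db, |ext, batch| {
        ext.size = batch.head() + 1; // read via the batch, mutate the extension
        ext.size
    });
    assert_eq!(new_size, 42);
}
```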
Parent: 2f1e8299b1
Commit: 9ec9d04457
5 changed files with 194 additions and 202 deletions
@@ -509,7 +509,7 @@ impl Chain {
 	pub fn get_unspent_output_at(&self, pos: u64) -> Result<Output, Error> {
 		let header_pmmr = self.header_pmmr.read();
 		let txhashset = self.txhashset.read();
-		txhashset::utxo_view(&header_pmmr, &txhashset, |utxo| {
+		txhashset::utxo_view(&header_pmmr, &txhashset, |utxo, _| {
 			utxo.get_unspent_output_at(pos)
 		})
 	}
@@ -518,8 +518,8 @@ impl Chain {
 	pub fn validate_tx(&self, tx: &Transaction) -> Result<(), Error> {
 		let header_pmmr = self.header_pmmr.read();
 		let txhashset = self.txhashset.read();
-		txhashset::utxo_view(&header_pmmr, &txhashset, |utxo| {
-			utxo.validate_tx(tx)?;
+		txhashset::utxo_view(&header_pmmr, &txhashset, |utxo, batch| {
+			utxo.validate_tx(tx, batch)?;
 			Ok(())
 		})
 	}
@@ -535,8 +535,8 @@ impl Chain {
 		let height = self.next_block_height()?;
 		let header_pmmr = self.header_pmmr.read();
 		let txhashset = self.txhashset.read();
-		txhashset::utxo_view(&header_pmmr, &txhashset, |utxo| {
-			utxo.verify_coinbase_maturity(&tx.inputs(), height)?;
+		txhashset::utxo_view(&header_pmmr, &txhashset, |utxo, batch| {
+			utxo.verify_coinbase_maturity(&tx.inputs(), height, batch)?;
 			Ok(())
 		})
 	}
@@ -567,10 +567,10 @@ impl Chain {
 		// Now create an extension from the txhashset and validate against the
 		// latest block header. Rewind the extension to the specified header to
 		// ensure the view is consistent.
-		txhashset::extending_readonly(&mut header_pmmr, &mut txhashset, |mut ext| {
-			pipe::rewind_and_apply_fork(&header, &mut ext)?;
-			let ref mut extension = ext.extension;
-			extension.validate(&self.genesis, fast_validation, &NoStatus)?;
+		txhashset::extending_readonly(&mut header_pmmr, &mut txhashset, |ext, batch| {
+			pipe::rewind_and_apply_fork(&header, ext, batch)?;
+			ext.extension
+				.validate(&self.genesis, fast_validation, &NoStatus, &header)?;
 			Ok(())
 		})
 	}
@@ -582,9 +582,9 @@ impl Chain {
 		let mut txhashset = self.txhashset.write();

 		let (prev_root, roots, sizes) =
-			txhashset::extending_readonly(&mut header_pmmr, &mut txhashset, |ext| {
-				let previous_header = ext.batch().get_previous_header(&b.header)?;
-				pipe::rewind_and_apply_fork(&previous_header, ext)?;
+			txhashset::extending_readonly(&mut header_pmmr, &mut txhashset, |ext, batch| {
+				let previous_header = batch.get_previous_header(&b.header)?;
+				pipe::rewind_and_apply_fork(&previous_header, ext, batch)?;

 				let ref mut extension = ext.extension;
 				let ref mut header_extension = ext.header_extension;
@@ -593,7 +593,7 @@ impl Chain {
 				let prev_root = header_extension.root()?;

 				// Apply the latest block to the chain state via the extension.
-				extension.apply_block(b)?;
+				extension.apply_block(b, batch)?;

 				Ok((prev_root, extension.roots()?, extension.sizes()))
 			})?;
@@ -628,10 +628,9 @@ impl Chain {
 		let mut header_pmmr = self.header_pmmr.write();
 		let mut txhashset = self.txhashset.write();
 		let merkle_proof =
-			txhashset::extending_readonly(&mut header_pmmr, &mut txhashset, |ext| {
-				pipe::rewind_and_apply_fork(&header, ext)?;
-				let ref mut extension = ext.extension;
-				extension.merkle_proof(output)
+			txhashset::extending_readonly(&mut header_pmmr, &mut txhashset, |ext, batch| {
+				pipe::rewind_and_apply_fork(&header, ext, batch)?;
+				ext.extension.merkle_proof(output, batch)
 			})?;

 		Ok(merkle_proof)
@@ -647,7 +646,7 @@ impl Chain {
 	/// Provides a reading view into the current kernel state.
 	pub fn kernel_data_read(&self) -> Result<File, Error> {
 		let txhashset = self.txhashset.read();
-		txhashset::rewindable_kernel_view(&txhashset, |view| view.kernel_data_read())
+		txhashset::rewindable_kernel_view(&txhashset, |view, _| view.kernel_data_read())
 	}

 	/// Writes kernels provided to us (via a kernel data download).
@@ -679,9 +678,9 @@ impl Chain {

 		let mut header_pmmr = self.header_pmmr.write();
 		let mut txhashset = self.txhashset.write();
-		txhashset::extending_readonly(&mut header_pmmr, &mut txhashset, |ext| {
-			pipe::rewind_and_apply_fork(&header, ext)?;
-			ext.extension.snapshot()?;
+		txhashset::extending_readonly(&mut header_pmmr, &mut txhashset, |ext, batch| {
+			pipe::rewind_and_apply_fork(&header, ext, batch)?;
+			ext.extension.snapshot(batch)?;

 			// prepare the zip
 			txhashset::zip_read(self.db_root.clone(), &header)
@@ -724,11 +723,11 @@ impl Chain {

 		let mut count = 0;
 		let mut current = header.clone();
-		txhashset::rewindable_kernel_view(&txhashset, |view| {
+		txhashset::rewindable_kernel_view(&txhashset, |view, batch| {
 			while current.height > 0 {
 				view.rewind(&current)?;
 				view.validate_root()?;
-				current = view.batch().get_previous_header(&current)?;
+				current = batch.get_previous_header(&current)?;
 				count += 1;
 			}
 			Ok(())
@@ -749,8 +748,8 @@ impl Chain {
 		let mut sync_pmmr = self.sync_pmmr.write();
 		let mut batch = self.store.batch()?;
 		let header = batch.get_block_header(&head.hash())?;
-		txhashset::header_extending(&mut sync_pmmr, &mut batch, |extension| {
-			pipe::rewind_and_apply_header_fork(&header, extension)?;
+		txhashset::header_extending(&mut sync_pmmr, &mut batch, |ext, batch| {
+			pipe::rewind_and_apply_header_fork(&header, ext, batch)?;
 			Ok(())
 		})?;
 		batch.commit()?;
@@ -939,25 +938,31 @@ impl Chain {

 		let mut header_pmmr = self.header_pmmr.write();
 		let mut batch = self.store.batch()?;
-		txhashset::extending(&mut header_pmmr, &mut txhashset, &mut batch, |ext| {
-			let extension = &mut ext.extension;
-			extension.rewind(&header)?;
+		txhashset::extending(
+			&mut header_pmmr,
+			&mut txhashset,
+			&mut batch,
+			|ext, batch| {
+				let extension = &mut ext.extension;
+				extension.rewind(&header, batch)?;

-			// Validate the extension, generating the utxo_sum and kernel_sum.
-			// Full validation, including rangeproofs and kernel signature verification.
-			let (utxo_sum, kernel_sum) = extension.validate(&self.genesis, false, status)?;
+				// Validate the extension, generating the utxo_sum and kernel_sum.
+				// Full validation, including rangeproofs and kernel signature verification.
+				let (utxo_sum, kernel_sum) =
+					extension.validate(&self.genesis, false, status, &header)?;

-			// Save the block_sums (utxo_sum, kernel_sum) to the db for use later.
-			extension.batch.save_block_sums(
-				&header.hash(),
-				&BlockSums {
-					utxo_sum,
-					kernel_sum,
-				},
-			)?;
+				// Save the block_sums (utxo_sum, kernel_sum) to the db for use later.
+				batch.save_block_sums(
+					&header.hash(),
+					&BlockSums {
+						utxo_sum,
+						kernel_sum,
+					},
+				)?;

-			Ok(())
-		})?;
+				Ok(())
+			},
+		)?;

 		debug!("txhashset_write: finished validating and rebuilding");

@@ -1473,14 +1478,14 @@ fn setup_head(
 	// We read header_head and sync_head directly from the MMR and assume they are non-empty.
 	{
 		if header_pmmr.last_pos == 0 {
-			txhashset::header_extending(header_pmmr, &mut batch, |extension| {
-				extension.apply_header(&genesis.header)
+			txhashset::header_extending(header_pmmr, &mut batch, |ext, _| {
+				ext.apply_header(&genesis.header)
 			})?;
 		}

 		if sync_pmmr.last_pos == 0 {
-			txhashset::header_extending(sync_pmmr, &mut batch, |extension| {
-				extension.apply_header(&genesis.header)
+			txhashset::header_extending(sync_pmmr, &mut batch, |ext, _| {
+				ext.apply_header(&genesis.header)
 			})?;
 		}
 	}
@@ -1498,18 +1503,17 @@ fn setup_head(
 			// to match the provided block header.
 			let header = batch.get_block_header(&head.last_block_h)?;

-			let res = txhashset::extending(header_pmmr, txhashset, &mut batch, |ext| {
-				pipe::rewind_and_apply_fork(&header, ext)?;
+			let res = txhashset::extending(header_pmmr, txhashset, &mut batch, |ext, batch| {
+				pipe::rewind_and_apply_fork(&header, ext, batch)?;

 				let ref mut extension = ext.extension;

-				extension.validate_roots()?;
+				extension.validate_roots(&header)?;

 				// now check we have the "block sums" for the block in question
 				// if we have no sums (migrating an existing node) we need to go
 				// back to the txhashset and sum the outputs and kernels
-				if header.height > 0 && extension.batch.get_block_sums(&header.hash()).is_err()
-				{
+				if header.height > 0 && batch.get_block_sums(&header.hash()).is_err() {
 					debug!(
 						"init: building (missing) block sums for {} @ {}",
 						header.height,
@@ -1519,10 +1523,10 @@ fn setup_head(
 					// Do a full (and slow) validation of the txhashset extension
 					// to calculate the utxo_sum and kernel_sum at this block height.
 					let (utxo_sum, kernel_sum) =
-						extension.validate_kernel_sums(&genesis.header)?;
+						extension.validate_kernel_sums(&genesis.header, &header)?;

 					// Save the block_sums to the db for use later.
-					extension.batch.save_block_sums(
+					batch.save_block_sums(
 						&header.hash(),
 						&BlockSums {
 							utxo_sum,
@@ -1571,12 +1575,8 @@ fn setup_head(
 				kernel_sum,
 			};
 		}
-		txhashset::extending(header_pmmr, txhashset, &mut batch, |ext| {
-			let ref mut extension = ext.extension;
-			extension.apply_block(&genesis)?;
-			extension.validate_roots()?;
-			extension.validate_sizes()?;
-			Ok(())
+		txhashset::extending(header_pmmr, txhashset, &mut batch, |ext, batch| {
+			ext.extension.apply_block(&genesis, batch)
 		})?;

 		// Save the block_sums to the db for use later.
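All of the `Chain` methods above follow the same calling convention after the change: the closure handed to `utxo_view`, `extending_readonly`, `rewindable_kernel_view` and `header_extending` now receives the batch as a second argument, and ignores it with `_` when it has no db lookups to do. Below is a rough, self-contained sketch of the read-only flavour of this, with illustrative names rather than the real API; the batch it opens is deliberately never committed.

```rust
// Illustrative only: simplified stand-ins for the chain's db, batch and extension.
struct Db {
    head: u64,
}
struct Batch<'a> {
    db: &'a Db,
}
impl<'a> Batch<'a> {
    fn head(&self) -> u64 {
        self.db.head
    }
}
struct Extension {
    pos: u64,
}

// Read-only variant: the helper opens a throwaway batch, hands it to the
// closure, and drops it afterwards so nothing it touched is ever committed.
fn extending_readonly<F, T>(db: &Db, inner: F) -> T
where
    F: FnOnce(&mut Extension, &Batch<'_>) -> T,
{
    let batch = Batch { db };
    let mut ext = Extension { pos: 0 };
    let res = inner(&mut ext, &batch);
    drop(batch); // discarded (rolled back), mirroring the readonly extension above
    res
}

fn main() {
    let db = Db { head: 7 };
    // Callers that need the batch take it as a second closure argument...
    let head = extending_readonly(&db, |ext, batch| {
        ext.pos = batch.head();
        ext.pos
    });
    assert_eq!(head, 7);
    // ...and callers that don't simply ignore it, as in |utxo, _| above.
    let fixed = extending_readonly(&db, |ext, _| ext.pos + 1);
    assert_eq!(fixed, 1);
}
```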
@@ -121,33 +121,33 @@ pub fn process_block(b: &Block, ctx: &mut BlockContext<'_>) -> Result<Option<Tip
 	let ref mut header_pmmr = &mut ctx.header_pmmr;
 	let ref mut txhashset = &mut ctx.txhashset;
 	let ref mut batch = &mut ctx.batch;
-	let block_sums = txhashset::extending(header_pmmr, txhashset, batch, |ext| {
-		rewind_and_apply_fork(&prev, ext)?;
+	let block_sums = txhashset::extending(header_pmmr, txhashset, batch, |ext, batch| {
+		rewind_and_apply_fork(&prev, ext, batch)?;

 		// Check any coinbase being spent have matured sufficiently.
 		// This needs to be done within the context of a potentially
 		// rewound txhashset extension to reflect chain state prior
 		// to applying the new block.
-		verify_coinbase_maturity(b, ext)?;
+		verify_coinbase_maturity(b, ext, batch)?;

 		// Validate the block against the UTXO set.
-		validate_utxo(b, ext)?;
+		validate_utxo(b, ext, batch)?;

 		// Using block_sums (utxo_sum, kernel_sum) for the previous block from the db
 		// we can verify_kernel_sums across the full UTXO sum and full kernel sum
 		// accounting for inputs/outputs/kernels in this new block.
 		// We know there are no double-spends etc. if this verifies successfully.
 		// Remember to save these to the db later on (regardless of extension rollback)
-		let block_sums = verify_block_sums(b, ext.batch())?;
+		let block_sums = verify_block_sums(b, batch)?;

 		// Apply the block to the txhashset state.
 		// Validate the txhashset roots and sizes against the block header.
 		// Block is invalid if there are any discrepencies.
-		apply_block_to_txhashset(b, ext)?;
+		apply_block_to_txhashset(b, ext, batch)?;

 		// If applying this block does not increase the work on the chain then
 		// we know we have not yet updated the chain to produce a new chain head.
-		let head = ext.batch().head()?;
+		let head = batch.head()?;
 		if !has_more_work(&b.header, &head) {
 			ext.extension.force_rollback();
 		}
@@ -208,8 +208,8 @@ pub fn sync_block_headers(
 	}

 	// Now apply this entire chunk of headers to the sync MMR (ctx is sync MMR specific).
-	txhashset::header_extending(&mut ctx.header_pmmr, &mut ctx.batch, |ext| {
-		rewind_and_apply_header_fork(&last_header, ext)?;
+	txhashset::header_extending(&mut ctx.header_pmmr, &mut ctx.batch, |ext, batch| {
+		rewind_and_apply_header_fork(&last_header, ext, batch)?;
 		Ok(())
 	})?;

@@ -245,8 +245,8 @@ pub fn process_block_header(header: &BlockHeader, ctx: &mut BlockContext<'_>) ->
 		}
 	}

-	txhashset::header_extending(&mut ctx.header_pmmr, &mut ctx.batch, |ext| {
-		rewind_and_apply_header_fork(&prev_header, ext)?;
+	txhashset::header_extending(&mut ctx.header_pmmr, &mut ctx.batch, |ext, batch| {
+		rewind_and_apply_header_fork(&prev_header, ext, batch)?;
 		ext.validate_root(header)?;
 		ext.apply_header(header)?;
 		if !has_more_work(&header, &header_head) {
@@ -393,12 +393,13 @@ fn validate_block(block: &Block, ctx: &mut BlockContext<'_>) -> Result<(), Error
 fn verify_coinbase_maturity(
 	block: &Block,
 	ext: &txhashset::ExtensionPair<'_>,
+	batch: &store::Batch<'_>,
 ) -> Result<(), Error> {
 	let ref extension = ext.extension;
 	let ref header_extension = ext.header_extension;
 	extension
 		.utxo_view(header_extension)
-		.verify_coinbase_maturity(&block.inputs(), block.header.height)
+		.verify_coinbase_maturity(&block.inputs(), block.header.height, batch)
 }

 /// Verify kernel sums across the full utxo and kernel sets based on block_sums
@@ -429,11 +430,11 @@ fn verify_block_sums(b: &Block, batch: &store::Batch<'_>) -> Result<BlockSums, E
 fn apply_block_to_txhashset(
 	block: &Block,
 	ext: &mut txhashset::ExtensionPair<'_>,
+	batch: &store::Batch<'_>,
 ) -> Result<(), Error> {
-	let ref mut extension = ext.extension;
-	extension.apply_block(block)?;
-	extension.validate_roots()?;
-	extension.validate_sizes()?;
+	ext.extension.apply_block(block, batch)?;
+	ext.extension.validate_roots(&block.header)?;
+	ext.extension.validate_sizes(&block.header)?;
 	Ok(())
 }

@@ -484,12 +485,13 @@ fn has_more_work(header: &BlockHeader, head: &Tip) -> bool {
 pub fn rewind_and_apply_header_fork(
 	header: &BlockHeader,
 	ext: &mut txhashset::HeaderExtension<'_>,
+	batch: &store::Batch<'_>,
 ) -> Result<(), Error> {
 	let mut fork_hashes = vec![];
 	let mut current = header.clone();
-	while current.height > 0 && !ext.is_on_current_chain(&current).is_ok() {
+	while current.height > 0 && !ext.is_on_current_chain(&current, batch).is_ok() {
 		fork_hashes.push(current.hash());
-		current = ext.batch.get_previous_header(&current)?;
+		current = batch.get_previous_header(&current)?;
 	}
 	fork_hashes.reverse();

@@ -500,8 +502,7 @@ pub fn rewind_and_apply_header_fork(

 	// Re-apply all headers on this fork.
 	for h in fork_hashes {
-		let header = ext
-			.batch
+		let header = batch
 			.get_block_header(&h)
 			.map_err(|e| ErrorKind::StoreErr(e, format!("getting forked headers")))?;
 		ext.validate_root(&header)?;
@@ -518,21 +519,25 @@ pub fn rewind_and_apply_header_fork(
 pub fn rewind_and_apply_fork(
 	header: &BlockHeader,
 	ext: &mut txhashset::ExtensionPair<'_>,
+	batch: &store::Batch<'_>,
 ) -> Result<(), Error> {
-	let ref mut batch = ext.batch();
 	let ref mut extension = ext.extension;
 	let ref mut header_extension = ext.header_extension;

 	// Prepare the header MMR.
-	rewind_and_apply_header_fork(header, header_extension)?;
+	rewind_and_apply_header_fork(header, header_extension, batch)?;

 	// Rewind the txhashset extension back to common ancestor based on header MMR.
 	let mut current = batch.head_header()?;
-	while current.height > 0 && !header_extension.is_on_current_chain(&current).is_ok() {
+	while current.height > 0
+		&& !header_extension
+			.is_on_current_chain(&current, batch)
+			.is_ok()
+	{
 		current = batch.get_previous_header(&current)?;
 	}
 	let fork_point = current;
-	extension.rewind(&fork_point)?;
+	extension.rewind(&fork_point, batch)?;

 	// Then apply all full blocks since this common ancestor
 	// to put txhashet extension in a state to accept the new block.
@@ -550,20 +555,26 @@ pub fn rewind_and_apply_fork(
 			.map_err(|e| ErrorKind::StoreErr(e, format!("getting forked blocks")))?;

 		// Re-verify coinbase maturity along this fork.
-		verify_coinbase_maturity(&fb, ext)?;
+		verify_coinbase_maturity(&fb, ext, batch)?;
 		// Validate the block against the UTXO set.
-		validate_utxo(&fb, ext)?;
+		validate_utxo(&fb, ext, batch)?;
 		// Re-verify block_sums to set the block_sums up on this fork correctly.
 		verify_block_sums(&fb, batch)?;
 		// Re-apply the blocks.
-		apply_block_to_txhashset(&fb, ext)?;
+		apply_block_to_txhashset(&fb, ext, batch)?;
 	}

 	Ok(())
 }

-fn validate_utxo(block: &Block, ext: &mut txhashset::ExtensionPair<'_>) -> Result<(), Error> {
+fn validate_utxo(
+	block: &Block,
+	ext: &mut txhashset::ExtensionPair<'_>,
+	batch: &store::Batch<'_>,
+) -> Result<(), Error> {
 	let ref mut extension = ext.extension;
 	let ref mut header_extension = ext.header_extension;
-	extension.utxo_view(header_extension).validate_block(block)
+	extension
+		.utxo_view(header_extension)
+		.validate_block(block, batch)
 }
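The pipeline functions above all gain a trailing `batch` parameter, so a single borrowed batch is threaded from block processing down through coinbase-maturity checks, UTXO validation, block-sums verification and block application. A compressed, runnable sketch of that threading with stand-in types (none of these are the real `pipe`/`txhashset` items, and the checks are placeholders):

```rust
// Stand-in types; the real code threads store::Batch and ExtensionPair instead.
struct Batch {
    head_height: u64,
}
struct Extension {
    height: u64,
}

fn verify_coinbase_maturity(ext: &Extension, batch: &Batch) -> Result<(), String> {
    // Placeholder check so the sketch does something observable.
    if ext.height <= batch.head_height + 1 {
        Ok(())
    } else {
        Err("too far ahead".into())
    }
}

fn validate_utxo(_ext: &Extension, _batch: &Batch) -> Result<(), String> {
    Ok(())
}

fn apply_block_to_txhashset(ext: &mut Extension, _batch: &Batch) -> Result<(), String> {
    ext.height += 1;
    Ok(())
}

// One batch reference is passed explicitly to every step, mirroring
// rewind_and_apply_fork(header, ext, batch) and friends in the diff above.
fn process_block(ext: &mut Extension, batch: &Batch) -> Result<u64, String> {
    verify_coinbase_maturity(ext, batch)?;
    validate_utxo(ext, batch)?;
    apply_block_to_txhashset(ext, batch)?;
    Ok(ext.height)
}

fn main() {
    let batch = Batch { head_height: 0 };
    let mut ext = Extension { height: 0 };
    assert_eq!(process_block(&mut ext, &batch), Ok(1));
}
```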
@@ -19,13 +19,11 @@ use std::fs::File;
 use crate::core::core::pmmr::RewindablePMMR;
 use crate::core::core::{BlockHeader, TxKernel};
 use crate::error::{Error, ErrorKind};
-use crate::store::Batch;
 use grin_store::pmmr::PMMRBackend;

 /// Rewindable (but readonly) view of the kernel set (based on kernel MMR).
 pub struct RewindableKernelView<'a> {
 	pmmr: RewindablePMMR<'a, TxKernel, PMMRBackend<TxKernel>>,
-	batch: &'a Batch<'a>,
 	header: BlockHeader,
 }

@@ -33,21 +31,9 @@ impl<'a> RewindableKernelView<'a> {
 	/// Build a new readonly kernel view.
 	pub fn new(
 		pmmr: RewindablePMMR<'a, TxKernel, PMMRBackend<TxKernel>>,
-		batch: &'a Batch<'_>,
 		header: BlockHeader,
 	) -> RewindableKernelView<'a> {
-		RewindableKernelView {
-			pmmr,
-			batch,
-			header,
-		}
-	}
-
-	/// Accessor for the batch used in this view.
-	/// We will discard this batch (rollback) at the end, so be aware of this.
-	/// Nothing will get written to the db/index via this view.
-	pub fn batch(&self) -> &'a Batch<'_> {
-		self.batch
+		RewindableKernelView { pmmr, header }
 	}

 	/// Rewind this readonly view to a previous block.
@@ -104,10 +104,10 @@ impl PMMRHandle<BlockHeader> {
 	}
 }

-/// An easy to manipulate structure holding the 3 sum trees necessary to
-/// validate blocks and capturing the Output set, the range proofs and the
+/// An easy to manipulate structure holding the 3 MMRs necessary to
+/// validate blocks and capturing the output set, associated rangeproofs and the
 /// kernels. Also handles the index of Commitments to positions in the
-/// output and range proof pmmr trees.
+/// output and rangeproof MMRs.
 ///
 /// Note that the index is never authoritative, only the trees are
 /// guaranteed to indicate whether an output is spent or not. The index
@@ -465,7 +465,7 @@ pub fn extending_readonly<F, T>(
 	inner: F,
 ) -> Result<T, Error>
 where
-	F: FnOnce(&mut ExtensionPair<'_>) -> Result<T, Error>,
+	F: FnOnce(&mut ExtensionPair<'_>, &Batch<'_>) -> Result<T, Error>,
 {
 	let commit_index = trees.commit_index.clone();
 	let batch = commit_index.batch()?;
@@ -483,13 +483,13 @@ where

 	let res = {
 		let header_pmmr = PMMR::at(&mut handle.backend, handle.last_pos);
-		let mut header_extension = HeaderExtension::new(header_pmmr, &batch, header_head);
-		let mut extension = Extension::new(trees, &batch, head);
+		let mut header_extension = HeaderExtension::new(header_pmmr, header_head);
+		let mut extension = Extension::new(trees, head);
 		let mut extension_pair = ExtensionPair {
 			header_extension: &mut header_extension,
 			extension: &mut extension,
 		};
-		inner(&mut extension_pair)
+		inner(&mut extension_pair, &batch)
 	};

 	trace!("Rollbacking txhashset (readonly) extension.");
@@ -513,7 +513,7 @@ pub fn utxo_view<F, T>(
 	inner: F,
 ) -> Result<T, Error>
 where
-	F: FnOnce(&UTXOView<'_>) -> Result<T, Error>,
+	F: FnOnce(&UTXOView<'_>, &Batch<'_>) -> Result<T, Error>,
 {
 	let res: Result<T, Error>;
 	{
@@ -526,8 +526,8 @@ where
 		// Create a new batch here to pass into the utxo_view.
 		// Discard it (rollback) after we finish with the utxo_view.
 		let batch = trees.commit_index.batch()?;
-		let utxo = UTXOView::new(output_pmmr, header_pmmr, rproof_pmmr, &batch);
-		res = inner(&utxo);
+		let utxo = UTXOView::new(output_pmmr, header_pmmr, rproof_pmmr);
+		res = inner(&utxo, &batch);
 	}
 	res
 }
@@ -539,7 +539,7 @@ where
 /// when we are done with the view.
 pub fn rewindable_kernel_view<F, T>(trees: &TxHashSet, inner: F) -> Result<T, Error>
 where
-	F: FnOnce(&mut RewindableKernelView<'_>) -> Result<T, Error>,
+	F: FnOnce(&mut RewindableKernelView<'_>, &Batch<'_>) -> Result<T, Error>,
 {
 	let res: Result<T, Error>;
 	{
@@ -550,8 +550,8 @@ where
 		// Discard it (rollback) after we finish with the kernel_view.
 		let batch = trees.commit_index.batch()?;
 		let header = batch.head_header()?;
-		let mut view = RewindableKernelView::new(kernel_pmmr, &batch, header);
-		res = inner(&mut view);
+		let mut view = RewindableKernelView::new(kernel_pmmr, header);
+		res = inner(&mut view, &batch);
 	}
 	res
 }
@@ -570,7 +570,7 @@ pub fn extending<'a, F, T>(
 	inner: F,
 ) -> Result<T, Error>
 where
-	F: FnOnce(&mut ExtensionPair<'_>) -> Result<T, Error>,
+	F: FnOnce(&mut ExtensionPair<'_>, &Batch<'_>) -> Result<T, Error>,
 {
 	let sizes: (u64, u64, u64);
 	let res: Result<T, Error>;
@@ -593,13 +593,13 @@ where
 		trace!("Starting new txhashset extension.");

 		let header_pmmr = PMMR::at(&mut header_pmmr.backend, header_pmmr.last_pos);
-		let mut header_extension = HeaderExtension::new(header_pmmr, &child_batch, header_head);
-		let mut extension = Extension::new(trees, &child_batch, head);
+		let mut header_extension = HeaderExtension::new(header_pmmr, header_head);
+		let mut extension = Extension::new(trees, head);
 		let mut extension_pair = ExtensionPair {
 			header_extension: &mut header_extension,
 			extension: &mut extension,
 		};
-		res = inner(&mut extension_pair);
+		res = inner(&mut extension_pair, &child_batch);

 		rollback = extension_pair.extension.rollback;
 		sizes = extension_pair.extension.sizes();
@@ -653,7 +653,7 @@ pub fn header_extending<'a, F, T>(
 	inner: F,
 ) -> Result<T, Error>
 where
-	F: FnOnce(&mut HeaderExtension<'_>) -> Result<T, Error>,
+	F: FnOnce(&mut HeaderExtension<'_>, &Batch<'_>) -> Result<T, Error>,
 {
 	let size: u64;
 	let res: Result<T, Error>;
@@ -674,8 +674,8 @@ where

 	{
 		let pmmr = PMMR::at(&mut handle.backend, handle.last_pos);
-		let mut extension = HeaderExtension::new(pmmr, &child_batch, head);
-		res = inner(&mut extension);
+		let mut extension = HeaderExtension::new(pmmr, head);
+		res = inner(&mut extension, &child_batch);

 		rollback = extension.rollback;
 		size = extension.size();
@@ -708,24 +708,17 @@ pub struct HeaderExtension<'a> {

 	/// Rollback flag.
 	rollback: bool,
-
-	/// Batch in which the extension occurs, public so it can be used within
-	/// an `extending` closure. Just be careful using it that way as it will
-	/// get rolled back with the extension (i.e on a losing fork).
-	pub batch: &'a Batch<'a>,
 }

 impl<'a> HeaderExtension<'a> {
 	fn new(
 		pmmr: PMMR<'a, BlockHeader, PMMRBackend<BlockHeader>>,
-		batch: &'a Batch<'_>,
 		head: Tip,
 	) -> HeaderExtension<'a> {
 		HeaderExtension {
 			head,
 			pmmr,
 			rollback: false,
-			batch,
 		}
 	}

@@ -742,10 +735,14 @@ impl<'a> HeaderExtension<'a> {
 	/// Get the header at the specified height based on the current state of the header extension.
 	/// Derives the MMR pos from the height (insertion index) and retrieves the header hash.
 	/// Looks the header up in the db by hash.
-	pub fn get_header_by_height(&self, height: u64) -> Result<BlockHeader, Error> {
+	pub fn get_header_by_height(
+		&self,
+		height: u64,
+		batch: &Batch<'_>,
+	) -> Result<BlockHeader, Error> {
 		let pos = pmmr::insertion_to_pmmr_index(height + 1);
 		if let Some(hash) = self.get_header_hash(pos) {
-			Ok(self.batch.get_block_header(&hash)?)
+			Ok(batch.get_block_header(&hash)?)
 		} else {
 			Err(ErrorKind::Other(format!("get header by height")).into())
 		}
@@ -753,11 +750,15 @@ impl<'a> HeaderExtension<'a> {

 	/// Compares the provided header to the header in the header MMR at that height.
 	/// If these match we know the header is on the current chain.
-	pub fn is_on_current_chain(&self, header: &BlockHeader) -> Result<(), Error> {
+	pub fn is_on_current_chain(
+		&self,
+		header: &BlockHeader,
+		batch: &Batch<'_>,
+	) -> Result<(), Error> {
 		if header.height > self.head.height {
 			return Err(ErrorKind::Other(format!("not on current chain, out beyond")).into());
 		}
-		let chain_header = self.get_header_by_height(header.height)?;
+		let chain_header = self.get_header_by_height(header.height, batch)?;
 		if chain_header.hash() == header.hash() {
 			Ok(())
 		} else {
@@ -835,14 +836,7 @@ pub struct ExtensionPair<'a> {
 	pub extension: &'a mut Extension<'a>,
 }

-impl<'a> ExtensionPair<'a> {
-	/// Accessor for the batch associated with this extension pair.
-	pub fn batch(&mut self) -> &'a Batch<'a> {
-		self.extension.batch
-	}
-}
-
-/// Allows the application of new blocks on top of the sum trees in a
+/// Allows the application of new blocks on top of the txhashset in a
 /// reversible manner within a unit of work provided by the `extending`
 /// function.
 pub struct Extension<'a> {
@@ -856,11 +850,6 @@ pub struct Extension<'a> {

 	/// Rollback flag.
 	rollback: bool,
-
-	/// Batch in which the extension occurs, public so it can be used within
-	/// an `extending` closure. Just be careful using it that way as it will
-	/// get rolled back with the extension (i.e on a losing fork).
-	pub batch: &'a Batch<'a>,
 }

 impl<'a> Committed for Extension<'a> {
@@ -892,7 +881,7 @@ impl<'a> Committed for Extension<'a> {
 }

 impl<'a> Extension<'a> {
-	fn new(trees: &'a mut TxHashSet, batch: &'a Batch<'_>, head: Tip) -> Extension<'a> {
+	fn new(trees: &'a mut TxHashSet, head: Tip) -> Extension<'a> {
 		Extension {
 			head,
 			output_pmmr: PMMR::at(
@@ -909,7 +898,6 @@ impl<'a> Extension<'a> {
 			),
 			bitmap_accumulator: trees.bitmap_accumulator.clone(),
 			rollback: false,
-			batch,
 		}
 	}

@@ -925,23 +913,21 @@ impl<'a> Extension<'a> {
 			self.output_pmmr.readonly_pmmr(),
 			header_ext.pmmr.readonly_pmmr(),
 			self.rproof_pmmr.readonly_pmmr(),
-			self.batch,
 		)
 	}

 	/// Apply a new block to the current txhashet extension (output, rangeproof, kernel MMRs).
-	pub fn apply_block(&mut self, b: &Block) -> Result<(), Error> {
+	pub fn apply_block(&mut self, b: &Block, batch: &Batch<'_>) -> Result<(), Error> {
 		let mut affected_pos = vec![];

 		for out in b.outputs() {
-			let pos = self.apply_output(out)?;
+			let pos = self.apply_output(out, batch)?;
 			affected_pos.push(pos);
-			self.batch
-				.save_output_pos_height(&out.commitment(), pos, b.header.height)?;
+			batch.save_output_pos_height(&out.commitment(), pos, b.header.height)?;
 		}

 		for input in b.inputs() {
-			let pos = self.apply_input(input)?;
+			let pos = self.apply_input(input, batch)?;
 			affected_pos.push(pos);
 		}

@@ -977,9 +963,9 @@ impl<'a> Extension<'a> {
 		)
 	}

-	fn apply_input(&mut self, input: &Input) -> Result<u64, Error> {
+	fn apply_input(&mut self, input: &Input, batch: &Batch<'_>) -> Result<u64, Error> {
 		let commit = input.commitment();
-		let pos_res = self.batch.get_output_pos(&commit);
+		let pos_res = batch.get_output_pos(&commit);
 		if let Ok(pos) = pos_res {
 			// First check this input corresponds to an existing entry in the output MMR.
 			if let Some(hash) = self.output_pmmr.get_hash(pos) {
@@ -1008,10 +994,10 @@ impl<'a> Extension<'a> {
 		}
 	}

-	fn apply_output(&mut self, out: &Output) -> Result<u64, Error> {
+	fn apply_output(&mut self, out: &Output, batch: &Batch<'_>) -> Result<u64, Error> {
 		let commit = out.commitment();

-		if let Ok(pos) = self.batch.get_output_pos(&commit) {
+		if let Ok(pos) = batch.get_output_pos(&commit) {
 			if let Some(out_mmr) = self.output_pmmr.get_data(pos) {
 				if out_mmr.commitment() == commit {
 					return Err(ErrorKind::DuplicateCommitment(commit).into());
@@ -1061,10 +1047,14 @@ impl<'a> Extension<'a> {
 	/// Note: this relies on the MMR being stable even after pruning/compaction.
 	/// We need the hash of each sibling pos from the pos up to the peak
 	/// including the sibling leaf node which may have been removed.
-	pub fn merkle_proof(&self, output: &OutputIdentifier) -> Result<MerkleProof, Error> {
+	pub fn merkle_proof(
+		&self,
+		output: &OutputIdentifier,
+		batch: &Batch<'_>,
+	) -> Result<MerkleProof, Error> {
 		debug!("txhashset: merkle_proof: output: {:?}", output.commit,);
 		// then calculate the Merkle Proof based on the known pos
-		let pos = self.batch.get_output_pos(&output.commit)?;
+		let pos = batch.get_output_pos(&output.commit)?;
 		let merkle_proof = self
 			.output_pmmr
 			.merkle_proof(pos)
@@ -1078,8 +1068,8 @@ impl<'a> Extension<'a> {
 	/// the block hash as filename suffix.
 	/// Needed for fast-sync (utxo file needs to be rewound before sending
 	/// across).
-	pub fn snapshot(&mut self) -> Result<(), Error> {
-		let header = self.batch.get_block_header(&self.head.last_block_h)?;
+	pub fn snapshot(&mut self, batch: &Batch<'_>) -> Result<(), Error> {
+		let header = batch.get_block_header(&self.head.last_block_h)?;
 		self.output_pmmr
 			.snapshot(&header)
 			.map_err(|e| ErrorKind::Other(e))?;
@@ -1091,7 +1081,7 @@ impl<'a> Extension<'a> {

 	/// Rewinds the MMRs to the provided block, rewinding to the last output pos
 	/// and last kernel pos of that block.
-	pub fn rewind(&mut self, header: &BlockHeader) -> Result<(), Error> {
+	pub fn rewind(&mut self, header: &BlockHeader, batch: &Batch<'_>) -> Result<(), Error> {
 		debug!(
 			"Rewind extension to {} at {} from {} at {}",
 			header.hash(),
@@ -1106,8 +1096,8 @@ impl<'a> Extension<'a> {
 		// undone during rewind).
 		// Rewound output pos will be removed from the MMR.
 		// Rewound input (spent) pos will be added back to the MMR.
-		let head_header = self.batch.get_block_header(&self.head.hash())?;
-		let rewind_rm_pos = input_pos_to_rewind(header, &head_header, &self.batch)?;
+		let head_header = batch.get_block_header(&self.head.hash())?;
+		let rewind_rm_pos = input_pos_to_rewind(header, &head_header, batch)?;

 		self.rewind_to_pos(
 			header.output_mmr_size,
@@ -1149,7 +1139,7 @@ impl<'a> Extension<'a> {
 	}

 	/// Current root hashes and sums (if applicable) for the Output, range proof
-	/// and kernel sum trees.
+	/// and kernel MMRs.
 	pub fn roots(&self) -> Result<TxHashSetRoots, Error> {
 		Ok(TxHashSetRoots {
 			output_roots: OutputRoots {
@@ -1171,24 +1161,22 @@ impl<'a> Extension<'a> {
 	}

 	/// Validate the MMR (output, rangeproof, kernel) roots against the latest header.
-	pub fn validate_roots(&self) -> Result<(), Error> {
-		if self.head.height == 0 {
+	pub fn validate_roots(&self, header: &BlockHeader) -> Result<(), Error> {
+		if header.height == 0 {
 			return Ok(());
 		}
-		let head_header = self.batch.get_block_header(&self.head.hash())?;
-		self.roots()?.validate(&head_header)
+		self.roots()?.validate(header)
 	}

 	/// Validate the header, output and kernel MMR sizes against the block header.
-	pub fn validate_sizes(&self) -> Result<(), Error> {
-		if self.head.height == 0 {
+	pub fn validate_sizes(&self, header: &BlockHeader) -> Result<(), Error> {
+		if header.height == 0 {
 			return Ok(());
 		}
-		let head_header = self.batch.get_block_header(&self.head.last_block_h)?;
 		if (
-			head_header.output_mmr_size,
-			head_header.output_mmr_size,
-			head_header.kernel_mmr_size,
+			header.output_mmr_size,
+			header.output_mmr_size,
+			header.kernel_mmr_size,
 		) != self.sizes()
 		{
 			Err(ErrorKind::InvalidMMRSize.into())
@@ -1229,13 +1217,13 @@ impl<'a> Extension<'a> {
 	pub fn validate_kernel_sums(
 		&self,
 		genesis: &BlockHeader,
+		header: &BlockHeader,
 	) -> Result<(Commitment, Commitment), Error> {
 		let now = Instant::now();

-		let head_header = self.batch.get_block_header(&self.head.last_block_h)?;
 		let (utxo_sum, kernel_sum) = self.verify_kernel_sums(
-			head_header.total_overage(genesis.kernel_mmr_size > 0),
-			head_header.total_kernel_offset(),
+			header.total_overage(genesis.kernel_mmr_size > 0),
+			header.total_kernel_offset(),
 		)?;

 		debug!(
@@ -1253,10 +1241,11 @@ impl<'a> Extension<'a> {
 		genesis: &BlockHeader,
 		fast_validation: bool,
 		status: &dyn TxHashsetWriteStatus,
+		header: &BlockHeader,
 	) -> Result<(Commitment, Commitment), Error> {
 		self.validate_mmrs()?;
-		self.validate_roots()?;
-		self.validate_sizes()?;
+		self.validate_roots(header)?;
+		self.validate_sizes(header)?;

 		if self.head.height == 0 {
 			let zero_commit = secp_static::commit_to_zero_value();
@@ -1265,7 +1254,7 @@ impl<'a> Extension<'a> {

 		// The real magicking happens here. Sum of kernel excesses should equal
 		// sum of unspent outputs minus total supply.
-		let (output_sum, kernel_sum) = self.validate_kernel_sums(genesis)?;
+		let (output_sum, kernel_sum) = self.validate_kernel_sums(genesis, header)?;

 		// These are expensive verification step (skipped for "fast validation").
 		if !fast_validation {
@@ -1294,7 +1283,7 @@ impl<'a> Extension<'a> {
 		debug!("-- end of outputs --");
 	}

-	/// Dumps the state of the 3 sum trees to stdout for debugging. Short
+	/// Dumps the state of the 3 MMRs to stdout for debugging. Short
 	/// version only prints the Output tree.
 	pub fn dump(&self, short: bool) {
 		debug!("-- outputs --");
@@ -1307,7 +1296,7 @@ impl<'a> Extension<'a> {
 		}
 	}

-	/// Sizes of each of the sum trees
+	/// Sizes of each of the MMRs
 	pub fn sizes(&self) -> (u64, u64, u64) {
 		(
 			self.output_pmmr.unpruned_size(),
@@ -29,7 +29,6 @@ pub struct UTXOView<'a> {
 	output_pmmr: ReadonlyPMMR<'a, Output, PMMRBackend<Output>>,
 	header_pmmr: ReadonlyPMMR<'a, BlockHeader, PMMRBackend<BlockHeader>>,
 	rproof_pmmr: ReadonlyPMMR<'a, RangeProof, PMMRBackend<RangeProof>>,
-	batch: &'a Batch<'a>,
 }

 impl<'a> UTXOView<'a> {
@@ -38,26 +37,24 @@ impl<'a> UTXOView<'a> {
 		output_pmmr: ReadonlyPMMR<'a, Output, PMMRBackend<Output>>,
 		header_pmmr: ReadonlyPMMR<'a, BlockHeader, PMMRBackend<BlockHeader>>,
 		rproof_pmmr: ReadonlyPMMR<'a, RangeProof, PMMRBackend<RangeProof>>,
-		batch: &'a Batch<'_>,
 	) -> UTXOView<'a> {
 		UTXOView {
 			output_pmmr,
 			header_pmmr,
 			rproof_pmmr,
-			batch,
 		}
 	}

 	/// Validate a block against the current UTXO set.
 	/// Every input must spend an output that currently exists in the UTXO set.
 	/// No duplicate outputs.
-	pub fn validate_block(&self, block: &Block) -> Result<(), Error> {
+	pub fn validate_block(&self, block: &Block, batch: &Batch<'_>) -> Result<(), Error> {
 		for output in block.outputs() {
-			self.validate_output(output)?;
+			self.validate_output(output, batch)?;
 		}

 		for input in block.inputs() {
-			self.validate_input(input)?;
+			self.validate_input(input, batch)?;
 		}
 		Ok(())
 	}
@@ -65,13 +62,13 @@ impl<'a> UTXOView<'a> {
 	/// Validate a transaction against the current UTXO set.
 	/// Every input must spend an output that currently exists in the UTXO set.
 	/// No duplicate outputs.
-	pub fn validate_tx(&self, tx: &Transaction) -> Result<(), Error> {
+	pub fn validate_tx(&self, tx: &Transaction, batch: &Batch<'_>) -> Result<(), Error> {
 		for output in tx.outputs() {
-			self.validate_output(output)?;
+			self.validate_output(output, batch)?;
 		}

 		for input in tx.inputs() {
-			self.validate_input(input)?;
+			self.validate_input(input, batch)?;
 		}
 		Ok(())
 	}
@@ -79,8 +76,8 @@ impl<'a> UTXOView<'a> {
 	// Input is valid if it is spending an (unspent) output
 	// that currently exists in the output MMR.
 	// Compare the hash in the output MMR at the expected pos.
-	fn validate_input(&self, input: &Input) -> Result<(), Error> {
-		if let Ok(pos) = self.batch.get_output_pos(&input.commitment()) {
+	fn validate_input(&self, input: &Input, batch: &Batch<'_>) -> Result<(), Error> {
+		if let Ok(pos) = batch.get_output_pos(&input.commitment()) {
 			if let Some(hash) = self.output_pmmr.get_hash(pos) {
 				if hash == input.hash_with_index(pos - 1) {
 					return Ok(());
@@ -91,8 +88,8 @@ impl<'a> UTXOView<'a> {
 	}

 	// Output is valid if it would not result in a duplicate commitment in the output MMR.
-	fn validate_output(&self, output: &Output) -> Result<(), Error> {
-		if let Ok(pos) = self.batch.get_output_pos(&output.commitment()) {
+	fn validate_output(&self, output: &Output, batch: &Batch<'_>) -> Result<(), Error> {
+		if let Ok(pos) = batch.get_output_pos(&output.commitment()) {
 			if let Some(out_mmr) = self.output_pmmr.get_data(pos) {
 				if out_mmr.commitment() == output.commitment() {
 					return Err(ErrorKind::DuplicateCommitment(output.commitment()).into());
@@ -115,13 +112,18 @@ impl<'a> UTXOView<'a> {

 	/// Verify we are not attempting to spend any coinbase outputs
 	/// that have not sufficiently matured.
-	pub fn verify_coinbase_maturity(&self, inputs: &Vec<Input>, height: u64) -> Result<(), Error> {
+	pub fn verify_coinbase_maturity(
+		&self,
+		inputs: &Vec<Input>,
+		height: u64,
+		batch: &Batch<'_>,
+	) -> Result<(), Error> {
 		// Find the greatest output pos of any coinbase
 		// outputs we are attempting to spend.
 		let pos = inputs
 			.iter()
 			.filter(|x| x.is_coinbase())
-			.filter_map(|x| self.batch.get_output_pos(&x.commitment()).ok())
+			.filter_map(|x| batch.get_output_pos(&x.commitment()).ok())
 			.max()
 			.unwrap_or(0);

@@ -135,7 +137,7 @@ impl<'a> UTXOView<'a> {
 		// Find the "cutoff" pos in the output MMR based on the
 		// header from 1,000 blocks ago.
 		let cutoff_height = height.checked_sub(global::coinbase_maturity()).unwrap_or(0);
-		let cutoff_header = self.get_header_by_height(cutoff_height)?;
+		let cutoff_header = self.get_header_by_height(cutoff_height, batch)?;
 		let cutoff_pos = cutoff_header.output_mmr_size;

 		// If any output pos exceed the cutoff_pos
@@ -156,10 +158,14 @@ impl<'a> UTXOView<'a> {
 	/// Get the header at the specified height based on the current state of the extension.
 	/// Derives the MMR pos from the height (insertion index) and retrieves the header hash.
 	/// Looks the header up in the db by hash.
-	pub fn get_header_by_height(&self, height: u64) -> Result<BlockHeader, Error> {
+	pub fn get_header_by_height(
+		&self,
+		height: u64,
+		batch: &Batch<'_>,
+	) -> Result<BlockHeader, Error> {
 		let pos = pmmr::insertion_to_pmmr_index(height + 1);
 		if let Some(hash) = self.get_header_hash(pos) {
-			let header = self.batch.get_block_header(&hash)?;
+			let header = batch.get_block_header(&hash)?;
 			Ok(header)
 		} else {
 			Err(ErrorKind::Other(format!("get header by height")).into())
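With the stored `batch` field gone from `UTXOView` (and from `Extension`/`HeaderExtension` above), a view borrows only the MMR data it reads, which is what the commit message is after: the value can be handed around, reused against whichever batch the caller currently holds, and more easily cloned without dragging a batch borrow along. A rough illustration with deliberately simplified types (this is not the real `UTXOView`; a `Vec` stands in for the output MMR and the batch index):

```rust
// Simplified stand-ins; the real UTXOView borrows readonly PMMRs, not a slice.
struct Batch {
    index: Vec<u64>, // commitments already recorded in this batch's output index
}
impl Batch {
    fn get_output_pos(&self, commit: u64) -> Option<u64> {
        self.index.iter().position(|&c| c == commit).map(|p| p as u64)
    }
}

// No batch field: the view only borrows the output set it reads from.
#[derive(Clone)]
struct UtxoView<'a> {
    outputs: &'a [u64],
}

impl<'a> UtxoView<'a> {
    // Each call receives whichever batch the caller is currently working with.
    fn validate_output(&self, commit: u64, batch: &Batch) -> Result<(), String> {
        if batch.get_output_pos(commit).is_some() {
            return Err(format!("duplicate commitment {}", commit));
        }
        let _ = self.outputs; // a real view would also consult the output MMR here
        Ok(())
    }
}

fn main() {
    let outputs = [10u64, 20, 30];
    let view = UtxoView { outputs: &outputs };

    // The same (cloned) view works against two independent batches.
    let batch_a = Batch { index: vec![10] };
    let batch_b = Batch { index: vec![] };
    assert!(view.clone().validate_output(10, &batch_a).is_err());
    assert!(view.validate_output(10, &batch_b).is_ok());
}
```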