Refactor to pass batch around explicitly (#3176)

* Refactor to pass batch around explicitly rather than keeping it in the extension.
We would like to be able to pass an extension around and potentially clone it.

* cleanup, pass header around where it makes sense
This commit is contained in:
Antioch Peverell 2020-01-28 17:23:11 +00:00 committed by Quentin Le Sceller
parent 2f1e8299b1
commit 9ec9d04457
5 changed files with 194 additions and 202 deletions

View file

@ -509,7 +509,7 @@ impl Chain {
pub fn get_unspent_output_at(&self, pos: u64) -> Result<Output, Error> { pub fn get_unspent_output_at(&self, pos: u64) -> Result<Output, Error> {
let header_pmmr = self.header_pmmr.read(); let header_pmmr = self.header_pmmr.read();
let txhashset = self.txhashset.read(); let txhashset = self.txhashset.read();
txhashset::utxo_view(&header_pmmr, &txhashset, |utxo| { txhashset::utxo_view(&header_pmmr, &txhashset, |utxo, _| {
utxo.get_unspent_output_at(pos) utxo.get_unspent_output_at(pos)
}) })
} }
@ -518,8 +518,8 @@ impl Chain {
pub fn validate_tx(&self, tx: &Transaction) -> Result<(), Error> { pub fn validate_tx(&self, tx: &Transaction) -> Result<(), Error> {
let header_pmmr = self.header_pmmr.read(); let header_pmmr = self.header_pmmr.read();
let txhashset = self.txhashset.read(); let txhashset = self.txhashset.read();
txhashset::utxo_view(&header_pmmr, &txhashset, |utxo| { txhashset::utxo_view(&header_pmmr, &txhashset, |utxo, batch| {
utxo.validate_tx(tx)?; utxo.validate_tx(tx, batch)?;
Ok(()) Ok(())
}) })
} }
@ -535,8 +535,8 @@ impl Chain {
let height = self.next_block_height()?; let height = self.next_block_height()?;
let header_pmmr = self.header_pmmr.read(); let header_pmmr = self.header_pmmr.read();
let txhashset = self.txhashset.read(); let txhashset = self.txhashset.read();
txhashset::utxo_view(&header_pmmr, &txhashset, |utxo| { txhashset::utxo_view(&header_pmmr, &txhashset, |utxo, batch| {
utxo.verify_coinbase_maturity(&tx.inputs(), height)?; utxo.verify_coinbase_maturity(&tx.inputs(), height, batch)?;
Ok(()) Ok(())
}) })
} }
@ -567,10 +567,10 @@ impl Chain {
// Now create an extension from the txhashset and validate against the // Now create an extension from the txhashset and validate against the
// latest block header. Rewind the extension to the specified header to // latest block header. Rewind the extension to the specified header to
// ensure the view is consistent. // ensure the view is consistent.
txhashset::extending_readonly(&mut header_pmmr, &mut txhashset, |mut ext| { txhashset::extending_readonly(&mut header_pmmr, &mut txhashset, |ext, batch| {
pipe::rewind_and_apply_fork(&header, &mut ext)?; pipe::rewind_and_apply_fork(&header, ext, batch)?;
let ref mut extension = ext.extension; ext.extension
extension.validate(&self.genesis, fast_validation, &NoStatus)?; .validate(&self.genesis, fast_validation, &NoStatus, &header)?;
Ok(()) Ok(())
}) })
} }
@ -582,9 +582,9 @@ impl Chain {
let mut txhashset = self.txhashset.write(); let mut txhashset = self.txhashset.write();
let (prev_root, roots, sizes) = let (prev_root, roots, sizes) =
txhashset::extending_readonly(&mut header_pmmr, &mut txhashset, |ext| { txhashset::extending_readonly(&mut header_pmmr, &mut txhashset, |ext, batch| {
let previous_header = ext.batch().get_previous_header(&b.header)?; let previous_header = batch.get_previous_header(&b.header)?;
pipe::rewind_and_apply_fork(&previous_header, ext)?; pipe::rewind_and_apply_fork(&previous_header, ext, batch)?;
let ref mut extension = ext.extension; let ref mut extension = ext.extension;
let ref mut header_extension = ext.header_extension; let ref mut header_extension = ext.header_extension;
@ -593,7 +593,7 @@ impl Chain {
let prev_root = header_extension.root()?; let prev_root = header_extension.root()?;
// Apply the latest block to the chain state via the extension. // Apply the latest block to the chain state via the extension.
extension.apply_block(b)?; extension.apply_block(b, batch)?;
Ok((prev_root, extension.roots()?, extension.sizes())) Ok((prev_root, extension.roots()?, extension.sizes()))
})?; })?;
@ -628,10 +628,9 @@ impl Chain {
let mut header_pmmr = self.header_pmmr.write(); let mut header_pmmr = self.header_pmmr.write();
let mut txhashset = self.txhashset.write(); let mut txhashset = self.txhashset.write();
let merkle_proof = let merkle_proof =
txhashset::extending_readonly(&mut header_pmmr, &mut txhashset, |ext| { txhashset::extending_readonly(&mut header_pmmr, &mut txhashset, |ext, batch| {
pipe::rewind_and_apply_fork(&header, ext)?; pipe::rewind_and_apply_fork(&header, ext, batch)?;
let ref mut extension = ext.extension; ext.extension.merkle_proof(output, batch)
extension.merkle_proof(output)
})?; })?;
Ok(merkle_proof) Ok(merkle_proof)
@ -647,7 +646,7 @@ impl Chain {
/// Provides a reading view into the current kernel state. /// Provides a reading view into the current kernel state.
pub fn kernel_data_read(&self) -> Result<File, Error> { pub fn kernel_data_read(&self) -> Result<File, Error> {
let txhashset = self.txhashset.read(); let txhashset = self.txhashset.read();
txhashset::rewindable_kernel_view(&txhashset, |view| view.kernel_data_read()) txhashset::rewindable_kernel_view(&txhashset, |view, _| view.kernel_data_read())
} }
/// Writes kernels provided to us (via a kernel data download). /// Writes kernels provided to us (via a kernel data download).
@ -679,9 +678,9 @@ impl Chain {
let mut header_pmmr = self.header_pmmr.write(); let mut header_pmmr = self.header_pmmr.write();
let mut txhashset = self.txhashset.write(); let mut txhashset = self.txhashset.write();
txhashset::extending_readonly(&mut header_pmmr, &mut txhashset, |ext| { txhashset::extending_readonly(&mut header_pmmr, &mut txhashset, |ext, batch| {
pipe::rewind_and_apply_fork(&header, ext)?; pipe::rewind_and_apply_fork(&header, ext, batch)?;
ext.extension.snapshot()?; ext.extension.snapshot(batch)?;
// prepare the zip // prepare the zip
txhashset::zip_read(self.db_root.clone(), &header) txhashset::zip_read(self.db_root.clone(), &header)
@ -724,11 +723,11 @@ impl Chain {
let mut count = 0; let mut count = 0;
let mut current = header.clone(); let mut current = header.clone();
txhashset::rewindable_kernel_view(&txhashset, |view| { txhashset::rewindable_kernel_view(&txhashset, |view, batch| {
while current.height > 0 { while current.height > 0 {
view.rewind(&current)?; view.rewind(&current)?;
view.validate_root()?; view.validate_root()?;
current = view.batch().get_previous_header(&current)?; current = batch.get_previous_header(&current)?;
count += 1; count += 1;
} }
Ok(()) Ok(())
@ -749,8 +748,8 @@ impl Chain {
let mut sync_pmmr = self.sync_pmmr.write(); let mut sync_pmmr = self.sync_pmmr.write();
let mut batch = self.store.batch()?; let mut batch = self.store.batch()?;
let header = batch.get_block_header(&head.hash())?; let header = batch.get_block_header(&head.hash())?;
txhashset::header_extending(&mut sync_pmmr, &mut batch, |extension| { txhashset::header_extending(&mut sync_pmmr, &mut batch, |ext, batch| {
pipe::rewind_and_apply_header_fork(&header, extension)?; pipe::rewind_and_apply_header_fork(&header, ext, batch)?;
Ok(()) Ok(())
})?; })?;
batch.commit()?; batch.commit()?;
@ -939,16 +938,21 @@ impl Chain {
let mut header_pmmr = self.header_pmmr.write(); let mut header_pmmr = self.header_pmmr.write();
let mut batch = self.store.batch()?; let mut batch = self.store.batch()?;
txhashset::extending(&mut header_pmmr, &mut txhashset, &mut batch, |ext| { txhashset::extending(
&mut header_pmmr,
&mut txhashset,
&mut batch,
|ext, batch| {
let extension = &mut ext.extension; let extension = &mut ext.extension;
extension.rewind(&header)?; extension.rewind(&header, batch)?;
// Validate the extension, generating the utxo_sum and kernel_sum. // Validate the extension, generating the utxo_sum and kernel_sum.
// Full validation, including rangeproofs and kernel signature verification. // Full validation, including rangeproofs and kernel signature verification.
let (utxo_sum, kernel_sum) = extension.validate(&self.genesis, false, status)?; let (utxo_sum, kernel_sum) =
extension.validate(&self.genesis, false, status, &header)?;
// Save the block_sums (utxo_sum, kernel_sum) to the db for use later. // Save the block_sums (utxo_sum, kernel_sum) to the db for use later.
extension.batch.save_block_sums( batch.save_block_sums(
&header.hash(), &header.hash(),
&BlockSums { &BlockSums {
utxo_sum, utxo_sum,
@ -957,7 +961,8 @@ impl Chain {
)?; )?;
Ok(()) Ok(())
})?; },
)?;
debug!("txhashset_write: finished validating and rebuilding"); debug!("txhashset_write: finished validating and rebuilding");
@ -1473,14 +1478,14 @@ fn setup_head(
// We read header_head and sync_head directly from the MMR and assume they are non-empty. // We read header_head and sync_head directly from the MMR and assume they are non-empty.
{ {
if header_pmmr.last_pos == 0 { if header_pmmr.last_pos == 0 {
txhashset::header_extending(header_pmmr, &mut batch, |extension| { txhashset::header_extending(header_pmmr, &mut batch, |ext, _| {
extension.apply_header(&genesis.header) ext.apply_header(&genesis.header)
})?; })?;
} }
if sync_pmmr.last_pos == 0 { if sync_pmmr.last_pos == 0 {
txhashset::header_extending(sync_pmmr, &mut batch, |extension| { txhashset::header_extending(sync_pmmr, &mut batch, |ext, _| {
extension.apply_header(&genesis.header) ext.apply_header(&genesis.header)
})?; })?;
} }
} }
@ -1498,18 +1503,17 @@ fn setup_head(
// to match the provided block header. // to match the provided block header.
let header = batch.get_block_header(&head.last_block_h)?; let header = batch.get_block_header(&head.last_block_h)?;
let res = txhashset::extending(header_pmmr, txhashset, &mut batch, |ext| { let res = txhashset::extending(header_pmmr, txhashset, &mut batch, |ext, batch| {
pipe::rewind_and_apply_fork(&header, ext)?; pipe::rewind_and_apply_fork(&header, ext, batch)?;
let ref mut extension = ext.extension; let ref mut extension = ext.extension;
extension.validate_roots()?; extension.validate_roots(&header)?;
// now check we have the "block sums" for the block in question // now check we have the "block sums" for the block in question
// if we have no sums (migrating an existing node) we need to go // if we have no sums (migrating an existing node) we need to go
// back to the txhashset and sum the outputs and kernels // back to the txhashset and sum the outputs and kernels
if header.height > 0 && extension.batch.get_block_sums(&header.hash()).is_err() if header.height > 0 && batch.get_block_sums(&header.hash()).is_err() {
{
debug!( debug!(
"init: building (missing) block sums for {} @ {}", "init: building (missing) block sums for {} @ {}",
header.height, header.height,
@ -1519,10 +1523,10 @@ fn setup_head(
// Do a full (and slow) validation of the txhashset extension // Do a full (and slow) validation of the txhashset extension
// to calculate the utxo_sum and kernel_sum at this block height. // to calculate the utxo_sum and kernel_sum at this block height.
let (utxo_sum, kernel_sum) = let (utxo_sum, kernel_sum) =
extension.validate_kernel_sums(&genesis.header)?; extension.validate_kernel_sums(&genesis.header, &header)?;
// Save the block_sums to the db for use later. // Save the block_sums to the db for use later.
extension.batch.save_block_sums( batch.save_block_sums(
&header.hash(), &header.hash(),
&BlockSums { &BlockSums {
utxo_sum, utxo_sum,
@ -1571,12 +1575,8 @@ fn setup_head(
kernel_sum, kernel_sum,
}; };
} }
txhashset::extending(header_pmmr, txhashset, &mut batch, |ext| { txhashset::extending(header_pmmr, txhashset, &mut batch, |ext, batch| {
let ref mut extension = ext.extension; ext.extension.apply_block(&genesis, batch)
extension.apply_block(&genesis)?;
extension.validate_roots()?;
extension.validate_sizes()?;
Ok(())
})?; })?;
// Save the block_sums to the db for use later. // Save the block_sums to the db for use later.

View file

@ -121,33 +121,33 @@ pub fn process_block(b: &Block, ctx: &mut BlockContext<'_>) -> Result<Option<Tip
let ref mut header_pmmr = &mut ctx.header_pmmr; let ref mut header_pmmr = &mut ctx.header_pmmr;
let ref mut txhashset = &mut ctx.txhashset; let ref mut txhashset = &mut ctx.txhashset;
let ref mut batch = &mut ctx.batch; let ref mut batch = &mut ctx.batch;
let block_sums = txhashset::extending(header_pmmr, txhashset, batch, |ext| { let block_sums = txhashset::extending(header_pmmr, txhashset, batch, |ext, batch| {
rewind_and_apply_fork(&prev, ext)?; rewind_and_apply_fork(&prev, ext, batch)?;
// Check any coinbase being spent have matured sufficiently. // Check any coinbase being spent have matured sufficiently.
// This needs to be done within the context of a potentially // This needs to be done within the context of a potentially
// rewound txhashset extension to reflect chain state prior // rewound txhashset extension to reflect chain state prior
// to applying the new block. // to applying the new block.
verify_coinbase_maturity(b, ext)?; verify_coinbase_maturity(b, ext, batch)?;
// Validate the block against the UTXO set. // Validate the block against the UTXO set.
validate_utxo(b, ext)?; validate_utxo(b, ext, batch)?;
// Using block_sums (utxo_sum, kernel_sum) for the previous block from the db // Using block_sums (utxo_sum, kernel_sum) for the previous block from the db
// we can verify_kernel_sums across the full UTXO sum and full kernel sum // we can verify_kernel_sums across the full UTXO sum and full kernel sum
// accounting for inputs/outputs/kernels in this new block. // accounting for inputs/outputs/kernels in this new block.
// We know there are no double-spends etc. if this verifies successfully. // We know there are no double-spends etc. if this verifies successfully.
// Remember to save these to the db later on (regardless of extension rollback) // Remember to save these to the db later on (regardless of extension rollback)
let block_sums = verify_block_sums(b, ext.batch())?; let block_sums = verify_block_sums(b, batch)?;
// Apply the block to the txhashset state. // Apply the block to the txhashset state.
// Validate the txhashset roots and sizes against the block header. // Validate the txhashset roots and sizes against the block header.
// Block is invalid if there are any discrepancies. // Block is invalid if there are any discrepancies.
apply_block_to_txhashset(b, ext)?; apply_block_to_txhashset(b, ext, batch)?;
// If applying this block does not increase the work on the chain then // If applying this block does not increase the work on the chain then
// we know we have not yet updated the chain to produce a new chain head. // we know we have not yet updated the chain to produce a new chain head.
let head = ext.batch().head()?; let head = batch.head()?;
if !has_more_work(&b.header, &head) { if !has_more_work(&b.header, &head) {
ext.extension.force_rollback(); ext.extension.force_rollback();
} }
@ -208,8 +208,8 @@ pub fn sync_block_headers(
} }
// Now apply this entire chunk of headers to the sync MMR (ctx is sync MMR specific). // Now apply this entire chunk of headers to the sync MMR (ctx is sync MMR specific).
txhashset::header_extending(&mut ctx.header_pmmr, &mut ctx.batch, |ext| { txhashset::header_extending(&mut ctx.header_pmmr, &mut ctx.batch, |ext, batch| {
rewind_and_apply_header_fork(&last_header, ext)?; rewind_and_apply_header_fork(&last_header, ext, batch)?;
Ok(()) Ok(())
})?; })?;
@ -245,8 +245,8 @@ pub fn process_block_header(header: &BlockHeader, ctx: &mut BlockContext<'_>) ->
} }
} }
txhashset::header_extending(&mut ctx.header_pmmr, &mut ctx.batch, |ext| { txhashset::header_extending(&mut ctx.header_pmmr, &mut ctx.batch, |ext, batch| {
rewind_and_apply_header_fork(&prev_header, ext)?; rewind_and_apply_header_fork(&prev_header, ext, batch)?;
ext.validate_root(header)?; ext.validate_root(header)?;
ext.apply_header(header)?; ext.apply_header(header)?;
if !has_more_work(&header, &header_head) { if !has_more_work(&header, &header_head) {
@ -393,12 +393,13 @@ fn validate_block(block: &Block, ctx: &mut BlockContext<'_>) -> Result<(), Error
fn verify_coinbase_maturity( fn verify_coinbase_maturity(
block: &Block, block: &Block,
ext: &txhashset::ExtensionPair<'_>, ext: &txhashset::ExtensionPair<'_>,
batch: &store::Batch<'_>,
) -> Result<(), Error> { ) -> Result<(), Error> {
let ref extension = ext.extension; let ref extension = ext.extension;
let ref header_extension = ext.header_extension; let ref header_extension = ext.header_extension;
extension extension
.utxo_view(header_extension) .utxo_view(header_extension)
.verify_coinbase_maturity(&block.inputs(), block.header.height) .verify_coinbase_maturity(&block.inputs(), block.header.height, batch)
} }
/// Verify kernel sums across the full utxo and kernel sets based on block_sums /// Verify kernel sums across the full utxo and kernel sets based on block_sums
@ -429,11 +430,11 @@ fn verify_block_sums(b: &Block, batch: &store::Batch<'_>) -> Result<BlockSums, E
fn apply_block_to_txhashset( fn apply_block_to_txhashset(
block: &Block, block: &Block,
ext: &mut txhashset::ExtensionPair<'_>, ext: &mut txhashset::ExtensionPair<'_>,
batch: &store::Batch<'_>,
) -> Result<(), Error> { ) -> Result<(), Error> {
let ref mut extension = ext.extension; ext.extension.apply_block(block, batch)?;
extension.apply_block(block)?; ext.extension.validate_roots(&block.header)?;
extension.validate_roots()?; ext.extension.validate_sizes(&block.header)?;
extension.validate_sizes()?;
Ok(()) Ok(())
} }
@ -484,12 +485,13 @@ fn has_more_work(header: &BlockHeader, head: &Tip) -> bool {
pub fn rewind_and_apply_header_fork( pub fn rewind_and_apply_header_fork(
header: &BlockHeader, header: &BlockHeader,
ext: &mut txhashset::HeaderExtension<'_>, ext: &mut txhashset::HeaderExtension<'_>,
batch: &store::Batch<'_>,
) -> Result<(), Error> { ) -> Result<(), Error> {
let mut fork_hashes = vec![]; let mut fork_hashes = vec![];
let mut current = header.clone(); let mut current = header.clone();
while current.height > 0 && !ext.is_on_current_chain(&current).is_ok() { while current.height > 0 && !ext.is_on_current_chain(&current, batch).is_ok() {
fork_hashes.push(current.hash()); fork_hashes.push(current.hash());
current = ext.batch.get_previous_header(&current)?; current = batch.get_previous_header(&current)?;
} }
fork_hashes.reverse(); fork_hashes.reverse();
@ -500,8 +502,7 @@ pub fn rewind_and_apply_header_fork(
// Re-apply all headers on this fork. // Re-apply all headers on this fork.
for h in fork_hashes { for h in fork_hashes {
let header = ext let header = batch
.batch
.get_block_header(&h) .get_block_header(&h)
.map_err(|e| ErrorKind::StoreErr(e, format!("getting forked headers")))?; .map_err(|e| ErrorKind::StoreErr(e, format!("getting forked headers")))?;
ext.validate_root(&header)?; ext.validate_root(&header)?;
@ -518,21 +519,25 @@ pub fn rewind_and_apply_header_fork(
pub fn rewind_and_apply_fork( pub fn rewind_and_apply_fork(
header: &BlockHeader, header: &BlockHeader,
ext: &mut txhashset::ExtensionPair<'_>, ext: &mut txhashset::ExtensionPair<'_>,
batch: &store::Batch<'_>,
) -> Result<(), Error> { ) -> Result<(), Error> {
let ref mut batch = ext.batch();
let ref mut extension = ext.extension; let ref mut extension = ext.extension;
let ref mut header_extension = ext.header_extension; let ref mut header_extension = ext.header_extension;
// Prepare the header MMR. // Prepare the header MMR.
rewind_and_apply_header_fork(header, header_extension)?; rewind_and_apply_header_fork(header, header_extension, batch)?;
// Rewind the txhashset extension back to common ancestor based on header MMR. // Rewind the txhashset extension back to common ancestor based on header MMR.
let mut current = batch.head_header()?; let mut current = batch.head_header()?;
while current.height > 0 && !header_extension.is_on_current_chain(&current).is_ok() { while current.height > 0
&& !header_extension
.is_on_current_chain(&current, batch)
.is_ok()
{
current = batch.get_previous_header(&current)?; current = batch.get_previous_header(&current)?;
} }
let fork_point = current; let fork_point = current;
extension.rewind(&fork_point)?; extension.rewind(&fork_point, batch)?;
// Then apply all full blocks since this common ancestor // Then apply all full blocks since this common ancestor
// to put txhashset extension in a state to accept the new block. // to put txhashset extension in a state to accept the new block.
@ -550,20 +555,26 @@ pub fn rewind_and_apply_fork(
.map_err(|e| ErrorKind::StoreErr(e, format!("getting forked blocks")))?; .map_err(|e| ErrorKind::StoreErr(e, format!("getting forked blocks")))?;
// Re-verify coinbase maturity along this fork. // Re-verify coinbase maturity along this fork.
verify_coinbase_maturity(&fb, ext)?; verify_coinbase_maturity(&fb, ext, batch)?;
// Validate the block against the UTXO set. // Validate the block against the UTXO set.
validate_utxo(&fb, ext)?; validate_utxo(&fb, ext, batch)?;
// Re-verify block_sums to set the block_sums up on this fork correctly. // Re-verify block_sums to set the block_sums up on this fork correctly.
verify_block_sums(&fb, batch)?; verify_block_sums(&fb, batch)?;
// Re-apply the blocks. // Re-apply the blocks.
apply_block_to_txhashset(&fb, ext)?; apply_block_to_txhashset(&fb, ext, batch)?;
} }
Ok(()) Ok(())
} }
fn validate_utxo(block: &Block, ext: &mut txhashset::ExtensionPair<'_>) -> Result<(), Error> { fn validate_utxo(
block: &Block,
ext: &mut txhashset::ExtensionPair<'_>,
batch: &store::Batch<'_>,
) -> Result<(), Error> {
let ref mut extension = ext.extension; let ref mut extension = ext.extension;
let ref mut header_extension = ext.header_extension; let ref mut header_extension = ext.header_extension;
extension.utxo_view(header_extension).validate_block(block) extension
.utxo_view(header_extension)
.validate_block(block, batch)
} }

View file

@ -19,13 +19,11 @@ use std::fs::File;
use crate::core::core::pmmr::RewindablePMMR; use crate::core::core::pmmr::RewindablePMMR;
use crate::core::core::{BlockHeader, TxKernel}; use crate::core::core::{BlockHeader, TxKernel};
use crate::error::{Error, ErrorKind}; use crate::error::{Error, ErrorKind};
use crate::store::Batch;
use grin_store::pmmr::PMMRBackend; use grin_store::pmmr::PMMRBackend;
/// Rewindable (but readonly) view of the kernel set (based on kernel MMR). /// Rewindable (but readonly) view of the kernel set (based on kernel MMR).
pub struct RewindableKernelView<'a> { pub struct RewindableKernelView<'a> {
pmmr: RewindablePMMR<'a, TxKernel, PMMRBackend<TxKernel>>, pmmr: RewindablePMMR<'a, TxKernel, PMMRBackend<TxKernel>>,
batch: &'a Batch<'a>,
header: BlockHeader, header: BlockHeader,
} }
@ -33,21 +31,9 @@ impl<'a> RewindableKernelView<'a> {
/// Build a new readonly kernel view. /// Build a new readonly kernel view.
pub fn new( pub fn new(
pmmr: RewindablePMMR<'a, TxKernel, PMMRBackend<TxKernel>>, pmmr: RewindablePMMR<'a, TxKernel, PMMRBackend<TxKernel>>,
batch: &'a Batch<'_>,
header: BlockHeader, header: BlockHeader,
) -> RewindableKernelView<'a> { ) -> RewindableKernelView<'a> {
RewindableKernelView { RewindableKernelView { pmmr, header }
pmmr,
batch,
header,
}
}
/// Accessor for the batch used in this view.
/// We will discard this batch (rollback) at the end, so be aware of this.
/// Nothing will get written to the db/index via this view.
pub fn batch(&self) -> &'a Batch<'_> {
self.batch
} }
/// Rewind this readonly view to a previous block. /// Rewind this readonly view to a previous block.

View file

@ -104,10 +104,10 @@ impl PMMRHandle<BlockHeader> {
} }
} }
/// An easy to manipulate structure holding the 3 sum trees necessary to /// An easy to manipulate structure holding the 3 MMRs necessary to
/// validate blocks and capturing the Output set, the range proofs and the /// validate blocks and capturing the output set, associated rangeproofs and the
/// kernels. Also handles the index of Commitments to positions in the /// kernels. Also handles the index of Commitments to positions in the
/// output and range proof pmmr trees. /// output and rangeproof MMRs.
/// ///
/// Note that the index is never authoritative, only the trees are /// Note that the index is never authoritative, only the trees are
/// guaranteed to indicate whether an output is spent or not. The index /// guaranteed to indicate whether an output is spent or not. The index
@ -465,7 +465,7 @@ pub fn extending_readonly<F, T>(
inner: F, inner: F,
) -> Result<T, Error> ) -> Result<T, Error>
where where
F: FnOnce(&mut ExtensionPair<'_>) -> Result<T, Error>, F: FnOnce(&mut ExtensionPair<'_>, &Batch<'_>) -> Result<T, Error>,
{ {
let commit_index = trees.commit_index.clone(); let commit_index = trees.commit_index.clone();
let batch = commit_index.batch()?; let batch = commit_index.batch()?;
@ -483,13 +483,13 @@ where
let res = { let res = {
let header_pmmr = PMMR::at(&mut handle.backend, handle.last_pos); let header_pmmr = PMMR::at(&mut handle.backend, handle.last_pos);
let mut header_extension = HeaderExtension::new(header_pmmr, &batch, header_head); let mut header_extension = HeaderExtension::new(header_pmmr, header_head);
let mut extension = Extension::new(trees, &batch, head); let mut extension = Extension::new(trees, head);
let mut extension_pair = ExtensionPair { let mut extension_pair = ExtensionPair {
header_extension: &mut header_extension, header_extension: &mut header_extension,
extension: &mut extension, extension: &mut extension,
}; };
inner(&mut extension_pair) inner(&mut extension_pair, &batch)
}; };
trace!("Rollbacking txhashset (readonly) extension."); trace!("Rollbacking txhashset (readonly) extension.");
@ -513,7 +513,7 @@ pub fn utxo_view<F, T>(
inner: F, inner: F,
) -> Result<T, Error> ) -> Result<T, Error>
where where
F: FnOnce(&UTXOView<'_>) -> Result<T, Error>, F: FnOnce(&UTXOView<'_>, &Batch<'_>) -> Result<T, Error>,
{ {
let res: Result<T, Error>; let res: Result<T, Error>;
{ {
@ -526,8 +526,8 @@ where
// Create a new batch here to pass into the utxo_view. // Create a new batch here to pass into the utxo_view.
// Discard it (rollback) after we finish with the utxo_view. // Discard it (rollback) after we finish with the utxo_view.
let batch = trees.commit_index.batch()?; let batch = trees.commit_index.batch()?;
let utxo = UTXOView::new(output_pmmr, header_pmmr, rproof_pmmr, &batch); let utxo = UTXOView::new(output_pmmr, header_pmmr, rproof_pmmr);
res = inner(&utxo); res = inner(&utxo, &batch);
} }
res res
} }
@ -539,7 +539,7 @@ where
/// when we are done with the view. /// when we are done with the view.
pub fn rewindable_kernel_view<F, T>(trees: &TxHashSet, inner: F) -> Result<T, Error> pub fn rewindable_kernel_view<F, T>(trees: &TxHashSet, inner: F) -> Result<T, Error>
where where
F: FnOnce(&mut RewindableKernelView<'_>) -> Result<T, Error>, F: FnOnce(&mut RewindableKernelView<'_>, &Batch<'_>) -> Result<T, Error>,
{ {
let res: Result<T, Error>; let res: Result<T, Error>;
{ {
@ -550,8 +550,8 @@ where
// Discard it (rollback) after we finish with the kernel_view. // Discard it (rollback) after we finish with the kernel_view.
let batch = trees.commit_index.batch()?; let batch = trees.commit_index.batch()?;
let header = batch.head_header()?; let header = batch.head_header()?;
let mut view = RewindableKernelView::new(kernel_pmmr, &batch, header); let mut view = RewindableKernelView::new(kernel_pmmr, header);
res = inner(&mut view); res = inner(&mut view, &batch);
} }
res res
} }
@ -570,7 +570,7 @@ pub fn extending<'a, F, T>(
inner: F, inner: F,
) -> Result<T, Error> ) -> Result<T, Error>
where where
F: FnOnce(&mut ExtensionPair<'_>) -> Result<T, Error>, F: FnOnce(&mut ExtensionPair<'_>, &Batch<'_>) -> Result<T, Error>,
{ {
let sizes: (u64, u64, u64); let sizes: (u64, u64, u64);
let res: Result<T, Error>; let res: Result<T, Error>;
@ -593,13 +593,13 @@ where
trace!("Starting new txhashset extension."); trace!("Starting new txhashset extension.");
let header_pmmr = PMMR::at(&mut header_pmmr.backend, header_pmmr.last_pos); let header_pmmr = PMMR::at(&mut header_pmmr.backend, header_pmmr.last_pos);
let mut header_extension = HeaderExtension::new(header_pmmr, &child_batch, header_head); let mut header_extension = HeaderExtension::new(header_pmmr, header_head);
let mut extension = Extension::new(trees, &child_batch, head); let mut extension = Extension::new(trees, head);
let mut extension_pair = ExtensionPair { let mut extension_pair = ExtensionPair {
header_extension: &mut header_extension, header_extension: &mut header_extension,
extension: &mut extension, extension: &mut extension,
}; };
res = inner(&mut extension_pair); res = inner(&mut extension_pair, &child_batch);
rollback = extension_pair.extension.rollback; rollback = extension_pair.extension.rollback;
sizes = extension_pair.extension.sizes(); sizes = extension_pair.extension.sizes();
@ -653,7 +653,7 @@ pub fn header_extending<'a, F, T>(
inner: F, inner: F,
) -> Result<T, Error> ) -> Result<T, Error>
where where
F: FnOnce(&mut HeaderExtension<'_>) -> Result<T, Error>, F: FnOnce(&mut HeaderExtension<'_>, &Batch<'_>) -> Result<T, Error>,
{ {
let size: u64; let size: u64;
let res: Result<T, Error>; let res: Result<T, Error>;
@ -674,8 +674,8 @@ where
{ {
let pmmr = PMMR::at(&mut handle.backend, handle.last_pos); let pmmr = PMMR::at(&mut handle.backend, handle.last_pos);
let mut extension = HeaderExtension::new(pmmr, &child_batch, head); let mut extension = HeaderExtension::new(pmmr, head);
res = inner(&mut extension); res = inner(&mut extension, &child_batch);
rollback = extension.rollback; rollback = extension.rollback;
size = extension.size(); size = extension.size();
@ -708,24 +708,17 @@ pub struct HeaderExtension<'a> {
/// Rollback flag. /// Rollback flag.
rollback: bool, rollback: bool,
/// Batch in which the extension occurs, public so it can be used within
/// an `extending` closure. Just be careful using it that way as it will
/// get rolled back with the extension (i.e on a losing fork).
pub batch: &'a Batch<'a>,
} }
impl<'a> HeaderExtension<'a> { impl<'a> HeaderExtension<'a> {
fn new( fn new(
pmmr: PMMR<'a, BlockHeader, PMMRBackend<BlockHeader>>, pmmr: PMMR<'a, BlockHeader, PMMRBackend<BlockHeader>>,
batch: &'a Batch<'_>,
head: Tip, head: Tip,
) -> HeaderExtension<'a> { ) -> HeaderExtension<'a> {
HeaderExtension { HeaderExtension {
head, head,
pmmr, pmmr,
rollback: false, rollback: false,
batch,
} }
} }
@ -742,10 +735,14 @@ impl<'a> HeaderExtension<'a> {
/// Get the header at the specified height based on the current state of the header extension. /// Get the header at the specified height based on the current state of the header extension.
/// Derives the MMR pos from the height (insertion index) and retrieves the header hash. /// Derives the MMR pos from the height (insertion index) and retrieves the header hash.
/// Looks the header up in the db by hash. /// Looks the header up in the db by hash.
pub fn get_header_by_height(&self, height: u64) -> Result<BlockHeader, Error> { pub fn get_header_by_height(
&self,
height: u64,
batch: &Batch<'_>,
) -> Result<BlockHeader, Error> {
let pos = pmmr::insertion_to_pmmr_index(height + 1); let pos = pmmr::insertion_to_pmmr_index(height + 1);
if let Some(hash) = self.get_header_hash(pos) { if let Some(hash) = self.get_header_hash(pos) {
Ok(self.batch.get_block_header(&hash)?) Ok(batch.get_block_header(&hash)?)
} else { } else {
Err(ErrorKind::Other(format!("get header by height")).into()) Err(ErrorKind::Other(format!("get header by height")).into())
} }
@ -753,11 +750,15 @@ impl<'a> HeaderExtension<'a> {
/// Compares the provided header to the header in the header MMR at that height. /// Compares the provided header to the header in the header MMR at that height.
/// If these match we know the header is on the current chain. /// If these match we know the header is on the current chain.
pub fn is_on_current_chain(&self, header: &BlockHeader) -> Result<(), Error> { pub fn is_on_current_chain(
&self,
header: &BlockHeader,
batch: &Batch<'_>,
) -> Result<(), Error> {
if header.height > self.head.height { if header.height > self.head.height {
return Err(ErrorKind::Other(format!("not on current chain, out beyond")).into()); return Err(ErrorKind::Other(format!("not on current chain, out beyond")).into());
} }
let chain_header = self.get_header_by_height(header.height)?; let chain_header = self.get_header_by_height(header.height, batch)?;
if chain_header.hash() == header.hash() { if chain_header.hash() == header.hash() {
Ok(()) Ok(())
} else { } else {
@ -835,14 +836,7 @@ pub struct ExtensionPair<'a> {
pub extension: &'a mut Extension<'a>, pub extension: &'a mut Extension<'a>,
} }
impl<'a> ExtensionPair<'a> { /// Allows the application of new blocks on top of the txhashset in a
/// Accessor for the batch associated with this extension pair.
pub fn batch(&mut self) -> &'a Batch<'a> {
self.extension.batch
}
}
/// Allows the application of new blocks on top of the sum trees in a
/// reversible manner within a unit of work provided by the `extending` /// reversible manner within a unit of work provided by the `extending`
/// function. /// function.
pub struct Extension<'a> { pub struct Extension<'a> {
@ -856,11 +850,6 @@ pub struct Extension<'a> {
/// Rollback flag. /// Rollback flag.
rollback: bool, rollback: bool,
/// Batch in which the extension occurs, public so it can be used within
/// an `extending` closure. Just be careful using it that way as it will
/// get rolled back with the extension (i.e on a losing fork).
pub batch: &'a Batch<'a>,
} }
impl<'a> Committed for Extension<'a> { impl<'a> Committed for Extension<'a> {
@ -892,7 +881,7 @@ impl<'a> Committed for Extension<'a> {
} }
impl<'a> Extension<'a> { impl<'a> Extension<'a> {
fn new(trees: &'a mut TxHashSet, batch: &'a Batch<'_>, head: Tip) -> Extension<'a> { fn new(trees: &'a mut TxHashSet, head: Tip) -> Extension<'a> {
Extension { Extension {
head, head,
output_pmmr: PMMR::at( output_pmmr: PMMR::at(
@ -909,7 +898,6 @@ impl<'a> Extension<'a> {
), ),
bitmap_accumulator: trees.bitmap_accumulator.clone(), bitmap_accumulator: trees.bitmap_accumulator.clone(),
rollback: false, rollback: false,
batch,
} }
} }
@ -925,23 +913,21 @@ impl<'a> Extension<'a> {
self.output_pmmr.readonly_pmmr(), self.output_pmmr.readonly_pmmr(),
header_ext.pmmr.readonly_pmmr(), header_ext.pmmr.readonly_pmmr(),
self.rproof_pmmr.readonly_pmmr(), self.rproof_pmmr.readonly_pmmr(),
self.batch,
) )
} }
/// Apply a new block to the current txhashet extension (output, rangeproof, kernel MMRs). /// Apply a new block to the current txhashet extension (output, rangeproof, kernel MMRs).
pub fn apply_block(&mut self, b: &Block) -> Result<(), Error> { pub fn apply_block(&mut self, b: &Block, batch: &Batch<'_>) -> Result<(), Error> {
let mut affected_pos = vec![]; let mut affected_pos = vec![];
for out in b.outputs() { for out in b.outputs() {
let pos = self.apply_output(out)?; let pos = self.apply_output(out, batch)?;
affected_pos.push(pos); affected_pos.push(pos);
self.batch batch.save_output_pos_height(&out.commitment(), pos, b.header.height)?;
.save_output_pos_height(&out.commitment(), pos, b.header.height)?;
} }
for input in b.inputs() { for input in b.inputs() {
let pos = self.apply_input(input)?; let pos = self.apply_input(input, batch)?;
affected_pos.push(pos); affected_pos.push(pos);
} }
@ -977,9 +963,9 @@ impl<'a> Extension<'a> {
) )
} }
fn apply_input(&mut self, input: &Input) -> Result<u64, Error> { fn apply_input(&mut self, input: &Input, batch: &Batch<'_>) -> Result<u64, Error> {
let commit = input.commitment(); let commit = input.commitment();
let pos_res = self.batch.get_output_pos(&commit); let pos_res = batch.get_output_pos(&commit);
if let Ok(pos) = pos_res { if let Ok(pos) = pos_res {
// First check this input corresponds to an existing entry in the output MMR. // First check this input corresponds to an existing entry in the output MMR.
if let Some(hash) = self.output_pmmr.get_hash(pos) { if let Some(hash) = self.output_pmmr.get_hash(pos) {
@ -1008,10 +994,10 @@ impl<'a> Extension<'a> {
} }
} }
fn apply_output(&mut self, out: &Output) -> Result<u64, Error> { fn apply_output(&mut self, out: &Output, batch: &Batch<'_>) -> Result<u64, Error> {
let commit = out.commitment(); let commit = out.commitment();
if let Ok(pos) = self.batch.get_output_pos(&commit) { if let Ok(pos) = batch.get_output_pos(&commit) {
if let Some(out_mmr) = self.output_pmmr.get_data(pos) { if let Some(out_mmr) = self.output_pmmr.get_data(pos) {
if out_mmr.commitment() == commit { if out_mmr.commitment() == commit {
return Err(ErrorKind::DuplicateCommitment(commit).into()); return Err(ErrorKind::DuplicateCommitment(commit).into());
@ -1061,10 +1047,14 @@ impl<'a> Extension<'a> {
/// Note: this relies on the MMR being stable even after pruning/compaction. /// Note: this relies on the MMR being stable even after pruning/compaction.
/// We need the hash of each sibling pos from the pos up to the peak /// We need the hash of each sibling pos from the pos up to the peak
/// including the sibling leaf node which may have been removed. /// including the sibling leaf node which may have been removed.
pub fn merkle_proof(&self, output: &OutputIdentifier) -> Result<MerkleProof, Error> { pub fn merkle_proof(
&self,
output: &OutputIdentifier,
batch: &Batch<'_>,
) -> Result<MerkleProof, Error> {
debug!("txhashset: merkle_proof: output: {:?}", output.commit,); debug!("txhashset: merkle_proof: output: {:?}", output.commit,);
// then calculate the Merkle Proof based on the known pos // then calculate the Merkle Proof based on the known pos
let pos = self.batch.get_output_pos(&output.commit)?; let pos = batch.get_output_pos(&output.commit)?;
let merkle_proof = self let merkle_proof = self
.output_pmmr .output_pmmr
.merkle_proof(pos) .merkle_proof(pos)
@ -1078,8 +1068,8 @@ impl<'a> Extension<'a> {
/// the block hash as filename suffix. /// the block hash as filename suffix.
/// Needed for fast-sync (utxo file needs to be rewound before sending /// Needed for fast-sync (utxo file needs to be rewound before sending
/// across). /// across).
pub fn snapshot(&mut self) -> Result<(), Error> { pub fn snapshot(&mut self, batch: &Batch<'_>) -> Result<(), Error> {
let header = self.batch.get_block_header(&self.head.last_block_h)?; let header = batch.get_block_header(&self.head.last_block_h)?;
self.output_pmmr self.output_pmmr
.snapshot(&header) .snapshot(&header)
.map_err(|e| ErrorKind::Other(e))?; .map_err(|e| ErrorKind::Other(e))?;
@ -1091,7 +1081,7 @@ impl<'a> Extension<'a> {
/// Rewinds the MMRs to the provided block, rewinding to the last output pos /// Rewinds the MMRs to the provided block, rewinding to the last output pos
/// and last kernel pos of that block. /// and last kernel pos of that block.
pub fn rewind(&mut self, header: &BlockHeader) -> Result<(), Error> { pub fn rewind(&mut self, header: &BlockHeader, batch: &Batch<'_>) -> Result<(), Error> {
debug!( debug!(
"Rewind extension to {} at {} from {} at {}", "Rewind extension to {} at {} from {} at {}",
header.hash(), header.hash(),
@ -1106,8 +1096,8 @@ impl<'a> Extension<'a> {
// undone during rewind). // undone during rewind).
// Rewound output pos will be removed from the MMR. // Rewound output pos will be removed from the MMR.
// Rewound input (spent) pos will be added back to the MMR. // Rewound input (spent) pos will be added back to the MMR.
let head_header = self.batch.get_block_header(&self.head.hash())?; let head_header = batch.get_block_header(&self.head.hash())?;
let rewind_rm_pos = input_pos_to_rewind(header, &head_header, &self.batch)?; let rewind_rm_pos = input_pos_to_rewind(header, &head_header, batch)?;
self.rewind_to_pos( self.rewind_to_pos(
header.output_mmr_size, header.output_mmr_size,
@ -1149,7 +1139,7 @@ impl<'a> Extension<'a> {
} }
/// Current root hashes and sums (if applicable) for the Output, range proof /// Current root hashes and sums (if applicable) for the Output, range proof
/// and kernel sum trees. /// and kernel MMRs.
pub fn roots(&self) -> Result<TxHashSetRoots, Error> { pub fn roots(&self) -> Result<TxHashSetRoots, Error> {
Ok(TxHashSetRoots { Ok(TxHashSetRoots {
output_roots: OutputRoots { output_roots: OutputRoots {
@ -1171,24 +1161,22 @@ impl<'a> Extension<'a> {
} }
/// Validate the MMR (output, rangeproof, kernel) roots against the latest header. /// Validate the MMR (output, rangeproof, kernel) roots against the latest header.
pub fn validate_roots(&self) -> Result<(), Error> { pub fn validate_roots(&self, header: &BlockHeader) -> Result<(), Error> {
if self.head.height == 0 { if header.height == 0 {
return Ok(()); return Ok(());
} }
let head_header = self.batch.get_block_header(&self.head.hash())?; self.roots()?.validate(header)
self.roots()?.validate(&head_header)
} }
/// Validate the header, output and kernel MMR sizes against the block header. /// Validate the header, output and kernel MMR sizes against the block header.
pub fn validate_sizes(&self) -> Result<(), Error> { pub fn validate_sizes(&self, header: &BlockHeader) -> Result<(), Error> {
if self.head.height == 0 { if header.height == 0 {
return Ok(()); return Ok(());
} }
let head_header = self.batch.get_block_header(&self.head.last_block_h)?;
if ( if (
head_header.output_mmr_size, header.output_mmr_size,
head_header.output_mmr_size, header.output_mmr_size,
head_header.kernel_mmr_size, header.kernel_mmr_size,
) != self.sizes() ) != self.sizes()
{ {
Err(ErrorKind::InvalidMMRSize.into()) Err(ErrorKind::InvalidMMRSize.into())
@ -1229,13 +1217,13 @@ impl<'a> Extension<'a> {
pub fn validate_kernel_sums( pub fn validate_kernel_sums(
&self, &self,
genesis: &BlockHeader, genesis: &BlockHeader,
header: &BlockHeader,
) -> Result<(Commitment, Commitment), Error> { ) -> Result<(Commitment, Commitment), Error> {
let now = Instant::now(); let now = Instant::now();
let head_header = self.batch.get_block_header(&self.head.last_block_h)?;
let (utxo_sum, kernel_sum) = self.verify_kernel_sums( let (utxo_sum, kernel_sum) = self.verify_kernel_sums(
head_header.total_overage(genesis.kernel_mmr_size > 0), header.total_overage(genesis.kernel_mmr_size > 0),
head_header.total_kernel_offset(), header.total_kernel_offset(),
)?; )?;
debug!( debug!(
@ -1253,10 +1241,11 @@ impl<'a> Extension<'a> {
genesis: &BlockHeader, genesis: &BlockHeader,
fast_validation: bool, fast_validation: bool,
status: &dyn TxHashsetWriteStatus, status: &dyn TxHashsetWriteStatus,
header: &BlockHeader,
) -> Result<(Commitment, Commitment), Error> { ) -> Result<(Commitment, Commitment), Error> {
self.validate_mmrs()?; self.validate_mmrs()?;
self.validate_roots()?; self.validate_roots(header)?;
self.validate_sizes()?; self.validate_sizes(header)?;
if self.head.height == 0 { if self.head.height == 0 {
let zero_commit = secp_static::commit_to_zero_value(); let zero_commit = secp_static::commit_to_zero_value();
@ -1265,7 +1254,7 @@ impl<'a> Extension<'a> {
// The real magicking happens here. Sum of kernel excesses should equal // The real magicking happens here. Sum of kernel excesses should equal
// sum of unspent outputs minus total supply. // sum of unspent outputs minus total supply.
let (output_sum, kernel_sum) = self.validate_kernel_sums(genesis)?; let (output_sum, kernel_sum) = self.validate_kernel_sums(genesis, header)?;
// These are expensive verification step (skipped for "fast validation"). // These are expensive verification step (skipped for "fast validation").
if !fast_validation { if !fast_validation {
@ -1294,7 +1283,7 @@ impl<'a> Extension<'a> {
debug!("-- end of outputs --"); debug!("-- end of outputs --");
} }
/// Dumps the state of the 3 sum trees to stdout for debugging. Short /// Dumps the state of the 3 MMRs to stdout for debugging. Short
/// version only prints the Output tree. /// version only prints the Output tree.
pub fn dump(&self, short: bool) { pub fn dump(&self, short: bool) {
debug!("-- outputs --"); debug!("-- outputs --");
@ -1307,7 +1296,7 @@ impl<'a> Extension<'a> {
} }
} }
/// Sizes of each of the sum trees /// Sizes of each of the MMRs
pub fn sizes(&self) -> (u64, u64, u64) { pub fn sizes(&self) -> (u64, u64, u64) {
( (
self.output_pmmr.unpruned_size(), self.output_pmmr.unpruned_size(),

View file

@ -29,7 +29,6 @@ pub struct UTXOView<'a> {
output_pmmr: ReadonlyPMMR<'a, Output, PMMRBackend<Output>>, output_pmmr: ReadonlyPMMR<'a, Output, PMMRBackend<Output>>,
header_pmmr: ReadonlyPMMR<'a, BlockHeader, PMMRBackend<BlockHeader>>, header_pmmr: ReadonlyPMMR<'a, BlockHeader, PMMRBackend<BlockHeader>>,
rproof_pmmr: ReadonlyPMMR<'a, RangeProof, PMMRBackend<RangeProof>>, rproof_pmmr: ReadonlyPMMR<'a, RangeProof, PMMRBackend<RangeProof>>,
batch: &'a Batch<'a>,
} }
impl<'a> UTXOView<'a> { impl<'a> UTXOView<'a> {
@ -38,26 +37,24 @@ impl<'a> UTXOView<'a> {
output_pmmr: ReadonlyPMMR<'a, Output, PMMRBackend<Output>>, output_pmmr: ReadonlyPMMR<'a, Output, PMMRBackend<Output>>,
header_pmmr: ReadonlyPMMR<'a, BlockHeader, PMMRBackend<BlockHeader>>, header_pmmr: ReadonlyPMMR<'a, BlockHeader, PMMRBackend<BlockHeader>>,
rproof_pmmr: ReadonlyPMMR<'a, RangeProof, PMMRBackend<RangeProof>>, rproof_pmmr: ReadonlyPMMR<'a, RangeProof, PMMRBackend<RangeProof>>,
batch: &'a Batch<'_>,
) -> UTXOView<'a> { ) -> UTXOView<'a> {
UTXOView { UTXOView {
output_pmmr, output_pmmr,
header_pmmr, header_pmmr,
rproof_pmmr, rproof_pmmr,
batch,
} }
} }
/// Validate a block against the current UTXO set. /// Validate a block against the current UTXO set.
/// Every input must spend an output that currently exists in the UTXO set. /// Every input must spend an output that currently exists in the UTXO set.
/// No duplicate outputs. /// No duplicate outputs.
pub fn validate_block(&self, block: &Block) -> Result<(), Error> { pub fn validate_block(&self, block: &Block, batch: &Batch<'_>) -> Result<(), Error> {
for output in block.outputs() { for output in block.outputs() {
self.validate_output(output)?; self.validate_output(output, batch)?;
} }
for input in block.inputs() { for input in block.inputs() {
self.validate_input(input)?; self.validate_input(input, batch)?;
} }
Ok(()) Ok(())
} }
@ -65,13 +62,13 @@ impl<'a> UTXOView<'a> {
/// Validate a transaction against the current UTXO set. /// Validate a transaction against the current UTXO set.
/// Every input must spend an output that currently exists in the UTXO set. /// Every input must spend an output that currently exists in the UTXO set.
/// No duplicate outputs. /// No duplicate outputs.
pub fn validate_tx(&self, tx: &Transaction) -> Result<(), Error> { pub fn validate_tx(&self, tx: &Transaction, batch: &Batch<'_>) -> Result<(), Error> {
for output in tx.outputs() { for output in tx.outputs() {
self.validate_output(output)?; self.validate_output(output, batch)?;
} }
for input in tx.inputs() { for input in tx.inputs() {
self.validate_input(input)?; self.validate_input(input, batch)?;
} }
Ok(()) Ok(())
} }
@ -79,8 +76,8 @@ impl<'a> UTXOView<'a> {
// Input is valid if it is spending an (unspent) output // Input is valid if it is spending an (unspent) output
// that currently exists in the output MMR. // that currently exists in the output MMR.
// Compare the hash in the output MMR at the expected pos. // Compare the hash in the output MMR at the expected pos.
fn validate_input(&self, input: &Input) -> Result<(), Error> { fn validate_input(&self, input: &Input, batch: &Batch<'_>) -> Result<(), Error> {
if let Ok(pos) = self.batch.get_output_pos(&input.commitment()) { if let Ok(pos) = batch.get_output_pos(&input.commitment()) {
if let Some(hash) = self.output_pmmr.get_hash(pos) { if let Some(hash) = self.output_pmmr.get_hash(pos) {
if hash == input.hash_with_index(pos - 1) { if hash == input.hash_with_index(pos - 1) {
return Ok(()); return Ok(());
@ -91,8 +88,8 @@ impl<'a> UTXOView<'a> {
} }
// Output is valid if it would not result in a duplicate commitment in the output MMR. // Output is valid if it would not result in a duplicate commitment in the output MMR.
fn validate_output(&self, output: &Output) -> Result<(), Error> { fn validate_output(&self, output: &Output, batch: &Batch<'_>) -> Result<(), Error> {
if let Ok(pos) = self.batch.get_output_pos(&output.commitment()) { if let Ok(pos) = batch.get_output_pos(&output.commitment()) {
if let Some(out_mmr) = self.output_pmmr.get_data(pos) { if let Some(out_mmr) = self.output_pmmr.get_data(pos) {
if out_mmr.commitment() == output.commitment() { if out_mmr.commitment() == output.commitment() {
return Err(ErrorKind::DuplicateCommitment(output.commitment()).into()); return Err(ErrorKind::DuplicateCommitment(output.commitment()).into());
@ -115,13 +112,18 @@ impl<'a> UTXOView<'a> {
/// Verify we are not attempting to spend any coinbase outputs /// Verify we are not attempting to spend any coinbase outputs
/// that have not sufficiently matured. /// that have not sufficiently matured.
pub fn verify_coinbase_maturity(&self, inputs: &Vec<Input>, height: u64) -> Result<(), Error> { pub fn verify_coinbase_maturity(
&self,
inputs: &Vec<Input>,
height: u64,
batch: &Batch<'_>,
) -> Result<(), Error> {
// Find the greatest output pos of any coinbase // Find the greatest output pos of any coinbase
// outputs we are attempting to spend. // outputs we are attempting to spend.
let pos = inputs let pos = inputs
.iter() .iter()
.filter(|x| x.is_coinbase()) .filter(|x| x.is_coinbase())
.filter_map(|x| self.batch.get_output_pos(&x.commitment()).ok()) .filter_map(|x| batch.get_output_pos(&x.commitment()).ok())
.max() .max()
.unwrap_or(0); .unwrap_or(0);
@ -135,7 +137,7 @@ impl<'a> UTXOView<'a> {
// Find the "cutoff" pos in the output MMR based on the // Find the "cutoff" pos in the output MMR based on the
// header from 1,000 blocks ago. // header from 1,000 blocks ago.
let cutoff_height = height.checked_sub(global::coinbase_maturity()).unwrap_or(0); let cutoff_height = height.checked_sub(global::coinbase_maturity()).unwrap_or(0);
let cutoff_header = self.get_header_by_height(cutoff_height)?; let cutoff_header = self.get_header_by_height(cutoff_height, batch)?;
let cutoff_pos = cutoff_header.output_mmr_size; let cutoff_pos = cutoff_header.output_mmr_size;
// If any output pos exceed the cutoff_pos // If any output pos exceed the cutoff_pos
@ -156,10 +158,14 @@ impl<'a> UTXOView<'a> {
/// Get the header at the specified height based on the current state of the extension. /// Get the header at the specified height based on the current state of the extension.
/// Derives the MMR pos from the height (insertion index) and retrieves the header hash. /// Derives the MMR pos from the height (insertion index) and retrieves the header hash.
/// Looks the header up in the db by hash. /// Looks the header up in the db by hash.
pub fn get_header_by_height(&self, height: u64) -> Result<BlockHeader, Error> { pub fn get_header_by_height(
&self,
height: u64,
batch: &Batch<'_>,
) -> Result<BlockHeader, Error> {
let pos = pmmr::insertion_to_pmmr_index(height + 1); let pos = pmmr::insertion_to_pmmr_index(height + 1);
if let Some(hash) = self.get_header_hash(pos) { if let Some(hash) = self.get_header_hash(pos) {
let header = self.batch.get_block_header(&hash)?; let header = batch.get_block_header(&hash)?;
Ok(header) Ok(header)
} else { } else {
Err(ErrorKind::Other(format!("get header by height")).into()) Err(ErrorKind::Other(format!("get header by height")).into())