mirror of https://github.com/mimblewimble/grin.git
UTXOView (readonly, minimal, output only txhashset extension) (#1584)
* make the utxo view explicit
* introduce utxo_view (readonly, minimal txhashset extension)
* rustfmt
* cleanup
* cleanup
* rustfmt
* cleanup build warnings, comments etc.
* rustfmt
* utxo_view even more readonly now
* cleanup
* refactor pmmr, split out readonly_pmmr and backend
* rustfmt
This commit is contained in:
parent 830f4d6b7c → commit 11f2d7b6d4
15 changed files with 376 additions and 172 deletions
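In short: validating a transaction no longer grabs the txhashset write lock, spins up a full extension and rewinds it to a caller-supplied header; it borrows a minimal readonly view of the output MMR instead. A sketch of the new call pattern, lifted from the first hunk below (the `chain` and `tx` bindings are assumed):

// Readonly validation: a read lock and a throwaway UTXO view, no rewind.
let txhashset = chain.txhashset.read().unwrap();
txhashset::utxo_view(&txhashset, |utxo| {
    // inputs must spend outputs that exist; outputs must not duplicate
    // commitments already in the UTXO set
    utxo.validate_tx(&tx)?;
    Ok(())
})?;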
@@ -446,11 +446,11 @@ impl Chain {
         }
     }
 
-    pub fn validate_tx(&self, tx: &Transaction, header: &BlockHeader) -> Result<(), Error> {
-        let mut txhashset = self.txhashset.write().unwrap();
-        txhashset::extending_readonly(&mut txhashset, |extension| {
-            extension.rewind(header)?;
-            extension.validate_utxo_fast(tx.inputs(), tx.outputs())?;
+    /// Validate the tx against the current UTXO set.
+    pub fn validate_tx(&self, tx: &Transaction) -> Result<(), Error> {
+        let txhashset = self.txhashset.read().unwrap();
+        txhashset::utxo_view(&txhashset, |utxo| {
+            utxo.validate_tx(tx)?;
             Ok(())
         })
     }
@@ -46,6 +46,7 @@ mod error;
 pub mod pipe;
 pub mod store;
 pub mod txhashset;
+pub mod utxo_view;
 pub mod types;
 
 // Re-export the base interface
@@ -162,6 +162,9 @@ pub fn process_block(
     // to applying the new block.
     verify_coinbase_maturity(b, &mut extension)?;
 
+    // Validate the block against the UTXO set.
+    validate_utxo(b, &mut extension)?;
+
     // Using block_sums (utxo_sum, kernel_sum) for the previous block from the db
     // we can verify_kernel_sums across the full UTXO sum and full kernel sum
     // accounting for inputs/outputs/kernels in this new block.
@@ -505,6 +508,7 @@ fn validate_block(
     Ok(())
 }
 
+/// TODO - This can move into the utxo_view.
 /// Verify the block is not attempting to spend coinbase outputs
 /// before they have sufficiently matured.
 /// Note: requires a txhashset extension.
@@ -519,10 +523,6 @@ fn verify_coinbase_maturity(block: &Block, ext: &mut txhashset::Extension) -> Re
 /// based on block_sums of previous block, accounting for the inputs|outputs|kernels
 /// of the new block.
 fn verify_block_sums(b: &Block, ext: &mut txhashset::Extension) -> Result<(), Error> {
-    // First check all our inputs exist in the current UTXO set.
-    // And that we are not introducing any duplicate outputs in the UTXO set.
-    ext.validate_utxo_fast(b.inputs(), b.outputs())?;
-
     // Retrieve the block_sums for the previous block.
     let block_sums = ext.batch.get_block_sums(&b.header.previous)?;
 
@@ -715,6 +715,8 @@ pub fn rewind_and_apply_fork(
 
         // Re-verify coinbase maturity along this fork.
         verify_coinbase_maturity(&fb, ext)?;
+        // Validate the block against the UTXO set.
+        validate_utxo(&fb, ext)?;
         // Re-verify block_sums to set the block_sums up on this fork correctly.
         verify_block_sums(&fb, ext)?;
         // Re-apply the blocks.
@@ -722,3 +724,9 @@ pub fn rewind_and_apply_fork(
     }
     Ok(())
 }
+
+fn validate_utxo(block: &Block, ext: &txhashset::Extension) -> Result<(), Error> {
+    let utxo = ext.utxo_view();
+    utxo.validate_block(block)?;
+    Ok(())
+}
@@ -189,6 +189,14 @@ pub struct Batch<'a> {
 
 #[allow(missing_docs)]
 impl<'a> Batch<'a> {
+    pub fn head(&self) -> Result<Tip, Error> {
+        option_to_not_found(self.db.get_ser(&vec![HEAD_PREFIX]), "HEAD")
+    }
+
+    pub fn head_header(&self) -> Result<BlockHeader, Error> {
+        self.get_block_header(&self.head()?.last_block_h)
+    }
+
     pub fn save_head(&self, t: &Tip) -> Result<(), Error> {
         self.db.put_ser(&vec![HEAD_PREFIX], t)?;
         self.db.put_ser(&vec![HEADER_HEAD_PREFIX], t)
@@ -202,6 +210,13 @@ impl<'a> Batch<'a> {
         self.db.put_ser(&vec![HEADER_HEAD_PREFIX], t)
     }
 
+    pub fn get_hash_by_height(&self, height: u64) -> Result<Hash, Error> {
+        option_to_not_found(
+            self.db.get_ser(&u64_to_key(HEADER_HEIGHT_PREFIX, height)),
+            &format!("Hash at height: {}", height),
+        )
+    }
+
     pub fn save_sync_head(&self, t: &Tip) -> Result<(), Error> {
         self.db.put_ser(&vec![SYNC_HEAD_PREFIX], t)
     }
@@ -322,6 +337,13 @@ impl<'a> Batch<'a> {
         self.db.delete(&to_key(BLOCK_SUMS_PREFIX, &mut bh.to_vec()))
     }
 
+    pub fn get_header_by_height(&self, height: u64) -> Result<BlockHeader, Error> {
+        option_to_not_found(
+            self.db.get_ser(&u64_to_key(HEADER_HEIGHT_PREFIX, height)),
+            &format!("Header at height: {}", height),
+        ).and_then(|hash| self.get_block_header(&hash))
+    }
+
     /// Maintain consistency of the "header_by_height" index by traversing back
     /// through the current chain and updating "header_by_height" until we reach
     /// a block_header that is consistent with its height (everything prior to
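All three new `Batch` helpers funnel through `option_to_not_found`, turning an absent db row into a descriptive not-found error. The helper lives in grin_store and is not shown in this diff; a plausible sketch of the pattern (the signature and variant name are assumptions):

// Hypothetical sketch of the option_to_not_found pattern used above.
fn option_to_not_found<T>(res: Result<Option<T>, Error>, field_name: &str) -> Result<T, Error> {
    match res {
        Ok(Some(o)) => Ok(o), // found: unwrap it
        Ok(None) => Err(Error::NotFoundErr(field_name.to_owned())), // variant name assumed
        Err(e) => Err(e), // underlying db error passes through
    }
}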
@@ -28,7 +28,7 @@ use util::secp::pedersen::{Commitment, RangeProof};
 use core::core::committed::Committed;
 use core::core::hash::{Hash, Hashed};
 use core::core::merkle_proof::MerkleProof;
-use core::core::pmmr::{self, PMMR};
+use core::core::pmmr::{self, ReadonlyPMMR, PMMR};
 use core::core::{Block, BlockHeader, Input, Output, OutputFeatures, OutputIdentifier, TxKernel};
 use core::global;
 use core::ser::{PMMRIndexHashable, PMMRable};
@@ -40,6 +40,7 @@ use grin_store::types::prune_noop;
 use store::{Batch, ChainStore};
 use types::{TxHashSetRoots, TxHashsetWriteStatus};
 use util::{file, secp_static, zip, LOGGER};
+use utxo_view::UTXOView;
 
 const TXHASHSET_SUBDIR: &'static str = "txhashset";
 const OUTPUT_SUBDIR: &'static str = "output";
@@ -232,12 +233,7 @@ impl TxHashSet {
 
         let batch = self.commit_index.batch()?;
 
-        let rewind_rm_pos = input_pos_to_rewind(
-            self.commit_index.clone(),
-            &horizon_header,
-            &head_header,
-            &batch,
-        )?;
+        let rewind_rm_pos = input_pos_to_rewind(&horizon_header, &head_header, &batch)?;
 
         {
             let clean_output_index = |commit: &[u8]| {
@@ -277,11 +273,10 @@ where
     let res: Result<T, Error>;
     {
         let commit_index = trees.commit_index.clone();
-        let commit_index2 = trees.commit_index.clone();
         let batch = commit_index.batch()?;
 
         trace!(LOGGER, "Starting new txhashset (readonly) extension.");
-        let mut extension = Extension::new(trees, &batch, commit_index2);
+        let mut extension = Extension::new(trees, &batch);
         extension.force_rollback();
         res = inner(&mut extension);
     }
@@ -297,6 +292,26 @@ where
     res
 }
 
+/// Readonly view on the UTXO set.
+/// Based on the current txhashset output_pmmr.
+pub fn utxo_view<'a, F, T>(trees: &'a TxHashSet, inner: F) -> Result<T, Error>
+where
+    F: FnOnce(&UTXOView) -> Result<T, Error>,
+{
+    let res: Result<T, Error>;
+    {
+        let output_pmmr =
+            ReadonlyPMMR::at(&trees.output_pmmr_h.backend, trees.output_pmmr_h.last_pos);
+
+        // Create a new batch here to pass into the utxo_view.
+        // Discard it (rollback) after we finish with the utxo_view.
+        let batch = trees.commit_index.batch()?;
+        let utxo = UTXOView::new(output_pmmr, &batch);
+        res = inner(&utxo);
+    }
+    res
+}
+
 /// Starts a new unit of work to extend the chain with additional blocks,
 /// accepting a closure that will work within that unit of work. The closure
 /// has access to an Extension object that allows the addition of blocks to
@@ -320,10 +335,8 @@ where
     // index saving can be undone
     let child_batch = batch.child()?;
     {
-        let commit_index = trees.commit_index.clone();
-
         trace!(LOGGER, "Starting new txhashset extension.");
-        let mut extension = Extension::new(trees, &child_batch, commit_index);
+        let mut extension = Extension::new(trees, &child_batch);
         res = inner(&mut extension);
 
         rollback = extension.rollback;
@@ -372,11 +385,11 @@ pub struct Extension<'a> {
     rproof_pmmr: PMMR<'a, RangeProof, PMMRBackend<RangeProof>>,
     kernel_pmmr: PMMR<'a, TxKernel, PMMRBackend<TxKernel>>,
 
-    commit_index: Arc<ChainStore>,
+    /// Rollback flag.
     rollback: bool,
 
     /// Batch in which the extension occurs, public so it can be used within
-    /// and `extending` closure. Just be careful using it that way as it will
+    /// an `extending` closure. Just be careful using it that way as it will
     /// get rolled back with the extension (i.e on a losing fork).
     pub batch: &'a Batch<'a>,
 }
@@ -412,12 +425,7 @@ impl<'a> Committed for Extension<'a> {
 }
 
 impl<'a> Extension<'a> {
-    // constructor
-    fn new(
-        trees: &'a mut TxHashSet,
-        batch: &'a Batch,
-        commit_index: Arc<ChainStore>,
-    ) -> Extension<'a> {
+    fn new(trees: &'a mut TxHashSet, batch: &'a Batch) -> Extension<'a> {
         Extension {
             output_pmmr: PMMR::at(
                 &mut trees.output_pmmr_h.backend,
@@ -431,12 +439,17 @@ impl<'a> Extension<'a> {
                 &mut trees.kernel_pmmr_h.backend,
                 trees.kernel_pmmr_h.last_pos,
             ),
-            commit_index,
             rollback: false,
             batch,
         }
     }
 
+    /// Build a view of the current UTXO set based on the output PMMR.
+    pub fn utxo_view(&'a self) -> UTXOView<'a> {
+        UTXOView::new(self.output_pmmr.readonly_pmmr(), self.batch)
+    }
+
+    // TODO - move this into "utxo_view"
     /// Verify we are not attempting to spend any coinbase outputs
     /// that have not sufficiently matured.
     pub fn verify_coinbase_maturity(
@@ -449,7 +462,7 @@ impl<'a> Extension<'a> {
         let pos = inputs
             .iter()
             .filter(|x| x.features.contains(OutputFeatures::COINBASE_OUTPUT))
-            .filter_map(|x| self.commit_index.get_output_pos(&x.commitment()).ok())
+            .filter_map(|x| self.batch.get_output_pos(&x.commitment()).ok())
             .max()
             .unwrap_or(0);
 
@@ -465,7 +478,7 @@ impl<'a> Extension<'a> {
         let cutoff_height = height
             .checked_sub(global::coinbase_maturity(height))
             .unwrap_or(0);
-        let cutoff_header = self.commit_index.get_header_by_height(cutoff_height)?;
+        let cutoff_header = self.batch.get_header_by_height(cutoff_height)?;
         let cutoff_pos = cutoff_header.output_mmr_size;
 
         // If any output pos exceed the cutoff_pos
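A worked example of the cutoff arithmetic above (the maturity constant is illustrative; the real value comes from global::coinbase_maturity):

let height: u64 = 10_000;           // height of the block being processed
let coinbase_maturity: u64 = 1_440; // assumed maturity window, for illustration
let cutoff_height = height.checked_sub(coinbase_maturity).unwrap_or(0);
assert_eq!(cutoff_height, 8_560);
// The header at cutoff_height fixes cutoff_pos (its output_mmr_size); any
// spent coinbase output sitting at a position beyond cutoff_pos is too young.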
@@ -478,24 +491,6 @@ impl<'a> Extension<'a> {
         Ok(())
     }
 
-    // Inputs _must_ spend unspent outputs.
-    // Outputs _must not_ introduce duplicate commitments.
-    pub fn validate_utxo_fast(
-        &mut self,
-        inputs: &Vec<Input>,
-        outputs: &Vec<Output>,
-    ) -> Result<(), Error> {
-        for out in outputs {
-            self.validate_utxo_output(out)?;
-        }
-
-        for input in inputs {
-            self.validate_utxo_input(input)?;
-        }
-
-        Ok(())
-    }
-
     /// Apply a new block to the existing state.
     pub fn apply_block(&mut self, b: &Block) -> Result<(), Error> {
         for out in b.outputs() {
@@ -515,18 +510,6 @@ impl<'a> Extension<'a> {
         Ok(())
     }
 
-    // TODO - Is this sufficient?
-    fn validate_utxo_input(&mut self, input: &Input) -> Result<(), Error> {
-        let commit = input.commitment();
-        let pos_res = self.batch.get_output_pos(&commit);
-        if let Ok(pos) = pos_res {
-            if let Some(_) = self.output_pmmr.get_data(pos) {
-                return Ok(());
-            }
-        }
-        Err(ErrorKind::AlreadySpent(commit).into())
-    }
-
     fn apply_input(&mut self, input: &Input) -> Result<(), Error> {
         let commit = input.commitment();
         let pos_res = self.batch.get_output_pos(&commit);
@@ -565,19 +548,6 @@ impl<'a> Extension<'a> {
         Ok(())
     }
 
-    /// TODO - Is this sufficient?
-    fn validate_utxo_output(&mut self, out: &Output) -> Result<(), Error> {
-        let commit = out.commitment();
-        if let Ok(pos) = self.batch.get_output_pos(&commit) {
-            if let Some(out_mmr) = self.output_pmmr.get_data(pos) {
-                if out_mmr.commitment() == commit {
-                    return Err(ErrorKind::DuplicateCommitment(commit).into());
-                }
-            }
-        }
-        Ok(())
-    }
-
     fn apply_output(&mut self, out: &Output) -> Result<(u64), Error> {
         let commit = out.commitment();
 
@@ -626,6 +596,7 @@ impl<'a> Extension<'a> {
         Ok(())
     }
 
+    /// TODO - move this into "utxo_view"
     /// Build a Merkle proof for the given output and the block by
     /// rewinding the MMR to the last pos of the block.
     /// Note: this relies on the MMR being stable even after pruning/compaction.
@@ -681,7 +652,7 @@ impl<'a> Extension<'a> {
             block_header.hash(),
         );
 
-        let head_header = self.commit_index.head_header()?;
+        let head_header = self.batch.head_header()?;
 
         // We need to build bitmaps of added and removed output positions
         // so we can correctly rewind all operations applied to the output MMR
@@ -689,12 +660,7 @@ impl<'a> Extension<'a> {
         // undone during rewind).
         // Rewound output pos will be removed from the MMR.
         // Rewound input (spent) pos will be added back to the MMR.
-        let rewind_rm_pos = input_pos_to_rewind(
-            self.commit_index.clone(),
-            block_header,
-            &head_header,
-            &self.batch,
-        )?;
+        let rewind_rm_pos = input_pos_to_rewind(block_header, &head_header, &self.batch)?;
 
         self.rewind_to_pos(
             block_header.output_mmr_size,
@@ -996,7 +962,7 @@ impl<'a> Extension<'a> {
 
         let mut current = header.clone();
         loop {
-            current = self.commit_index.get_block_header(&current.previous)?;
+            current = self.batch.get_block_header(&current.previous)?;
             if current.height == 0 {
                 break;
             }
@@ -1139,8 +1105,7 @@ fn check_and_remove_files(txhashset_path: &PathBuf, header: &BlockHeader) -> Res
 /// of all inputs (spent outputs) we need to "undo" during a rewind.
 /// We do this by leveraging the "block_input_bitmap" cache and OR'ing
 /// the set of bitmaps together for the set of blocks being rewound.
-fn input_pos_to_rewind(
-    commit_index: Arc<ChainStore>,
+pub fn input_pos_to_rewind(
     block_header: &BlockHeader,
     head_header: &BlockHeader,
     batch: &Batch,
@@ -1188,7 +1153,7 @@ fn input_pos_to_rewind(
             break;
         }
         height -= 1;
-        current = commit_index.get_hash_by_height(height)?;
+        current = batch.get_hash_by_height(height)?;
     }
 
     let bitmap = bitmap_fast_or(None, &mut block_input_bitmaps).unwrap();
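input_pos_to_rewind walks headers back from head to the rewind point, collecting each block's cached input bitmap, then ORs them together. A minimal illustration of that combining step (hypothetical positions, using the croaring API the code already imports):

use croaring::Bitmap;

// Per-block caches of spent output positions (hypothetical values).
let spent_in_block_a = Bitmap::of(&[4, 9]);
let spent_in_block_b = Bitmap::of(&[2]);

// OR the bitmaps across all rewound blocks: every spent position that must
// be added back to the output MMR during the rewind.
let rewind_rm_pos = spent_in_block_a.or(&spent_in_block_b);
assert_eq!(rewind_rm_pos.to_vec(), vec![2, 4, 9]);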
chain/src/utxo_view.rs (new file, 89 lines)
@@ -0,0 +1,89 @@
+// Copyright 2018 The Grin Developers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Lightweight readonly view into output MMR for convenience.
+
+use core::core::pmmr::ReadonlyPMMR;
+use core::core::{Block, Input, Output, OutputIdentifier, Transaction};
+
+use error::{Error, ErrorKind};
+use grin_store::pmmr::PMMRBackend;
+use store::Batch;
+
+/// Readonly view of the UTXO set (based on output MMR).
+pub struct UTXOView<'a> {
+    pmmr: ReadonlyPMMR<'a, OutputIdentifier, PMMRBackend<OutputIdentifier>>,
+    batch: &'a Batch<'a>,
+}
+
+impl<'a> UTXOView<'a> {
+    /// Build a new UTXO view.
+    pub fn new(
+        pmmr: ReadonlyPMMR<'a, OutputIdentifier, PMMRBackend<OutputIdentifier>>,
+        batch: &'a Batch,
+    ) -> UTXOView<'a> {
+        UTXOView { pmmr, batch }
+    }
+
+    /// Validate a block against the current UTXO set.
+    /// Every input must spend an output that currently exists in the UTXO set.
+    /// No duplicate outputs.
+    pub fn validate_block(&self, block: &Block) -> Result<(), Error> {
+        for output in block.outputs() {
+            self.validate_output(output)?;
+        }
+
+        for input in block.inputs() {
+            self.validate_input(input)?;
+        }
+        Ok(())
+    }
+
+    /// Validate a transaction against the current UTXO set.
+    /// Every input must spend an output that currently exists in the UTXO set.
+    /// No duplicate outputs.
+    pub fn validate_tx(&self, tx: &Transaction) -> Result<(), Error> {
+        for output in tx.outputs() {
+            self.validate_output(output)?;
+        }
+
+        for input in tx.inputs() {
+            self.validate_input(input)?;
+        }
+        Ok(())
+    }
+
+    fn validate_input(&self, input: &Input) -> Result<(), Error> {
+        let commit = input.commitment();
+        let pos_res = self.batch.get_output_pos(&commit);
+        if let Ok(pos) = pos_res {
+            if let Some(_) = self.pmmr.get_data(pos) {
+                return Ok(());
+            }
+        }
+        Err(ErrorKind::AlreadySpent(commit).into())
+    }
+
+    fn validate_output(&self, output: &Output) -> Result<(), Error> {
+        let commit = output.commitment();
+        if let Ok(pos) = self.batch.get_output_pos(&commit) {
+            if let Some(out_mmr) = self.pmmr.get_data(pos) {
+                if out_mmr.commitment() == commit {
+                    return Err(ErrorKind::DuplicateCommitment(commit).into());
+                }
+            }
+        }
+        Ok(())
+    }
+}
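For orientation, building a `UTXOView` by hand looks like what `txhashset::utxo_view` assembles internally (sketch drawn from the hunk above; `trees` is a `TxHashSet`, `tx` a `Transaction`):

// Readonly PMMR over the output MMR, capped at the current last_pos.
let output_pmmr =
    ReadonlyPMMR::at(&trees.output_pmmr_h.backend, trees.output_pmmr_h.last_pos);

// The batch is used purely for output position lookups and is discarded
// (rolled back) when the view is done with.
let batch = trees.commit_index.batch()?;
let utxo = UTXOView::new(output_pmmr, &batch);
utxo.validate_tx(&tx)?;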
core/src/core/pmmr/backend.rs (new file, 75 lines)
@@ -0,0 +1,75 @@
+// Copyright 2018 The Grin Developers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use croaring::Bitmap;
+
+use core::hash::Hash;
+use core::BlockHeader;
+use ser::PMMRable;
+
+/// Storage backend for the MMR, just needs to be indexed by order of insertion.
+/// The PMMR itself does not need the Backend to be accurate on the existence
+/// of an element (i.e. remove could be a no-op) but layers above can
+/// depend on an accurate Backend to check existence.
+pub trait Backend<T>
+where
+    T: PMMRable,
+{
+    /// Append the provided Hashes to the backend storage, and optionally an
+    /// associated data element to flatfile storage (for leaf nodes only). The
+    /// position of the first element of the Vec in the MMR is provided to
+    /// help the implementation.
+    fn append(&mut self, position: u64, data: Vec<(Hash, Option<T>)>) -> Result<(), String>;
+
+    /// Rewind the backend state to a previous position, as if all append
+    /// operations after that had been canceled. Expects a position in the PMMR
+    /// to rewind to as well as bitmaps representing the positions added and
+    /// removed since the rewind position. These are what we will "undo"
+    /// during the rewind.
+    fn rewind(&mut self, position: u64, rewind_rm_pos: &Bitmap) -> Result<(), String>;
+
+    /// Get a Hash by insertion position.
+    fn get_hash(&self, position: u64) -> Option<Hash>;
+
+    /// Get underlying data by insertion position.
+    fn get_data(&self, position: u64) -> Option<T>;
+
+    /// Get a Hash by original insertion position
+    /// (ignoring the remove log).
+    fn get_from_file(&self, position: u64) -> Option<Hash>;
+
+    /// Get a Data Element by original insertion position
+    /// (ignoring the remove log).
+    fn get_data_from_file(&self, position: u64) -> Option<T>;
+
+    /// Remove Hash by insertion position. An index is also provided so the
+    /// underlying backend can implement some rollback of positions up to a
+    /// given index (practically the index is the height of a block that
+    /// triggered removal).
+    fn remove(&mut self, position: u64) -> Result<(), String>;
+
+    /// Returns the data file path.. this is a bit of a hack now that doesn't
+    /// sit well with the design, but TxKernels have to be summed and the
+    /// fastest way to be able to allow direct access to the file
+    fn get_data_file_path(&self) -> String;
+
+    /// Also a bit of a hack...
+    /// Saves a snapshot of the rewound utxo file with the block hash as
+    /// filename suffix. We need this when sending a txhashset zip file to a
+    /// node for fast sync.
+    fn snapshot(&self, header: &BlockHeader) -> Result<(), String>;
+
+    /// For debugging purposes so we can see how compaction is doing.
+    fn dump_stats(&self);
+}
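To make the trait concrete, here is a deliberately naive in-memory backend sketch (hypothetical; it is not part of this commit, and the real backend, grin_store's PMMRBackend, persists hashes and data to append-only files). It assumes `T: PMMRable + Clone` and treats `remove` as a tombstone list:

struct VecBackend<T> {
    hashes: Vec<Hash>,    // one hash per MMR position, in insertion order
    data: Vec<Option<T>>, // leaf data per position (None for internal nodes)
    removed: Vec<u64>,    // pruned positions (tombstones)
}

impl<T: PMMRable + Clone> Backend<T> for VecBackend<T> {
    fn append(&mut self, _position: u64, data: Vec<(Hash, Option<T>)>) -> Result<(), String> {
        for (hash, elem) in data {
            self.hashes.push(hash);
            self.data.push(elem);
        }
        Ok(())
    }

    fn rewind(&mut self, position: u64, _rewind_rm_pos: &Bitmap) -> Result<(), String> {
        // naive rewind: drop everything after `position` (ignores un-removal)
        self.hashes.truncate(position as usize);
        self.data.truncate(position as usize);
        Ok(())
    }

    fn get_hash(&self, position: u64) -> Option<Hash> {
        if self.removed.contains(&position) {
            return None;
        }
        self.get_from_file(position)
    }

    fn get_data(&self, position: u64) -> Option<T> {
        if self.removed.contains(&position) {
            return None;
        }
        self.get_data_from_file(position)
    }

    fn get_from_file(&self, position: u64) -> Option<Hash> {
        // positions are 1-based
        self.hashes.get(position as usize - 1).cloned()
    }

    fn get_data_from_file(&self, position: u64) -> Option<T> {
        self.data.get(position as usize - 1).and_then(|d| d.clone())
    }

    fn remove(&mut self, position: u64) -> Result<(), String> {
        self.removed.push(position);
        Ok(())
    }

    fn get_data_file_path(&self) -> String {
        String::new() // in-memory: no backing file
    }

    fn snapshot(&self, _header: &BlockHeader) -> Result<(), String> {
        Ok(()) // nothing to snapshot in memory
    }

    fn dump_stats(&self) {}
}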
core/src/core/pmmr/mod.rs (new file, 45 lines)
@@ -0,0 +1,45 @@
+// Copyright 2018 The Grin Developers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Persistent and prunable Merkle Mountain Range implementation. For a high
+//! level description of MMRs, see:
+//!
+//! https://github.com/opentimestamps/opentimestamps-server/blob/master/doc/merkle-mountain-range.md
+//!
+//! This implementation is built in two major parts:
+//!
+//! 1. A set of low-level functions that allow navigation within an arbitrary
+//! sized binary tree traversed in postorder. To realize why this is useful,
+//! we start with the standard height sequence in a MMR: 0010012001... This is
+//! in fact identical to the postorder traversal (left-right-top) of a binary
+//! tree. In addition postorder traversal is independent of the height of the
+//! tree. This allows us, with a few primitives, to get the height of any node
+//! in the MMR from its position in the sequence, as well as calculate the
+//! position of siblings, parents, etc. As all those functions only rely on
+//! binary operations, they're extremely fast.
+//! 2. The implementation of a prunable MMR tree using the above. Each leaf
+//! is required to be Writeable (which implements Hashed). Tree roots can be
+//! trivially and efficiently calculated without materializing the full tree.
+//! The underlying Hashes are stored in a Backend implementation that can
+//! either be a simple Vec or a database.
+
+mod backend;
+mod pmmr;
+mod readonly_pmmr;
+
+pub use self::backend::*;
+pub use self::pmmr::*;
+pub use self::readonly_pmmr::*;
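The module doc above claims node heights fall out of pure bit arithmetic on postorder positions; that can be made concrete. A standalone sketch of the trick (illustrative only; pmmr.rs carries the project's own helpers for this):

/// Height of the node at 1-based postorder position `pos` in an MMR.
/// A position of the form 2^k - 1 (all ones in binary) is the peak of a
/// perfect binary tree of height k - 1; any other position maps to the
/// equivalent node in its left sibling subtree until it hits such a peak.
fn height_at_pos(mut pos: u64) -> u64 {
    fn all_ones(n: u64) -> bool {
        n != 0 && n.count_zeros() == n.leading_zeros()
    }
    fn jump_left(n: u64) -> u64 {
        // subtract the size of the left sibling subtree (2^m - 1)
        let most_significant_bit = 1u64 << (63 - n.leading_zeros());
        n - (most_significant_bit - 1)
    }
    while !all_ones(pos) {
        pos = jump_left(pos);
    }
    63 - u64::from(pos.leading_zeros())
}

fn main() {
    // Matches the height sequence 0010012001... quoted in the module doc.
    let heights: Vec<u64> = (1..=10).map(height_at_pos).collect();
    assert_eq!(heights, vec![0, 0, 1, 0, 0, 1, 2, 0, 0, 1]);
}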
@@ -12,30 +12,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-//! Persistent and prunable Merkle Mountain Range implementation. For a high
-//! level description of MMRs, see:
-//!
-//! https://github.com/opentimestamps/opentimestamps-server/blob/master/doc/merkle-mountain-range.md
-//!
-//! This implementation is built in two major parts:
-//!
-//! 1. A set of low-level functions that allow navigation within an arbitrary
-//! sized binary tree traversed in postorder. To realize why this is useful,
-//! we start with the standard height sequence in a MMR: 0010012001... This is
-//! in fact identical to the postorder traversal (left-right-top) of a binary
-//! tree. In addition postorder traversal is independent of the height of the
-//! tree. This allows us, with a few primitives, to get the height of any node
-//! in the MMR from its position in the sequence, as well as calculate the
-//! position of siblings, parents, etc. As all those functions only rely on
-//! binary operations, they're extremely fast.
-//! 2. The implementation of a prunable MMR tree using the above. Each leaf
-//! is required to be Writeable (which implements Hashed). Tree roots can be
-//! trivially and efficiently calculated without materializing the full tree.
-//! The underlying Hashes are stored in a Backend implementation that can
-//! either be a simple Vec or a database.
-
 use std::marker;
 use std::u64;
 
@@ -43,6 +19,7 @@ use croaring::Bitmap;
 
 use core::hash::Hash;
 use core::merkle_proof::MerkleProof;
+use core::pmmr::{Backend, ReadonlyPMMR};
 use core::BlockHeader;
 use ser::{PMMRIndexHashable, PMMRable};
 use util::LOGGER;
@@ -50,62 +27,6 @@ use util::LOGGER;
 /// 64 bits all ones: 0b11111111...1
 const ALL_ONES: u64 = u64::MAX;
 
-/// Storage backend for the MMR, just needs to be indexed by order of insertion.
-/// The PMMR itself does not need the Backend to be accurate on the existence
-/// of an element (i.e. remove could be a no-op) but layers above can
-/// depend on an accurate Backend to check existence.
-pub trait Backend<T>
-where
-    T: PMMRable,
-{
-    /// Append the provided Hashes to the backend storage, and optionally an
-    /// associated data element to flatfile storage (for leaf nodes only). The
-    /// position of the first element of the Vec in the MMR is provided to
-    /// help the implementation.
-    fn append(&mut self, position: u64, data: Vec<(Hash, Option<T>)>) -> Result<(), String>;
-
-    /// Rewind the backend state to a previous position, as if all append
-    /// operations after that had been canceled. Expects a position in the PMMR
-    /// to rewind to as well as bitmaps representing the positions added and
-    /// removed since the rewind position. These are what we will "undo"
-    /// during the rewind.
-    fn rewind(&mut self, position: u64, rewind_rm_pos: &Bitmap) -> Result<(), String>;
-
-    /// Get a Hash by insertion position.
-    fn get_hash(&self, position: u64) -> Option<Hash>;
-
-    /// Get underlying data by insertion position.
-    fn get_data(&self, position: u64) -> Option<T>;
-
-    /// Get a Hash by original insertion position
-    /// (ignoring the remove log).
-    fn get_from_file(&self, position: u64) -> Option<Hash>;
-
-    /// Get a Data Element by original insertion position
-    /// (ignoring the remove log).
-    fn get_data_from_file(&self, position: u64) -> Option<T>;
-
-    /// Remove Hash by insertion position. An index is also provided so the
-    /// underlying backend can implement some rollback of positions up to a
-    /// given index (practically the index is the height of a block that
-    /// triggered removal).
-    fn remove(&mut self, position: u64) -> Result<(), String>;
-
-    /// Returns the data file path.. this is a bit of a hack now that doesn't
-    /// sit well with the design, but TxKernels have to be summed and the
-    /// fastest way to be able to allow direct access to the file
-    fn get_data_file_path(&self) -> String;
-
-    /// Also a bit of a hack...
-    /// Saves a snapshot of the rewound utxo file with the block hash as
-    /// filename suffix. We need this when sending a txhashset zip file to a
-    /// node for fast sync.
-    fn snapshot(&self, header: &BlockHeader) -> Result<(), String>;
-
-    /// For debugging purposes so we can see how compaction is doing.
-    fn dump_stats(&self);
-}
-
 /// Prunable Merkle Mountain Range implementation. All positions within the tree
 /// start at 1 as they're postorder tree traversal positions rather than array
 /// indices.
@@ -149,6 +70,11 @@ where
         }
     }
 
+    /// Build a "readonly" view of this PMMR.
+    pub fn readonly_pmmr(&self) -> ReadonlyPMMR<T, B> {
+        ReadonlyPMMR::at(&self.backend, self.last_pos)
+    }
+
     /// Returns a vec of the peaks of this MMR.
     pub fn peaks(&self) -> Vec<Hash> {
         let peaks_pos = peaks(self.last_pos);
core/src/core/pmmr/readonly_pmmr.rs (new file, 73 lines)
@@ -0,0 +1,73 @@
+// Copyright 2018 The Grin Developers
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Readonly view of a PMMR.
+
+use std::marker;
+
+use core::pmmr::{is_leaf, Backend};
+use ser::PMMRable;
+
+/// Readonly view of a PMMR.
+pub struct ReadonlyPMMR<'a, T, B>
+where
+    T: PMMRable,
+    B: 'a + Backend<T>,
+{
+    /// The last position in the PMMR
+    last_pos: u64,
+    /// The backend for this readonly PMMR
+    backend: &'a B,
+    // only needed to parameterise Backend
+    _marker: marker::PhantomData<T>,
+}
+
+impl<'a, T, B> ReadonlyPMMR<'a, T, B>
+where
+    T: PMMRable + ::std::fmt::Debug,
+    B: 'a + Backend<T>,
+{
+    /// Build a new readonly PMMR.
+    pub fn new(backend: &'a B) -> ReadonlyPMMR<T, B> {
+        ReadonlyPMMR {
+            last_pos: 0,
+            backend: backend,
+            _marker: marker::PhantomData,
+        }
+    }
+
+    /// Build a new readonly PMMR pre-initialized to
+    /// last_pos with the provided backend.
+    pub fn at(backend: &'a B, last_pos: u64) -> ReadonlyPMMR<T, B> {
+        ReadonlyPMMR {
+            last_pos: last_pos,
+            backend: backend,
+            _marker: marker::PhantomData,
+        }
+    }
+
+    /// Get the data element at provided position in the MMR.
+    pub fn get_data(&self, pos: u64) -> Option<T> {
+        if pos > self.last_pos {
+            // If we are beyond the rhs of the MMR return None.
+            None
+        } else if is_leaf(pos) {
+            // If we are a leaf then get data from the backend.
+            self.backend.get_data(pos)
+        } else {
+            // If we are not a leaf then return None as only leaves have data.
+            None
+        }
+    }
+}
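A readonly view can also be derived from a live PMMR via the `readonly_pmmr()` method added above; the view borrows the same backend but exposes only reads (sketch; assumes an existing, populated `pmmr`):

// Reads through a readonly view of an existing PMMR.
let view = pmmr.readonly_pmmr();

// Position 1 is a leaf, so the backend may return data for it.
let first_leaf = view.get_data(1);

// Position 3 is the parent of leaves 1 and 2; internal nodes carry no data.
assert!(view.get_data(3).is_none());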
@@ -289,7 +289,7 @@ impl Pool {
         // Validate the tx against current chain state.
         // Check all inputs are in the current UTXO set.
         // Check all outputs are unique in current UTXO set.
-        self.blockchain.validate_tx(&tx, header)?;
+        self.blockchain.validate_tx(&tx)?;
 
         let overage = tx.overage();
         let offset = (header.total_kernel_offset() + tx.offset)?;
@@ -214,7 +214,7 @@ pub trait BlockChain: Sync + Send {
     /// have matured sufficiently.
     fn verify_tx_lock_height(&self, tx: &transaction::Transaction) -> Result<(), PoolError>;
 
-    fn validate_tx(&self, tx: &Transaction, header: &BlockHeader) -> Result<(), PoolError>;
+    fn validate_tx(&self, tx: &Transaction) -> Result<(), PoolError>;
 
     fn chain_head(&self) -> Result<BlockHeader, PoolError>;
 
@@ -56,7 +56,7 @@ impl BlockChain for CoinbaseMaturityErrorChainAdapter {
         unimplemented!();
     }
 
-    fn validate_tx(&self, _tx: &Transaction, _header: &BlockHeader) -> Result<(), PoolError> {
+    fn validate_tx(&self, _tx: &Transaction) -> Result<(), PoolError> {
         unimplemented!();
     }
 
@@ -128,7 +128,7 @@ impl BlockChain for ChainAdapter {
             .map_err(|_| PoolError::Other(format!("failed to get block sums")))
     }
 
-    fn validate_tx(&self, tx: &Transaction, _header: &BlockHeader) -> Result<(), pool::PoolError> {
+    fn validate_tx(&self, tx: &Transaction) -> Result<(), pool::PoolError> {
         let utxo = self.utxo.read().unwrap();
 
         for x in tx.outputs() {
@@ -749,9 +749,9 @@ impl pool::BlockChain for PoolToChainAdapter {
             .map_err(|_| pool::PoolError::Other(format!("failed to get block_sums")))
     }
 
-    fn validate_tx(&self, tx: &Transaction, header: &BlockHeader) -> Result<(), pool::PoolError> {
+    fn validate_tx(&self, tx: &Transaction) -> Result<(), pool::PoolError> {
         wo(&self.chain)
-            .validate_tx(tx, header)
+            .validate_tx(tx)
             .map_err(|_| pool::PoolError::Other(format!("failed to validate tx")))
     }
 