From 11f2d7b6d4922785fcf9634d6d28a72adab43110 Mon Sep 17 00:00:00 2001 From: Antioch Peverell Date: Tue, 25 Sep 2018 11:01:19 +0100 Subject: [PATCH] UTXOView (readonly, minimal, output only txhashset extension) (#1584) * make the utxo view explicit * introduce utxo_view (readonly, minimal txhashset extension) * rustfmt * cleanup * cleanup * rustfmt * cleanup build warnings, comments etc. * rustfmt * utxo_view even more readonly now * cleanup * refactor pmmr, split out readonly_pmmr and backend * rustfmt --- chain/src/chain.rs | 10 +-- chain/src/lib.rs | 1 + chain/src/pipe.rs | 16 +++- chain/src/store.rs | 22 +++++ chain/src/txhashset.rs | 119 ++++++++++------------------ chain/src/utxo_view.rs | 89 +++++++++++++++++++++ core/src/core/pmmr/backend.rs | 75 ++++++++++++++++++ core/src/core/pmmr/mod.rs | 45 +++++++++++ core/src/core/{ => pmmr}/pmmr.rs | 86 ++------------------ core/src/core/pmmr/readonly_pmmr.rs | 73 +++++++++++++++++ pool/src/pool.rs | 2 +- pool/src/types.rs | 2 +- pool/tests/coinbase_maturity.rs | 2 +- pool/tests/common/mod.rs | 2 +- servers/src/common/adapters.rs | 4 +- 15 files changed, 376 insertions(+), 172 deletions(-) create mode 100644 chain/src/utxo_view.rs create mode 100644 core/src/core/pmmr/backend.rs create mode 100644 core/src/core/pmmr/mod.rs rename core/src/core/{ => pmmr}/pmmr.rs (82%) create mode 100644 core/src/core/pmmr/readonly_pmmr.rs diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 78aae1c4a..5bfab5821 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -446,11 +446,11 @@ impl Chain { } } - pub fn validate_tx(&self, tx: &Transaction, header: &BlockHeader) -> Result<(), Error> { - let mut txhashset = self.txhashset.write().unwrap(); - txhashset::extending_readonly(&mut txhashset, |extension| { - extension.rewind(header)?; - extension.validate_utxo_fast(tx.inputs(), tx.outputs())?; + /// Validate the tx against the current UTXO set. 
+ pub fn validate_tx(&self, tx: &Transaction) -> Result<(), Error> { + let txhashset = self.txhashset.read().unwrap(); + txhashset::utxo_view(&txhashset, |utxo| { + utxo.validate_tx(tx)?; Ok(()) }) } diff --git a/chain/src/lib.rs b/chain/src/lib.rs index 83fc8faa9..9c5afce36 100644 --- a/chain/src/lib.rs +++ b/chain/src/lib.rs @@ -46,6 +46,7 @@ mod error; pub mod pipe; pub mod store; pub mod txhashset; +pub mod utxo_view; pub mod types; // Re-export the base interface diff --git a/chain/src/pipe.rs b/chain/src/pipe.rs index b4b2db8e2..a6ef62ad1 100644 --- a/chain/src/pipe.rs +++ b/chain/src/pipe.rs @@ -162,6 +162,9 @@ pub fn process_block( // to applying the new block. verify_coinbase_maturity(b, &mut extension)?; + // Validate the block against the UTXO set. + validate_utxo(b, &mut extension)?; + // Using block_sums (utxo_sum, kernel_sum) for the previous block from the db // we can verify_kernel_sums across the full UTXO sum and full kernel sum // accounting for inputs/outputs/kernels in this new block. @@ -505,6 +508,7 @@ fn validate_block( Ok(()) } +/// TODO - This can move into the utxo_view. /// Verify the block is not attempting to spend coinbase outputs /// before they have sufficiently matured. /// Note: requires a txhashset extension. @@ -519,10 +523,6 @@ fn verify_coinbase_maturity(block: &Block, ext: &mut txhashset::Extension) -> Re /// based on block_sums of previous block, accounting for the inputs|outputs|kernels /// of the new block. fn verify_block_sums(b: &Block, ext: &mut txhashset::Extension) -> Result<(), Error> { - // First check all our inputs exist in the current UTXO set. - // And that we are not introducing any duplicate outputs in the UTXO set. - ext.validate_utxo_fast(b.inputs(), b.outputs())?; - // Retrieve the block_sums for the previous block. let block_sums = ext.batch.get_block_sums(&b.header.previous)?; @@ -715,6 +715,8 @@ pub fn rewind_and_apply_fork( // Re-verify coinbase maturity along this fork. 
verify_coinbase_maturity(&fb, ext)?; + // Validate the block against the UTXO set. + validate_utxo(&fb, ext)?; // Re-verify block_sums to set the block_sums up on this fork correctly. verify_block_sums(&fb, ext)?; // Re-apply the blocks. @@ -722,3 +724,9 @@ pub fn rewind_and_apply_fork( } Ok(()) } + +fn validate_utxo(block: &Block, ext: &txhashset::Extension) -> Result<(), Error> { + let utxo = ext.utxo_view(); + utxo.validate_block(block)?; + Ok(()) +} diff --git a/chain/src/store.rs b/chain/src/store.rs index 089869d01..ff6711d8f 100644 --- a/chain/src/store.rs +++ b/chain/src/store.rs @@ -189,6 +189,14 @@ pub struct Batch<'a> { #[allow(missing_docs)] impl<'a> Batch<'a> { + pub fn head(&self) -> Result { + option_to_not_found(self.db.get_ser(&vec![HEAD_PREFIX]), "HEAD") + } + + pub fn head_header(&self) -> Result { + self.get_block_header(&self.head()?.last_block_h) + } + pub fn save_head(&self, t: &Tip) -> Result<(), Error> { self.db.put_ser(&vec![HEAD_PREFIX], t)?; self.db.put_ser(&vec![HEADER_HEAD_PREFIX], t) @@ -202,6 +210,13 @@ impl<'a> Batch<'a> { self.db.put_ser(&vec![HEADER_HEAD_PREFIX], t) } + pub fn get_hash_by_height(&self, height: u64) -> Result { + option_to_not_found( + self.db.get_ser(&u64_to_key(HEADER_HEIGHT_PREFIX, height)), + &format!("Hash at height: {}", height), + ) + } + pub fn save_sync_head(&self, t: &Tip) -> Result<(), Error> { self.db.put_ser(&vec![SYNC_HEAD_PREFIX], t) } @@ -322,6 +337,13 @@ impl<'a> Batch<'a> { self.db.delete(&to_key(BLOCK_SUMS_PREFIX, &mut bh.to_vec())) } + pub fn get_header_by_height(&self, height: u64) -> Result { + option_to_not_found( + self.db.get_ser(&u64_to_key(HEADER_HEIGHT_PREFIX, height)), + &format!("Header at height: {}", height), + ).and_then(|hash| self.get_block_header(&hash)) + } + /// Maintain consistency of the "header_by_height" index by traversing back /// through the current chain and updating "header_by_height" until we reach /// a block_header that is consistent with its height (everything 
prior to diff --git a/chain/src/txhashset.rs b/chain/src/txhashset.rs index d9c6dbf59..add9c5551 100644 --- a/chain/src/txhashset.rs +++ b/chain/src/txhashset.rs @@ -28,7 +28,7 @@ use util::secp::pedersen::{Commitment, RangeProof}; use core::core::committed::Committed; use core::core::hash::{Hash, Hashed}; use core::core::merkle_proof::MerkleProof; -use core::core::pmmr::{self, PMMR}; +use core::core::pmmr::{self, ReadonlyPMMR, PMMR}; use core::core::{Block, BlockHeader, Input, Output, OutputFeatures, OutputIdentifier, TxKernel}; use core::global; use core::ser::{PMMRIndexHashable, PMMRable}; @@ -40,6 +40,7 @@ use grin_store::types::prune_noop; use store::{Batch, ChainStore}; use types::{TxHashSetRoots, TxHashsetWriteStatus}; use util::{file, secp_static, zip, LOGGER}; +use utxo_view::UTXOView; const TXHASHSET_SUBDIR: &'static str = "txhashset"; const OUTPUT_SUBDIR: &'static str = "output"; @@ -232,12 +233,7 @@ impl TxHashSet { let batch = self.commit_index.batch()?; - let rewind_rm_pos = input_pos_to_rewind( - self.commit_index.clone(), - &horizon_header, - &head_header, - &batch, - )?; + let rewind_rm_pos = input_pos_to_rewind(&horizon_header, &head_header, &batch)?; { let clean_output_index = |commit: &[u8]| { @@ -277,11 +273,10 @@ where let res: Result; { let commit_index = trees.commit_index.clone(); - let commit_index2 = trees.commit_index.clone(); let batch = commit_index.batch()?; trace!(LOGGER, "Starting new txhashset (readonly) extension."); - let mut extension = Extension::new(trees, &batch, commit_index2); + let mut extension = Extension::new(trees, &batch); extension.force_rollback(); res = inner(&mut extension); } @@ -297,6 +292,26 @@ where res } +/// Readonly view on the UTXO set. +/// Based on the current txhashset output_pmmr. 
+pub fn utxo_view<'a, F, T>(trees: &'a TxHashSet, inner: F) -> Result +where + F: FnOnce(&UTXOView) -> Result, +{ + let res: Result; + { + let output_pmmr = + ReadonlyPMMR::at(&trees.output_pmmr_h.backend, trees.output_pmmr_h.last_pos); + + // Create a new batch here to pass into the utxo_view. + // Discard it (rollback) after we finish with the utxo_view. + let batch = trees.commit_index.batch()?; + let utxo = UTXOView::new(output_pmmr, &batch); + res = inner(&utxo); + } + res +} + /// Starts a new unit of work to extend the chain with additional blocks, /// accepting a closure that will work within that unit of work. The closure /// has access to an Extension object that allows the addition of blocks to @@ -320,10 +335,8 @@ where // index saving can be undone let child_batch = batch.child()?; { - let commit_index = trees.commit_index.clone(); - trace!(LOGGER, "Starting new txhashset extension."); - let mut extension = Extension::new(trees, &child_batch, commit_index); + let mut extension = Extension::new(trees, &child_batch); res = inner(&mut extension); rollback = extension.rollback; @@ -372,11 +385,11 @@ pub struct Extension<'a> { rproof_pmmr: PMMR<'a, RangeProof, PMMRBackend>, kernel_pmmr: PMMR<'a, TxKernel, PMMRBackend>, - commit_index: Arc, + /// Rollback flag. rollback: bool, /// Batch in which the extension occurs, public so it can be used within - /// and `extending` closure. Just be careful using it that way as it will + /// an `extending` closure. Just be careful using it that way as it will /// get rolled back with the extension (i.e on a losing fork). 
pub batch: &'a Batch<'a>, } @@ -412,12 +425,7 @@ impl<'a> Committed for Extension<'a> { } impl<'a> Extension<'a> { - // constructor - fn new( - trees: &'a mut TxHashSet, - batch: &'a Batch, - commit_index: Arc, - ) -> Extension<'a> { + fn new(trees: &'a mut TxHashSet, batch: &'a Batch) -> Extension<'a> { Extension { output_pmmr: PMMR::at( &mut trees.output_pmmr_h.backend, @@ -431,12 +439,17 @@ impl<'a> Extension<'a> { &mut trees.kernel_pmmr_h.backend, trees.kernel_pmmr_h.last_pos, ), - commit_index, rollback: false, batch, } } + /// Build a view of the current UTXO set based on the output PMMR. + pub fn utxo_view(&'a self) -> UTXOView<'a> { + UTXOView::new(self.output_pmmr.readonly_pmmr(), self.batch) + } + + // TODO - move this into "utxo_view" /// Verify we are not attempting to spend any coinbase outputs /// that have not sufficiently matured. pub fn verify_coinbase_maturity( @@ -449,7 +462,7 @@ impl<'a> Extension<'a> { let pos = inputs .iter() .filter(|x| x.features.contains(OutputFeatures::COINBASE_OUTPUT)) - .filter_map(|x| self.commit_index.get_output_pos(&x.commitment()).ok()) + .filter_map(|x| self.batch.get_output_pos(&x.commitment()).ok()) .max() .unwrap_or(0); @@ -465,7 +478,7 @@ impl<'a> Extension<'a> { let cutoff_height = height .checked_sub(global::coinbase_maturity(height)) .unwrap_or(0); - let cutoff_header = self.commit_index.get_header_by_height(cutoff_height)?; + let cutoff_header = self.batch.get_header_by_height(cutoff_height)?; let cutoff_pos = cutoff_header.output_mmr_size; // If any output pos exceed the cutoff_pos @@ -478,24 +491,6 @@ impl<'a> Extension<'a> { Ok(()) } - // Inputs _must_ spend unspent outputs. - // Outputs _must not_ introduce duplicate commitments. 
- pub fn validate_utxo_fast( - &mut self, - inputs: &Vec, - outputs: &Vec, - ) -> Result<(), Error> { - for out in outputs { - self.validate_utxo_output(out)?; - } - - for input in inputs { - self.validate_utxo_input(input)?; - } - - Ok(()) - } - /// Apply a new block to the existing state. pub fn apply_block(&mut self, b: &Block) -> Result<(), Error> { for out in b.outputs() { @@ -515,18 +510,6 @@ impl<'a> Extension<'a> { Ok(()) } - // TODO - Is this sufficient? - fn validate_utxo_input(&mut self, input: &Input) -> Result<(), Error> { - let commit = input.commitment(); - let pos_res = self.batch.get_output_pos(&commit); - if let Ok(pos) = pos_res { - if let Some(_) = self.output_pmmr.get_data(pos) { - return Ok(()); - } - } - Err(ErrorKind::AlreadySpent(commit).into()) - } - fn apply_input(&mut self, input: &Input) -> Result<(), Error> { let commit = input.commitment(); let pos_res = self.batch.get_output_pos(&commit); @@ -565,19 +548,6 @@ impl<'a> Extension<'a> { Ok(()) } - /// TODO - Is this sufficient? - fn validate_utxo_output(&mut self, out: &Output) -> Result<(), Error> { - let commit = out.commitment(); - if let Ok(pos) = self.batch.get_output_pos(&commit) { - if let Some(out_mmr) = self.output_pmmr.get_data(pos) { - if out_mmr.commitment() == commit { - return Err(ErrorKind::DuplicateCommitment(commit).into()); - } - } - } - Ok(()) - } - fn apply_output(&mut self, out: &Output) -> Result<(u64), Error> { let commit = out.commitment(); @@ -626,6 +596,7 @@ impl<'a> Extension<'a> { Ok(()) } + /// TODO - move this into "utxo_view" /// Build a Merkle proof for the given output and the block by /// rewinding the MMR to the last pos of the block. /// Note: this relies on the MMR being stable even after pruning/compaction. 
@@ -681,7 +652,7 @@ impl<'a> Extension<'a> { block_header.hash(), ); - let head_header = self.commit_index.head_header()?; + let head_header = self.batch.head_header()?; // We need to build bitmaps of added and removed output positions // so we can correctly rewind all operations applied to the output MMR @@ -689,12 +660,7 @@ impl<'a> Extension<'a> { // undone during rewind). // Rewound output pos will be removed from the MMR. // Rewound input (spent) pos will be added back to the MMR. - let rewind_rm_pos = input_pos_to_rewind( - self.commit_index.clone(), - block_header, - &head_header, - &self.batch, - )?; + let rewind_rm_pos = input_pos_to_rewind(block_header, &head_header, &self.batch)?; self.rewind_to_pos( block_header.output_mmr_size, @@ -996,7 +962,7 @@ impl<'a> Extension<'a> { let mut current = header.clone(); loop { - current = self.commit_index.get_block_header(¤t.previous)?; + current = self.batch.get_block_header(¤t.previous)?; if current.height == 0 { break; } @@ -1139,8 +1105,7 @@ fn check_and_remove_files(txhashset_path: &PathBuf, header: &BlockHeader) -> Res /// of all inputs (spent outputs) we need to "undo" during a rewind. /// We do this by leveraging the "block_input_bitmap" cache and OR'ing /// the set of bitmaps together for the set of blocks being rewound. 
-fn input_pos_to_rewind( - commit_index: Arc, +pub fn input_pos_to_rewind( block_header: &BlockHeader, head_header: &BlockHeader, batch: &Batch, @@ -1188,7 +1153,7 @@ fn input_pos_to_rewind( break; } height -= 1; - current = commit_index.get_hash_by_height(height)?; + current = batch.get_hash_by_height(height)?; } let bitmap = bitmap_fast_or(None, &mut block_input_bitmaps).unwrap(); diff --git a/chain/src/utxo_view.rs b/chain/src/utxo_view.rs new file mode 100644 index 000000000..10fd06380 --- /dev/null +++ b/chain/src/utxo_view.rs @@ -0,0 +1,89 @@ +// Copyright 2018 The Grin Developers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Lightweight readonly view into output MMR for convenience. + +use core::core::pmmr::ReadonlyPMMR; +use core::core::{Block, Input, Output, OutputIdentifier, Transaction}; + +use error::{Error, ErrorKind}; +use grin_store::pmmr::PMMRBackend; +use store::Batch; + +/// Readonly view of the UTXO set (based on output MMR). +pub struct UTXOView<'a> { + pmmr: ReadonlyPMMR<'a, OutputIdentifier, PMMRBackend>, + batch: &'a Batch<'a>, +} + +impl<'a> UTXOView<'a> { + /// Build a new UTXO view. + pub fn new( + pmmr: ReadonlyPMMR<'a, OutputIdentifier, PMMRBackend>, + batch: &'a Batch, + ) -> UTXOView<'a> { + UTXOView { pmmr, batch } + } + + /// Validate a block against the current UTXO set. + /// Every input must spend an output that currently exists in the UTXO set. + /// No duplicate outputs. 
+ pub fn validate_block(&self, block: &Block) -> Result<(), Error> { + for output in block.outputs() { + self.validate_output(output)?; + } + + for input in block.inputs() { + self.validate_input(input)?; + } + Ok(()) + } + + /// Validate a transaction against the current UTXO set. + /// Every input must spend an output that currently exists in the UTXO set. + /// No duplicate outputs. + pub fn validate_tx(&self, tx: &Transaction) -> Result<(), Error> { + for output in tx.outputs() { + self.validate_output(output)?; + } + + for input in tx.inputs() { + self.validate_input(input)?; + } + Ok(()) + } + + fn validate_input(&self, input: &Input) -> Result<(), Error> { + let commit = input.commitment(); + let pos_res = self.batch.get_output_pos(&commit); + if let Ok(pos) = pos_res { + if let Some(_) = self.pmmr.get_data(pos) { + return Ok(()); + } + } + Err(ErrorKind::AlreadySpent(commit).into()) + } + + fn validate_output(&self, output: &Output) -> Result<(), Error> { + let commit = output.commitment(); + if let Ok(pos) = self.batch.get_output_pos(&commit) { + if let Some(out_mmr) = self.pmmr.get_data(pos) { + if out_mmr.commitment() == commit { + return Err(ErrorKind::DuplicateCommitment(commit).into()); + } + } + } + Ok(()) + } +} diff --git a/core/src/core/pmmr/backend.rs b/core/src/core/pmmr/backend.rs new file mode 100644 index 000000000..2907a53a5 --- /dev/null +++ b/core/src/core/pmmr/backend.rs @@ -0,0 +1,75 @@ +// Copyright 2018 The Grin Developers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +use croaring::Bitmap; + +use core::hash::Hash; +use core::BlockHeader; +use ser::PMMRable; + +/// Storage backend for the MMR, just needs to be indexed by order of insertion. +/// The PMMR itself does not need the Backend to be accurate on the existence +/// of an element (i.e. remove could be a no-op) but layers above can +/// depend on an accurate Backend to check existence. +pub trait Backend +where + T: PMMRable, +{ + /// Append the provided Hashes to the backend storage, and optionally an + /// associated data element to flatfile storage (for leaf nodes only). The + /// position of the first element of the Vec in the MMR is provided to + /// help the implementation. + fn append(&mut self, position: u64, data: Vec<(Hash, Option)>) -> Result<(), String>; + + /// Rewind the backend state to a previous position, as if all append + /// operations after that had been canceled. Expects a position in the PMMR + /// to rewind to as well as bitmaps representing the positions added and + /// removed since the rewind position. These are what we will "undo" + /// during the rewind. + fn rewind(&mut self, position: u64, rewind_rm_pos: &Bitmap) -> Result<(), String>; + + /// Get a Hash by insertion position. + fn get_hash(&self, position: u64) -> Option; + + /// Get underlying data by insertion position. + fn get_data(&self, position: u64) -> Option; + + /// Get a Hash by original insertion position + /// (ignoring the remove log). + fn get_from_file(&self, position: u64) -> Option; + + /// Get a Data Element by original insertion position + /// (ignoring the remove log). + fn get_data_from_file(&self, position: u64) -> Option; + + /// Remove Hash by insertion position. 
An index is also provided so the + /// underlying backend can implement some rollback of positions up to a + /// given index (practically the index is the height of a block that + /// triggered removal). + fn remove(&mut self, position: u64) -> Result<(), String>; + + /// Returns the data file path.. this is a bit of a hack now that doesn't + /// sit well with the design, but TxKernels have to be summed and the + /// fastest way to to be able to allow direct access to the file + fn get_data_file_path(&self) -> String; + + /// Also a bit of a hack... + /// Saves a snapshot of the rewound utxo file with the block hash as + /// filename suffix. We need this when sending a txhashset zip file to a + /// node for fast sync. + fn snapshot(&self, header: &BlockHeader) -> Result<(), String>; + + /// For debugging purposes so we can see how compaction is doing. + fn dump_stats(&self); +} diff --git a/core/src/core/pmmr/mod.rs b/core/src/core/pmmr/mod.rs new file mode 100644 index 000000000..986c292f6 --- /dev/null +++ b/core/src/core/pmmr/mod.rs @@ -0,0 +1,45 @@ +// Copyright 2018 The Grin Developers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Persistent and prunable Merkle Mountain Range implementation. For a high +//! level description of MMRs, see: +//! +//! https://github. +//! com/opentimestamps/opentimestamps-server/blob/master/doc/merkle-mountain-range. +//! md +//! +//! This implementation is built in two major parts: +//! +//! 1. 
A set of low-level functions that allow navigation within an arbitrary +//! sized binary tree traversed in postorder. To realize why this is useful, +//! we start with the standard height sequence in a MMR: 0010012001... This is +//! in fact identical to the postorder traversal (left-right-top) of a binary +//! tree. In addition postorder traversal is independent of the height of the +//! tree. This allows us, with a few primitives, to get the height of any node +//! in the MMR from its position in the sequence, as well as calculate the +//! position of siblings, parents, etc. As all those functions only rely on +//! binary operations, they're extremely fast. +//! 2. The implementation of a prunable MMR tree using the above. Each leaf +//! is required to be Writeable (which implements Hashed). Tree roots can be +//! trivially and efficiently calculated without materializing the full tree. +//! The underlying Hashes are stored in a Backend implementation that can +//! either be a simple Vec or a database. + +mod backend; +mod pmmr; +mod readonly_pmmr; + +pub use self::backend::*; +pub use self::pmmr::*; +pub use self::readonly_pmmr::*; diff --git a/core/src/core/pmmr.rs b/core/src/core/pmmr/pmmr.rs similarity index 82% rename from core/src/core/pmmr.rs rename to core/src/core/pmmr/pmmr.rs index aa50feecd..46e0e89c9 100644 --- a/core/src/core/pmmr.rs +++ b/core/src/core/pmmr/pmmr.rs @@ -12,30 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. -//! Persistent and prunable Merkle Mountain Range implementation. For a high -//! level description of MMRs, see: -//! -//! https://github. -//! com/opentimestamps/opentimestamps-server/blob/master/doc/merkle-mountain-range. -//! md -//! -//! This implementation is built in two major parts: -//! -//! 1. A set of low-level functions that allow navigation within an arbitrary -//! sized binary tree traversed in postorder. To realize why this us useful, -//! 
we start with the standard height sequence in a MMR: 0010012001... This is -//! in fact identical to the postorder traversal (left-right-top) of a binary -//! tree. In addition postorder traversal is independent of the height of the -//! tree. This allows us, with a few primitive, to get the height of any node -//! in the MMR from its position in the sequence, as well as calculate the -//! position of siblings, parents, etc. As all those functions only rely on -//! binary operations, they're extremely fast. -//! 2. The implementation of a prunable MMR tree using the above. Each leaf -//! is required to be Writeable (which implements Hashed). Tree roots can be -//! trivially and efficiently calculated without materializing the full tree. -//! The underlying Hashes are stored in a Backend implementation that can -//! either be a simple Vec or a database. - use std::marker; use std::u64; @@ -43,6 +19,7 @@ use croaring::Bitmap; use core::hash::Hash; use core::merkle_proof::MerkleProof; +use core::pmmr::{Backend, ReadonlyPMMR}; use core::BlockHeader; use ser::{PMMRIndexHashable, PMMRable}; use util::LOGGER; @@ -50,62 +27,6 @@ use util::LOGGER; /// 64 bits all ones: 0b11111111...1 const ALL_ONES: u64 = u64::MAX; -/// Storage backend for the MMR, just needs to be indexed by order of insertion. -/// The PMMR itself does not need the Backend to be accurate on the existence -/// of an element (i.e. remove could be a no-op) but layers above can -/// depend on an accurate Backend to check existence. -pub trait Backend -where - T: PMMRable, -{ - /// Append the provided Hashes to the backend storage, and optionally an - /// associated data element to flatfile storage (for leaf nodes only). The - /// position of the first element of the Vec in the MMR is provided to - /// help the implementation. 
- fn append(&mut self, position: u64, data: Vec<(Hash, Option)>) -> Result<(), String>; - - /// Rewind the backend state to a previous position, as if all append - /// operations after that had been canceled. Expects a position in the PMMR - /// to rewind to as well as bitmaps representing the positions added and - /// removed since the rewind position. These are what we will "undo" - /// during the rewind. - fn rewind(&mut self, position: u64, rewind_rm_pos: &Bitmap) -> Result<(), String>; - - /// Get a Hash by insertion position. - fn get_hash(&self, position: u64) -> Option; - - /// Get underlying data by insertion position. - fn get_data(&self, position: u64) -> Option; - - /// Get a Hash by original insertion position - /// (ignoring the remove log). - fn get_from_file(&self, position: u64) -> Option; - - /// Get a Data Element by original insertion position - /// (ignoring the remove log). - fn get_data_from_file(&self, position: u64) -> Option; - - /// Remove Hash by insertion position. An index is also provided so the - /// underlying backend can implement some rollback of positions up to a - /// given index (practically the index is the height of a block that - /// triggered removal). - fn remove(&mut self, position: u64) -> Result<(), String>; - - /// Returns the data file path.. this is a bit of a hack now that doesn't - /// sit well with the design, but TxKernels have to be summed and the - /// fastest way to to be able to allow direct access to the file - fn get_data_file_path(&self) -> String; - - /// Also a bit of a hack... - /// Saves a snapshot of the rewound utxo file with the block hash as - /// filename suffix. We need this when sending a txhashset zip file to a - /// node for fast sync. - fn snapshot(&self, header: &BlockHeader) -> Result<(), String>; - - /// For debugging purposes so we can see how compaction is doing. - fn dump_stats(&self); -} - /// Prunable Merkle Mountain Range implementation. 
All positions within the tree /// start at 1 as they're postorder tree traversal positions rather than array /// indices. @@ -149,6 +70,11 @@ where } } + /// Build a "readonly" view of this PMMR. + pub fn readonly_pmmr(&self) -> ReadonlyPMMR { + ReadonlyPMMR::at(&self.backend, self.last_pos) + } + /// Returns a vec of the peaks of this MMR. pub fn peaks(&self) -> Vec { let peaks_pos = peaks(self.last_pos); diff --git a/core/src/core/pmmr/readonly_pmmr.rs b/core/src/core/pmmr/readonly_pmmr.rs new file mode 100644 index 000000000..064df9958 --- /dev/null +++ b/core/src/core/pmmr/readonly_pmmr.rs @@ -0,0 +1,73 @@ +// Copyright 2018 The Grin Developers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Readonly view of a PMMR. + +use std::marker; + +use core::pmmr::{is_leaf, Backend}; +use ser::PMMRable; + +/// Readonly view of a PMMR. +pub struct ReadonlyPMMR<'a, T, B> +where + T: PMMRable, + B: 'a + Backend, +{ + /// The last position in the PMMR + last_pos: u64, + /// The backend for this readonly PMMR + backend: &'a B, + // only needed to parameterise Backend + _marker: marker::PhantomData, +} + +impl<'a, T, B> ReadonlyPMMR<'a, T, B> +where + T: PMMRable + ::std::fmt::Debug, + B: 'a + Backend, +{ + /// Build a new readonly PMMR. 
+ pub fn new(backend: &'a B) -> ReadonlyPMMR { + ReadonlyPMMR { + last_pos: 0, + backend: backend, + _marker: marker::PhantomData, + } + } + + /// Build a new readonly PMMR pre-initialized to + /// last_pos with the provided backend. + pub fn at(backend: &'a B, last_pos: u64) -> ReadonlyPMMR { + ReadonlyPMMR { + last_pos: last_pos, + backend: backend, + _marker: marker::PhantomData, + } + } + + /// Get the data element at provided position in the MMR. + pub fn get_data(&self, pos: u64) -> Option { + if pos > self.last_pos { + // If we are beyond the rhs of the MMR return None. + None + } else if is_leaf(pos) { + // If we are a leaf then get data from the backend. + self.backend.get_data(pos) + } else { + // If we are not a leaf then return None as only leaves have data. + None + } + } +} diff --git a/pool/src/pool.rs b/pool/src/pool.rs index 64ebde2e7..437ea5a41 100644 --- a/pool/src/pool.rs +++ b/pool/src/pool.rs @@ -289,7 +289,7 @@ impl Pool { // Validate the tx against current chain state. // Check all inputs are in the current UTXO set. // Check all outputs are unique in current UTXO set. - self.blockchain.validate_tx(&tx, header)?; + self.blockchain.validate_tx(&tx)?; let overage = tx.overage(); let offset = (header.total_kernel_offset() + tx.offset)?; diff --git a/pool/src/types.rs b/pool/src/types.rs index fa5c441f7..a268602cc 100644 --- a/pool/src/types.rs +++ b/pool/src/types.rs @@ -214,7 +214,7 @@ pub trait BlockChain: Sync + Send { /// have matured sufficiently. 
fn verify_tx_lock_height(&self, tx: &transaction::Transaction) -> Result<(), PoolError>; - fn validate_tx(&self, tx: &Transaction, header: &BlockHeader) -> Result<(), PoolError>; + fn validate_tx(&self, tx: &Transaction) -> Result<(), PoolError>; fn chain_head(&self) -> Result; diff --git a/pool/tests/coinbase_maturity.rs b/pool/tests/coinbase_maturity.rs index 5c155ffd0..5edba6390 100644 --- a/pool/tests/coinbase_maturity.rs +++ b/pool/tests/coinbase_maturity.rs @@ -56,7 +56,7 @@ impl BlockChain for CoinbaseMaturityErrorChainAdapter { unimplemented!(); } - fn validate_tx(&self, _tx: &Transaction, _header: &BlockHeader) -> Result<(), PoolError> { + fn validate_tx(&self, _tx: &Transaction) -> Result<(), PoolError> { unimplemented!(); } diff --git a/pool/tests/common/mod.rs b/pool/tests/common/mod.rs index 385532547..38456dfd5 100644 --- a/pool/tests/common/mod.rs +++ b/pool/tests/common/mod.rs @@ -128,7 +128,7 @@ impl BlockChain for ChainAdapter { .map_err(|_| PoolError::Other(format!("failed to get block sums"))) } - fn validate_tx(&self, tx: &Transaction, _header: &BlockHeader) -> Result<(), pool::PoolError> { + fn validate_tx(&self, tx: &Transaction) -> Result<(), pool::PoolError> { let utxo = self.utxo.read().unwrap(); for x in tx.outputs() { diff --git a/servers/src/common/adapters.rs b/servers/src/common/adapters.rs index 21a111ca6..bc9e5ad08 100644 --- a/servers/src/common/adapters.rs +++ b/servers/src/common/adapters.rs @@ -749,9 +749,9 @@ impl pool::BlockChain for PoolToChainAdapter { .map_err(|_| pool::PoolError::Other(format!("failed to get block_sums"))) } - fn validate_tx(&self, tx: &Transaction, header: &BlockHeader) -> Result<(), pool::PoolError> { + fn validate_tx(&self, tx: &Transaction) -> Result<(), pool::PoolError> { wo(&self.chain) - .validate_tx(tx, header) + .validate_tx(tx) .map_err(|_| pool::PoolError::Other(format!("failed to validate tx"))) }