Mirror of https://github.com/mimblewimble/grin.git
Integrate sum trees with the rest of the system (#116)
* Integrate PMMR and its persistent backend with the Chain
* Chain can set tree roots; PMMR backend discard
* Check spent and prune for each input in new block
* Handling of forks by rewinding the state
* More PMMR tests and fixes, mostly around rewind
* Rewrite get_unspent to use the sumtrees, fix remaining compilation issues
Parent: 8800d1339d
Commit: 36bcd3cc39
22 changed files with 951 additions and 1420 deletions
@@ -16,7 +16,7 @@
//! and mostly the chain pipeline.

use std::collections::VecDeque;
use std::sync::{Arc, Mutex};
use std::sync::{Arc, Mutex, RwLock};

use secp::pedersen::Commitment;

@@ -26,6 +26,7 @@ use core::core::hash::Hash;
use grin_store::Error::NotFoundErr;
use pipe;
use store;
use sumtree;
use types::*;

use core::global::{MiningParameterMode, MINING_PARAMETER_MODE};

@@ -40,8 +41,8 @@ pub struct Chain {
adapter: Arc<ChainAdapter>,

head: Arc<Mutex<Tip>>,
block_process_lock: Arc<Mutex<bool>>,
orphans: Arc<Mutex<VecDeque<(Options, Block)>>>,
sumtrees: Arc<RwLock<sumtree::SumTrees>>,

// POW verification function
pow_verifier: fn(&BlockHeader, u32) -> bool,

@@ -75,7 +76,7 @@ impl Chain {
gen_block: Option<Block>,
pow_verifier: fn(&BlockHeader, u32) -> bool,
) -> Result<Chain, Error> {
let chain_store = store::ChainKVStore::new(db_root)?;
let chain_store = store::ChainKVStore::new(db_root.clone())?;

// check if we have a head in store, otherwise the genesis block is it
let head = match chain_store.head() {

@@ -87,6 +88,7 @@ impl Chain {
let gen = gen_block.unwrap();
chain_store.save_block(&gen)?;
chain_store.setup_height(&gen.header)?;

// saving a new tip based on genesis
let tip = Tip::new(gen.hash());

@@ -97,16 +99,15 @@ impl Chain {
Err(e) => return Err(Error::StoreErr(e)),
};

// TODO - confirm this was safe to remove based on code above?
// let head = chain_store.head()?;

let store = Arc::new(chain_store);
let sumtrees = sumtree::SumTrees::open(db_root, store.clone())?;

Ok(Chain {
store: Arc::new(chain_store),
store: store,
adapter: adapter,
head: Arc::new(Mutex::new(head)),
block_process_lock: Arc::new(Mutex::new(true)),
orphans: Arc::new(Mutex::new(VecDeque::with_capacity(MAX_ORPHANS + 1))),
sumtrees: Arc::new(RwLock::new(sumtrees)),
pow_verifier: pow_verifier,
})
}
@@ -128,15 +129,17 @@ impl Chain {
let mut head = chain_head.lock().unwrap();
*head = tip.clone();
}

self.check_orphans();
}
Ok(None) => {}
Err(Error::Orphan) => {
let mut orphans = self.orphans.lock().unwrap();
orphans.push_front((opts, b));
orphans.truncate(MAX_ORPHANS);
}
_ => {}
Err(ref e) => {
info!("Rejected block {} at {} : {:?}", b.hash(), b.header.height, e);
}
}

res
@@ -171,7 +174,7 @@ impl Chain {
adapter: self.adapter.clone(),
head: head,
pow_verifier: self.pow_verifier,
lock: self.block_process_lock.clone(),
sumtrees: self.sumtrees.clone(),
}
}
@@ -198,37 +201,36 @@ impl Chain {
}
}

/// Gets an unspent output from its commitment.
/// Will return an Error if the output doesn't exist or has been spent.
/// This querying is done in a way that's
/// consistent with the current chain state and more specifically the
/// current
/// branch it is on in case of forks.
/// Gets an unspent output from its commitment. Will return an Error if the
/// output doesn't exist or has been spent. This querying is done in a
/// way that's consistent with the current chain state and more
/// specifically the current winning fork.
pub fn get_unspent(&self, output_ref: &Commitment) -> Result<Output, Error> {
// TODO use an actual UTXO tree
// in the meantime doing it the *very* expensive way:
// 1. check the output exists
// 2. run the chain back from the head to check it hasn't been spent
if let Ok(out) = self.store.get_output_by_commit(output_ref) {
if let Ok(head) = self.store.head() {
let mut block_h = head.last_block_h;
loop {
if let Ok(b) = self.store.get_block(&block_h) {
for input in b.inputs {
if input.commitment() == *output_ref {
return Err(Error::OutputSpent);
}
}
if b.header.height == 1 {
return Ok(out);
} else {
block_h = b.header.previous;
}
}
}
}
let sumtrees = self.sumtrees.read().unwrap();
let is_unspent = sumtrees.is_unspent(output_ref)?;
if is_unspent {
self.store.get_output_by_commit(output_ref).map_err(&Error::StoreErr)
} else {
Err(Error::OutputNotFound)
}
Err(Error::OutputNotFound)
}
/// Sets the sumtree roots on a brand new block by applying the block on the
/// current sumtree state.
pub fn set_sumtree_roots(&self, b: &mut Block) -> Result<(), Error> {
let mut sumtrees = self.sumtrees.write().unwrap();

let roots = sumtree::extending(&mut sumtrees, |mut extension| {
// apply the block on the sumtrees and check the resulting root
extension.apply_block(b)?;
extension.force_rollback();
Ok(extension.roots())
})?;

b.header.utxo_root = roots.0.hash;
b.header.range_proof_root = roots.1.hash;
b.header.kernel_root = roots.2.hash;
Ok(())
}
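The pattern above is worth calling out: the roots for a new block are computed by applying the block inside a unit of work and then forcing a rollback, so the canonical state is never modified. A minimal, self-contained sketch of that dry-run pattern (all types below are illustrative stand-ins, not grin's actual API):

struct State { items: Vec<u64> }

struct Extension<'a> { state: &'a mut State, added: usize, rollback: bool }

impl<'a> Extension<'a> {
    fn apply(&mut self, item: u64) { self.state.items.push(item); self.added += 1; }
    fn root(&self) -> u64 { self.state.items.iter().sum() } // stand-in for an MMR root
    fn force_rollback(&mut self) { self.rollback = true; }
}

fn extending<T>(state: &mut State, inner: impl FnOnce(&mut Extension) -> T) -> T {
    let mut ext = Extension { state, added: 0, rollback: false };
    let res = inner(&mut ext);
    if ext.rollback {
        // discard everything the closure applied
        let keep = ext.state.items.len() - ext.added;
        ext.state.items.truncate(keep);
    }
    res
}

fn main() {
    let mut state = State { items: vec![1, 2, 3] };
    // compute what the root *would* be with 10 applied, without keeping it
    let root = extending(&mut state, |ext| {
        ext.apply(10);
        let r = ext.root();
        ext.force_rollback();
        r
    });
    assert_eq!(root, 16);
    assert_eq!(state.items, vec![1, 2, 3]); // canonical state untouched
}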
/// Total difficulty at the head of the chain

@@ -37,6 +37,7 @@ extern crate secp256k1zkp as secp;
mod chain;
pub mod pipe;
pub mod store;
pub mod sumtree;
pub mod types;

// Re-export the base interface
@@ -14,7 +14,7 @@

//! Implementation of the chain block acceptance (or refusal) pipeline.

use std::sync::{Arc, Mutex};
use std::sync::{Arc, RwLock};

use secp;
use time;

@@ -25,6 +25,7 @@ use core::core::{BlockHeader, Block};
use core::core::transaction;
use types::*;
use store;
use sumtree;
use core::global;

/// Contextual information required to process a new block and either reject or

@@ -40,8 +41,8 @@ pub struct BlockContext {
pub head: Tip,
/// The POW verification function
pub pow_verifier: fn(&BlockHeader, u32) -> bool,
/// The lock
pub lock: Arc<Mutex<bool>>,
/// MMR sum tree states
pub sumtrees: Arc<RwLock<sumtree::SumTrees>>,
}

/// Runs the block processing pipeline, including validation and finding a
@@ -65,16 +66,26 @@ pub fn process_block(b: &Block, mut ctx: BlockContext) -> Result<Option<Tip>, Er
validate_header(&b.header, &mut ctx)?;
}

validate_block(b, &mut ctx)?;
debug!(
"Block at {} with hash {} is valid, going to save and append.",
b.header.height,
b.hash()
);
// take the lock on the sum trees and start a chain extension unit of work
// dependent on the success of the internal validation and saving operations
let local_sumtrees = ctx.sumtrees.clone();
let mut sumtrees = local_sumtrees.write().unwrap();
sumtree::extending(&mut sumtrees, |mut extension| {

let _ = ctx.lock.lock().unwrap();
add_block(b, &mut ctx)?;
update_head(b, &mut ctx)
validate_block(b, &mut ctx, &mut extension)?;
debug!(
"Block at {} with hash {} is valid, going to save and append.",
b.header.height,
b.hash()
);

add_block(b, &mut ctx)?;
let h = update_head(b, &mut ctx)?;
if h.is_none() {
extension.force_rollback();
}
Ok(h)
})
}
/// Process the block header

@@ -89,7 +100,9 @@ pub fn process_block_header(bh: &BlockHeader, mut ctx: BlockContext) -> Result<O
validate_header(&bh, &mut ctx)?;
add_block_header(bh, &mut ctx)?;

let _ = ctx.lock.lock().unwrap();
// just taking the shared lock
let _ = ctx.sumtrees.write().unwrap();

update_header_head(bh, &mut ctx)
}
@@ -169,13 +182,14 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), E
}

/// Fully validate the block content.
fn validate_block(block: &Block, ctx: &mut BlockContext) -> Result<(), Error> {
if block.header.height > ctx.head.height + 1 {
fn validate_block(b: &Block, ctx: &mut BlockContext, ext: &mut sumtree::Extension) -> Result<(), Error> {
if b.header.height > ctx.head.height + 1 {
return Err(Error::Orphan);
}

// main isolated block validation, checks all commitment sums and sigs
let curve = secp::Secp256k1::with_caps(secp::ContextFlag::Commit);
try!(block.validate(&curve).map_err(&Error::InvalidBlockProof));
try!(b.validate(&curve).map_err(&Error::InvalidBlockProof));

// check that all the outputs of the block are "new" -
// that they do not clobber any existing unspent outputs (by their commitment)
@@ -187,16 +201,61 @@ fn validate_block(block: &Block, ctx: &mut BlockContext) -> Result<(), Error> {
// };

// TODO check every input exists as a UTXO using the UTXO index
// apply the new block to the MMR trees and check the new root hashes
if b.header.previous == ctx.head.last_block_h {
// standard head extension
ext.apply_block(b)?;
} else {

// extending a fork, first identify the block where forking occurred
// keeping the hashes of blocks along the fork
let mut current = b.header.previous;
let mut hashes = vec![];
loop {
let curr_header = ctx.store.get_block_header(&current)?;
let height_header = ctx.store.get_header_by_height(curr_header.height)?;
if curr_header.hash() != height_header.hash() {
hashes.insert(0, curr_header.hash());
current = curr_header.previous;
} else {
break;
}
}

// rewind the sum trees up to the forking block, providing the height of the
// forked block and the last commitment we want to rewind to
let forked_block = ctx.store.get_block(&current)?;
if forked_block.header.height > 0 {
let last_output = &forked_block.outputs[forked_block.outputs.len() - 1];
let last_kernel = &forked_block.kernels[forked_block.kernels.len() - 1];
ext.rewind(forked_block.header.height, last_output, last_kernel)?;
}

// apply all forked blocks, including this new one
for h in hashes {
let fb = ctx.store.get_block(&h)?;
ext.apply_block(&fb)?;
}
ext.apply_block(&b)?;
}

let (utxo_root, rproof_root, kernel_root) = ext.roots();
if utxo_root.hash != b.header.utxo_root ||
rproof_root.hash != b.header.range_proof_root ||
kernel_root.hash != b.header.kernel_root {

ext.dump();
return Err(Error::InvalidRoot);
}
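The fork walk above relies on the header-by-height index always describing the current winning chain: walking back from the new block's previous header, the first header the index agrees with is the common ancestor, and everything after it must be re-applied on the rewound trees. A runnable sketch with plain maps standing in for the store (names here are illustrative, not grin's API):

use std::collections::HashMap;

struct Header { hash: u64, previous: u64, height: u64 }

/// Walk back from `from` (the new block's previous hash) until the height
/// index agrees with the header; returns the fork point plus the fork-only
/// block hashes, oldest first, ready to be re-applied after a rewind.
fn fork_hashes(
    from: u64,
    headers_by_hash: &HashMap<u64, Header>, // mirrors get_block_header
    hash_at_height: &HashMap<u64, u64>,     // mirrors get_header_by_height
) -> (u64, Vec<u64>) {
    let mut current = from;
    let mut hashes = vec![];
    loop {
        let h = &headers_by_hash[&current];
        if hash_at_height[&h.height] != h.hash {
            hashes.insert(0, h.hash); // keep oldest-first order
            current = h.previous;
        } else {
            break; // found the common ancestor on the main chain
        }
    }
    (current, hashes)
}

fn main() {
    let mk = |hash, previous, height| Header { hash, previous, height };
    // main chain 101-102-103, fork 202-203 branching off 101
    let headers: HashMap<u64, Header> = [
        (101, mk(101, 100, 1)), (102, mk(102, 101, 2)), (103, mk(103, 102, 3)),
        (202, mk(202, 101, 2)), (203, mk(203, 202, 3)),
    ].into_iter().collect();
    // the height index currently points at the main chain
    let at_height: HashMap<u64, u64> = [(1, 101), (2, 102), (3, 103)].into_iter().collect();
    assert_eq!(fork_hashes(203, &headers, &at_height), (101, vec![202, 203]));
}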
// check that any coinbase outputs are spendable (that they have matured sufficiently)
for input in &block.inputs {
for input in &b.inputs {
if let Ok(output) = ctx.store.get_output_by_commit(&input.commitment()) {
if output.features.contains(transaction::COINBASE_OUTPUT) {
if let Ok(output_header) = ctx.store.get_block_header_by_output_commit(&input.commitment()) {

// TODO - make sure we are not off-by-1 here vs. the equivalent transaction validation rule
if block.header.height <= output_header.height + consensus::COINBASE_MATURITY {
if b.header.height <= output_header.height + consensus::COINBASE_MATURITY {
return Err(Error::ImmatureCoinbase);
}
};
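Worked example of the maturity rule, using a hypothetical COINBASE_MATURITY of 1000 (the real value lives in the consensus module): a coinbase minted at height 100 is rejected as an input for any block of height 1100 or less, and first becomes spendable at height 1101.

const COINBASE_MATURITY: u64 = 1_000; // illustrative value only

fn coinbase_spendable(coinbase_height: u64, spending_height: u64) -> bool {
    // mirrors the check above: rejected while height <= creation + maturity
    spending_height > coinbase_height + COINBASE_MATURITY
}

fn main() {
    assert!(!coinbase_spendable(100, 1100)); // still immature
    assert!(coinbase_spendable(100, 1101)); // matured
}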
@@ -243,6 +302,7 @@ fn update_head(b: &Block, ctx: &mut BlockContext) -> Result<Option<Tip>, Error>
} else {
ctx.store.save_head(&tip).map_err(&Error::StoreErr)?;
}
// TODO if we're switching branch, make sure to backtrack the sum trees

ctx.head = tip.clone();
info!("Updated head to {} at {}.", b.hash(), b.header.height);
@@ -34,6 +34,8 @@ const HEADER_HEAD_PREFIX: u8 = 'I' as u8;
const HEADER_HEIGHT_PREFIX: u8 = '8' as u8;
const OUTPUT_COMMIT_PREFIX: u8 = 'o' as u8;
const HEADER_BY_OUTPUT_PREFIX: u8 = 'p' as u8;
const COMMIT_POS_PREFIX: u8 = 'c' as u8;
const KERNEL_POS_PREFIX: u8 = 'k' as u8;

/// An implementation of the ChainStore trait backed by a simple key-value
/// store.

@@ -151,23 +153,30 @@ impl ChainStore for ChainKVStore {
)))
}

// TODO - this looks identical to get_output_by_commit above
// TODO - are we sure this returns a hash correctly?
fn has_output_commit(&self, commit: &Commitment) -> Result<Hash, Error> {
option_to_not_found(self.db.get_ser(&to_key(
OUTPUT_COMMIT_PREFIX,
&mut commit.as_ref().to_vec(),
)))
fn save_output_pos(&self, commit: &Commitment, pos: u64) -> Result<(), Error> {
self.db.put_ser(&to_key(COMMIT_POS_PREFIX, &mut commit.as_ref().to_vec())[..], &pos)
}

fn get_output_pos(&self, commit: &Commitment) -> Result<u64, Error> {
option_to_not_found(self.db.get_ser(&to_key(COMMIT_POS_PREFIX, &mut commit.as_ref().to_vec())))
}

fn save_kernel_pos(&self, excess: &Commitment, pos: u64) -> Result<(), Error> {
self.db.put_ser(&to_key(KERNEL_POS_PREFIX, &mut excess.as_ref().to_vec())[..], &pos)
}

fn get_kernel_pos(&self, excess: &Commitment) -> Result<u64, Error> {
option_to_not_found(self.db.get_ser(&to_key(KERNEL_POS_PREFIX, &mut excess.as_ref().to_vec())))
}

/// Maintain consistency of the "header_by_height" index by traversing back through the
/// current chain and updating "header_by_height" until we reach a block_header
/// that is consistent with its height (everything prior to this will be consistent)
fn setup_height(&self, bh: &BlockHeader) -> Result<(), Error> {
self.db.put_ser(
&u64_to_key(HEADER_HEIGHT_PREFIX, bh.height),
bh,
)?;
self.db.put_ser(&u64_to_key(HEADER_HEIGHT_PREFIX, bh.height), bh)?;
if bh.height == 0 {
return Ok(());
}

let mut prev_h = bh.previous;
let mut prev_height = bh.height - 1;
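The new constants above follow the file's existing scheme: a single prefix byte carves the flat key-value store into separate indexes, so 'c' + commitment bytes map to an output's MMR position and 'k' + excess bytes to a kernel's. A self-contained sketch of that keying, with a BTreeMap standing in for the real store and to_key mirroring the helper used above:

use std::collections::BTreeMap;

const COMMIT_POS_PREFIX: u8 = b'c';
const KERNEL_POS_PREFIX: u8 = b'k';

fn to_key(prefix: u8, raw: &[u8]) -> Vec<u8> {
    // prepend the namespace byte to the raw commitment/excess bytes
    let mut key = Vec::with_capacity(1 + raw.len());
    key.push(prefix);
    key.extend_from_slice(raw);
    key
}

fn main() {
    let mut db: BTreeMap<Vec<u8>, u64> = BTreeMap::new();
    let commit = [7u8; 33]; // stand-in for a 33-byte Pedersen commitment
    let excess = [9u8; 33];

    // save_output_pos / save_kernel_pos
    db.insert(to_key(COMMIT_POS_PREFIX, &commit), 42);
    db.insert(to_key(KERNEL_POS_PREFIX, &excess), 7);

    // get_output_pos: same prefix + bytes finds the position again, and the
    // two indexes cannot collide even for identical raw bytes
    assert_eq!(db.get(&to_key(COMMIT_POS_PREFIX, &commit)), Some(&42));
    assert_eq!(db.get(&to_key(KERNEL_POS_PREFIX, &commit)), None);
}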
chain/src/sumtree.rs (new file, 283 lines)

@@ -0,0 +1,283 @@
// Copyright 2016 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Utility structs to handle the 3 sumtrees (utxo, range proof, kernel) more
//! conveniently and transactionally.

use std::fs;
use std::collections::HashMap;
use std::path::Path;
use std::sync::Arc;

use secp;
use secp::pedersen::{RangeProof, Commitment};

use core::core::{Block, TxKernel, Output, SumCommit};
use core::core::pmmr::{Summable, NoSum, PMMR, HashSum, Backend};
use grin_store;
use grin_store::sumtree::PMMRBackend;
use types::ChainStore;
use types::Error;

const SUMTREES_SUBDIR: &'static str = "sumtrees";
const UTXO_SUBDIR: &'static str = "utxo";
const RANGE_PROOF_SUBDIR: &'static str = "rangeproof";
const KERNEL_SUBDIR: &'static str = "kernel";

struct PMMRHandle<T> where T: Summable + Clone {
backend: PMMRBackend<T>,
last_pos: u64,
}

impl<T> PMMRHandle<T> where T: Summable + Clone {
fn new(root_dir: String, file_name: &str) -> Result<PMMRHandle<T>, Error> {
let path = Path::new(&root_dir).join(SUMTREES_SUBDIR).join(file_name);
fs::create_dir_all(path.clone())?;
let be = PMMRBackend::new(path.to_str().unwrap().to_string())?;
let sz = be.unpruned_size()?;
Ok(PMMRHandle {
backend: be,
last_pos: sz,
})
}
}

/// An easy to manipulate structure holding the 3 sum trees necessary to
/// validate blocks and capturing the UTXO set, the range proofs and the
/// kernels. Also handles the index of Commitments to positions in the
/// output and range proof sum trees.
///
/// Note that the index is never authoritative, only the trees are
/// guaranteed to indicate whether an output is spent or not. The index
/// may have commitments that have already been spent, even with
/// pruning enabled.
pub struct SumTrees {
output_pmmr_h: PMMRHandle<SumCommit>,
rproof_pmmr_h: PMMRHandle<NoSum<RangeProof>>,
kernel_pmmr_h: PMMRHandle<NoSum<TxKernel>>,

// chain store used as index of commitments to MMR positions
commit_index: Arc<ChainStore>,
}

impl SumTrees {
/// Open an existing or new set of backends for the SumTrees
pub fn open(root_dir: String, commit_index: Arc<ChainStore>) -> Result<SumTrees, Error> {
Ok(SumTrees {
output_pmmr_h: PMMRHandle::new(root_dir.clone(), UTXO_SUBDIR)?,
rproof_pmmr_h: PMMRHandle::new(root_dir.clone(), RANGE_PROOF_SUBDIR)?,
kernel_pmmr_h: PMMRHandle::new(root_dir.clone(), KERNEL_SUBDIR)?,
commit_index: commit_index,
})
}

/// Whether a given commitment exists in the Output MMR and is unspent
pub fn is_unspent(&self, commit: &Commitment) -> Result<bool, Error> {
let rpos = self.commit_index.get_output_pos(commit);
match rpos {
Ok(pos) => Ok(self.output_pmmr_h.backend.get(pos).is_some()),
Err(grin_store::Error::NotFoundErr) => Ok(false),
Err(e) => Err(Error::StoreErr(e))
}
}
}
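The doc comment on SumTrees states the key invariant: the commitment-to-position index may be stale, and only the output MMR itself decides spentness. A minimal model of is_unspent under that invariant, with plain std collections standing in for the index and the pruned MMR:

use std::collections::HashMap;

fn is_unspent(
    commit: u64,
    index: &HashMap<u64, u64>,   // commitment -> MMR position (may be stale)
    output_mmr: &[Option<u64>],  // authoritative: None once pruned/spent
) -> bool {
    match index.get(&commit) {
        // an index hit alone is not enough: the tree decides
        Some(&pos) => output_mmr[(pos - 1) as usize].is_some(),
        // no index entry: the output was never seen
        None => false,
    }
}

fn main() {
    let index: HashMap<u64, u64> = [(111, 1), (222, 2)].into_iter().collect();
    let mmr = vec![Some(111), None]; // position 2 was spent and pruned
    assert!(is_unspent(111, &index, &mmr));
    assert!(!is_unspent(222, &index, &mmr)); // stale index entry, spent output
    assert!(!is_unspent(333, &index, &mmr)); // unknown commitment
}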
/// Starts a new unit of work to extend the chain with additional blocks,
/// accepting a closure that will work within that unit of work. The closure
/// has access to an Extension object that allows the addition of blocks to
/// the sumtrees and the checking of the current tree roots.
///
/// If the closure returns an error, modifications are canceled and the unit
/// of work is abandoned. Otherwise, the unit of work is permanently applied.
pub fn extending<'a, F, T>(trees: &'a mut SumTrees, inner: F) -> Result<T, Error>
where F: FnOnce(&mut Extension) -> Result<T, Error> {

let sizes: (u64, u64, u64);
let res: Result<T, Error>;
let rollback: bool;
{
let commit_index = trees.commit_index.clone();
let mut extension = Extension::new(trees, commit_index);
res = inner(&mut extension);
rollback = extension.rollback;
if res.is_ok() && !rollback {
extension.save_pos_index()?;
}
sizes = extension.sizes();
}
match res {
Err(e) => {
trees.output_pmmr_h.backend.discard();
trees.rproof_pmmr_h.backend.discard();
trees.kernel_pmmr_h.backend.discard();
Err(e)
}
Ok(r) => {
if rollback {
trees.output_pmmr_h.backend.discard();
trees.rproof_pmmr_h.backend.discard();
trees.kernel_pmmr_h.backend.discard();
} else {
trees.output_pmmr_h.backend.sync()?;
trees.rproof_pmmr_h.backend.sync()?;
trees.kernel_pmmr_h.backend.sync()?;
trees.output_pmmr_h.last_pos = sizes.0;
trees.rproof_pmmr_h.last_pos = sizes.1;
trees.kernel_pmmr_h.last_pos = sizes.2;
}

Ok(r)
}
}
}
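A runnable model of the contract `extending` enforces: if the closure errors, uncommitted writes are discarded; if it succeeds, they are synced (the Ok-plus-force_rollback case behaves like the error case, as set_sumtree_roots uses earlier). The two-phase backend below is hypothetical, standing in for PMMRBackend:

struct Backend { committed: Vec<u64>, staged: Vec<u64> }

impl Backend {
    fn append(&mut self, v: u64) { self.staged.push(v); }
    fn discard(&mut self) { self.staged.clear(); }
    fn sync(&mut self) { self.committed.append(&mut self.staged); }
}

fn extending<T>(
    be: &mut Backend,
    inner: impl FnOnce(&mut Backend) -> Result<T, String>,
) -> Result<T, String> {
    match inner(be) {
        Err(e) => { be.discard(); Err(e) } // unit of work abandoned
        Ok(r) => { be.sync(); Ok(r) }      // unit of work applied
    }
}

fn main() {
    let mut be = Backend { committed: vec![], staged: vec![] };
    // failing closure: nothing sticks
    let r: Result<(), String> = extending(&mut be, |b| {
        b.append(1);
        Err("invalid root".to_string())
    });
    assert!(r.is_err());
    assert!(be.committed.is_empty());
    // succeeding closure: writes are synced
    extending(&mut be, |b| { b.append(2); Ok(()) }).unwrap();
    assert_eq!(be.committed, vec![2]);
}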
/// Allows the application of new blocks on top of the sum trees in a
/// reversible manner within a unit of work provided by the `extending`
/// function.
pub struct Extension<'a> {
output_pmmr: PMMR<'a, SumCommit, PMMRBackend<SumCommit>>,
rproof_pmmr: PMMR<'a, NoSum<RangeProof>, PMMRBackend<NoSum<RangeProof>>>,
kernel_pmmr: PMMR<'a, NoSum<TxKernel>, PMMRBackend<NoSum<TxKernel>>>,

commit_index: Arc<ChainStore>,
new_output_commits: HashMap<Commitment, u64>,
new_kernel_excesses: HashMap<Commitment, u64>,
rollback: bool
}

impl<'a> Extension<'a> {

// constructor
fn new(trees: &'a mut SumTrees, commit_index: Arc<ChainStore>) -> Extension<'a> {
Extension {
output_pmmr: PMMR::at(&mut trees.output_pmmr_h.backend, trees.output_pmmr_h.last_pos),
rproof_pmmr: PMMR::at(&mut trees.rproof_pmmr_h.backend, trees.rproof_pmmr_h.last_pos),
kernel_pmmr: PMMR::at(&mut trees.kernel_pmmr_h.backend, trees.kernel_pmmr_h.last_pos),
commit_index: commit_index,
new_output_commits: HashMap::new(),
new_kernel_excesses: HashMap::new(),
rollback: false,
}
}

/// Apply a new block on top of the existing sum trees. If pruning is
/// enabled, inputs also prune MMR data.
pub fn apply_block(&mut self, b: &Block) -> Result<(), Error> {
let secp = secp::Secp256k1::with_caps(secp::ContextFlag::Commit);

// doing inputs first guarantees an input can't spend an output in the
// same block, enforcing block cut-through
for input in &b.inputs {
let pos_res = self.commit_index.get_output_pos(&input.commitment());
if let Ok(pos) = pos_res {
match self.output_pmmr.prune(pos, b.header.height as u32) {
Ok(true) => {
self.rproof_pmmr.prune(pos, b.header.height as u32)
.map_err(|s| Error::SumTreeErr(s))?;
},
Ok(false) => return Err(Error::AlreadySpent),
Err(s) => return Err(Error::SumTreeErr(s)),
}
} else {
return Err(Error::SumTreeErr(format!("Missing index for {:?}", input.commitment())));
}
}

for out in &b.outputs {
if let Ok(_) = self.commit_index.get_output_pos(&out.commitment()) {
return Err(Error::DuplicateCommitment(out.commitment()));
}
// push new outputs commitments in their MMR and save them in the index
let pos = self.output_pmmr.push(SumCommit {
commit: out.commitment(),
secp: secp.clone(),
}).map_err(&Error::SumTreeErr)?;

self.new_output_commits.insert(out.commitment(), pos);

// push range proofs in their MMR
self.rproof_pmmr.push(NoSum(out.proof)).map_err(&Error::SumTreeErr)?;
}

for kernel in &b.kernels {
if let Ok(_) = self.commit_index.get_kernel_pos(&kernel.excess) {
return Err(Error::DuplicateKernel(kernel.excess.clone()));
}
// push kernels in their MMR
let pos = self.kernel_pmmr.push(NoSum(kernel.clone())).map_err(&Error::SumTreeErr)?;
self.new_kernel_excesses.insert(kernel.excess, pos);
}
Ok(())
}
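The input-first ordering in apply_block is what enforces cut-through: an input can only spend an output that was indexed before this block, so a block that creates an output and spends it internally is rejected. A toy model with a set standing in for the UTXO index (not transactional, purely illustrative):

use std::collections::HashSet;

fn apply_block(utxo: &mut HashSet<u64>, inputs: &[u64], outputs: &[u64]) -> Result<(), String> {
    for i in inputs {
        // inputs first: only pre-existing outputs are spendable
        if !utxo.remove(i) {
            return Err(format!("missing or spent output {}", i));
        }
    }
    for o in outputs {
        if !utxo.insert(*o) {
            return Err(format!("duplicate commitment {}", o));
        }
    }
    Ok(())
}

fn main() {
    let mut utxo: HashSet<u64> = [10].into_iter().collect();
    // spends an existing output and creates a new one: fine
    assert!(apply_block(&mut utxo, &[10], &[20]).is_ok());
    // tries to create 30 and spend it in the same block: rejected,
    // because the input pass runs before 30 ever reaches the index
    assert!(apply_block(&mut utxo, &[30], &[30]).is_err());
}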
fn save_pos_index(&self) -> Result<(), Error> {
for (commit, pos) in &self.new_output_commits {
self.commit_index.save_output_pos(commit, *pos)?;
}
for (excess, pos) in &self.new_kernel_excesses {
self.commit_index.save_kernel_pos(excess, *pos)?;
}
Ok(())
}

/// Rewinds the MMRs to the provided position, given the last output and
/// last kernel of the block we want to rewind to.
pub fn rewind(&mut self, height: u64, output: &Output, kernel: &TxKernel) -> Result<(), Error> {
let out_pos_rew = self.commit_index.get_output_pos(&output.commitment())?;
let kern_pos_rew = self.commit_index.get_kernel_pos(&kernel.excess)?;

self.output_pmmr.rewind(out_pos_rew, height as u32).map_err(&Error::SumTreeErr)?;
self.rproof_pmmr.rewind(out_pos_rew, height as u32).map_err(&Error::SumTreeErr)?;
self.kernel_pmmr.rewind(kern_pos_rew, height as u32).map_err(&Error::SumTreeErr)?;
Ok(())
}

/// Current root hashes and sums (if applicable) for the UTXO, range proof
/// and kernel sum trees.
pub fn roots(&self) -> (HashSum<SumCommit>, HashSum<NoSum<RangeProof>>, HashSum<NoSum<TxKernel>>) {
(self.output_pmmr.root(), self.rproof_pmmr.root(), self.kernel_pmmr.root())
}

/// Force the rollback of this extension, no matter the result
pub fn force_rollback(&mut self) {
self.rollback = true;
}

// Sizes of the sum trees, used by `extending` on rollback.
fn sizes(&self) -> (u64, u64, u64) {
(self.output_pmmr.unpruned_size(), self.rproof_pmmr.unpruned_size(), self.kernel_pmmr.unpruned_size())
}

/// Debugging utility to print information about the MMRs.
pub fn dump(&self) {
let sz = self.output_pmmr.unpruned_size();
if sz > 25 {
return;
}
println!("UTXO set, size: {}", sz);
for n in 0..sz {
print!("{:>8} ", n + 1);
}
println!("");
for n in 1..(sz + 1) {
let ohs = self.output_pmmr.get(n);
match ohs {
Some(hs) => print!("{} ", hs.hash),
None => print!("??"),
}
}
println!("");
}
}
@@ -14,6 +14,8 @@

//! Base types that the block chain pipeline requires.

use std::io;

use secp;
use secp::pedersen::Commitment;

@@ -57,6 +59,14 @@ pub enum Error {
InvalidBlockTime,
/// Block height is invalid (not previous + 1)
InvalidBlockHeight,
/// One of the root hashes in the block is invalid
InvalidRoot,
/// One of the inputs in the block has already been spent
AlreadySpent,
/// An output with that commitment already exists (should be unique)
DuplicateCommitment(Commitment),
/// A kernel with that excess commitment already exists (should be unique)
DuplicateKernel(Commitment),
/// Coinbase can only be spent after it has matured (n blocks)
ImmatureCoinbase,
/// Output not found

@@ -67,6 +77,8 @@ pub enum Error {
StoreErr(grin_store::Error),
/// Error serializing or deserializing a type
SerErr(ser::Error),
/// Error while updating the sum trees
SumTreeErr(String),
/// No chain exists and genesis block is required
GenesisBlockRequired,
/// Anything else

@@ -83,6 +95,11 @@ impl From<ser::Error> for Error {
Error::SerErr(e)
}
}
impl From<io::Error> for Error {
fn from(e: io::Error) -> Error {
Error::SumTreeErr(e.to_string())
}
}

/// The tip of a fork. A handle to the fork ancestry from its leaf in the
/// blockchain tree. References the max height and the latest and previous

@@ -190,16 +207,28 @@ pub trait ChainStore: Send + Sync {
/// Gets an output by its commitment
fn get_output_by_commit(&self, commit: &Commitment) -> Result<Output, store::Error>;

/// Checks whether an output commitment exists and returns the output hash
fn has_output_commit(&self, commit: &Commitment) -> Result<Hash, store::Error>;
/// Gets a block_header for the given input commit
fn get_block_header_by_output_commit(&self, commit: &Commitment) -> Result<BlockHeader, store::Error>;

/// Gets a block_header for the given input commit
fn get_block_header_by_output_commit(&self, commit: &Commitment) -> Result<BlockHeader, store::Error>;
/// Saves the position of an output, represented by its commitment, in the
/// UTXO MMR. Used as an index for spending and pruning.
fn save_output_pos(&self, commit: &Commitment, pos: u64) -> Result<(), store::Error>;

/// Gets the position of an output, represented by its commitment, in the
/// UTXO MMR. Used as an index for spending and pruning.
fn get_output_pos(&self, commit: &Commitment) -> Result<u64, store::Error>;

/// Saves the position of a kernel, represented by its excess, in the
/// kernel MMR. Used as an index for spending and pruning.
fn save_kernel_pos(&self, commit: &Commitment, pos: u64) -> Result<(), store::Error>;

/// Gets the position of a kernel, represented by its excess, in the
/// kernel MMR. Used as an index for spending and pruning.
fn get_kernel_pos(&self, commit: &Commitment) -> Result<u64, store::Error>;

/// Saves the provided block header at the corresponding height. Also checks
/// the consistency of the height chain in store by assuring previous
/// headers
/// are also at their respective heights.
/// headers are also at their respective heights.
fn setup_height(&self, bh: &BlockHeader) -> Result<(), store::Error>;
}
@@ -22,10 +22,11 @@ extern crate grin_pow as pow;

use std::fs;
use std::sync::Arc;
use std::thread;
use rand::os::OsRng;

use chain::Chain;
use chain::types::*;
use core::core::{Block, BlockHeader};
use core::core::hash::Hashed;
use core::core::target::Difficulty;
use core::consensus;

@@ -35,26 +36,28 @@ use core::global::MiningParameterMode;
use pow::{types, cuckoo, MiningWorker};

fn clean_output_dir(dir_name: &str) {
let _ = fs::remove_dir_all(dir_name);
}

fn setup(dir_name: &str) -> Chain {
let _ = env_logger::init();
clean_output_dir(dir_name);
global::set_mining_mode(MiningParameterMode::AutomatedTesting);
let mut genesis_block = None;
if !chain::Chain::chain_exists(dir_name.to_string()) {
genesis_block = pow::mine_genesis_block(None);
}
chain::Chain::init(dir_name.to_string(), Arc::new(NoopAdapter {}),
genesis_block, pow::verify_size).unwrap()
}

#[test]
fn mine_empty_chain() {
let _ = env_logger::init();
clean_output_dir(".grin");
global::set_mining_mode(MiningParameterMode::AutomatedTesting);

let mut rng = OsRng::new().unwrap();
let mut genesis_block = None;
if !chain::Chain::chain_exists(".grin".to_string()) {
genesis_block = pow::mine_genesis_block(None);
}
let chain = chain::Chain::init(".grin".to_string(), Arc::new(NoopAdapter {}),
genesis_block, pow::verify_size).unwrap();
let chain = setup(".grin");

// mine and add a few blocks
let secp = secp::Secp256k1::with_caps(secp::ContextFlag::Commit);
let reward_key = secp::key::SecretKey::new(&secp, &mut rng);

let mut miner_config = types::MinerConfig {
enable_mining: true,
@@ -63,21 +66,24 @@ fn mine_empty_chain() {
};
miner_config.cuckoo_miner_plugin_dir = Some(String::from("../target/debug/deps"));

let mut cuckoo_miner = cuckoo::Miner::new(consensus::EASINESS, global::sizeshift() as u32, global::proofsize());
let mut cuckoo_miner = cuckoo::Miner::new(
consensus::EASINESS, global::sizeshift() as u32, global::proofsize());
for n in 1..4 {
let prev = chain.head_header().unwrap();
let reward_key = secp::key::SecretKey::new(&secp, &mut rng);
let mut b = core::core::Block::new(&prev, vec![], reward_key).unwrap();
b.header.timestamp = prev.timestamp + time::Duration::seconds(60);

let difficulty = consensus::next_difficulty(chain.difficulty_iter()).unwrap();
b.header.difficulty = difficulty.clone();
chain.set_sumtree_roots(&mut b).unwrap();

pow::pow_size(
&mut cuckoo_miner,
&mut b.header,
difficulty,
global::sizeshift() as u32,
).unwrap();

let bhash = b.hash();
chain.process_block(b, chain::EASY_POW).unwrap();
@@ -87,73 +93,156 @@ fn mine_empty_chain() {
assert_eq!(head.height, n);
assert_eq!(head.last_block_h, bhash);

// now check the block_header of the head
let header = chain.head_header().unwrap();
assert_eq!(header.height, n);
assert_eq!(header.hash(), bhash);

// now check the block itself
let block = chain.get_block(&header.hash()).unwrap();
assert_eq!(block.header.height, n);
assert_eq!(block.hash(), bhash);
assert_eq!(block.outputs.len(), 1);

// now check the block height index
let header_by_height = chain.get_header_by_height(n).unwrap();
assert_eq!(header_by_height.hash(), bhash);

// now check the header output index
let output = block.outputs[0];
let header_by_output_commit = chain.
get_block_header_by_output_commit(&output.commitment()).unwrap();
assert_eq!(header_by_output_commit.hash(), bhash);
}
}
#[test]
fn mine_forks() {
let _ = env_logger::init();
clean_output_dir(".grin2");
let chain = setup(".grin2");

let mut rng = OsRng::new().unwrap();

let mut genesis_block = None;
if !chain::Chain::chain_exists(".grin2".to_string()) {
genesis_block = pow::mine_genesis_block(None);
}
let chain = chain::Chain::init(".grin2".to_string(), Arc::new(NoopAdapter {}),
genesis_block, pow::verify_size).unwrap();
// add a first block to not fork genesis
let prev = chain.head_header().unwrap();
let b = prepare_block(&prev, &chain, 2);
chain.process_block(b, chain::SKIP_POW).unwrap();

// mine and add a few blocks
let secp = secp::Secp256k1::with_caps(secp::ContextFlag::Commit);
let reward_key = secp::key::SecretKey::new(&secp, &mut rng);

for n in 1..4 {
// first block for one branch
let prev = chain.head_header().unwrap();
let mut b = core::core::Block::new(&prev, vec![], reward_key).unwrap();
b.header.timestamp = prev.timestamp + time::Duration::seconds(60);
b.header.total_difficulty = Difficulty::from_num(2 * n);
let bhash = b.hash();
chain.process_block(b, chain::SKIP_POW).unwrap();
let b1 = prepare_block(&prev, &chain, 3 * n);

// 2nd block with higher difficulty for other branch
let b2 = prepare_block(&prev, &chain, 3 * n + 1);

// process the first block to extend the chain
let bhash = b1.hash();
chain.process_block(b1, chain::SKIP_POW).unwrap();

// checking our new head
thread::sleep(::std::time::Duration::from_millis(50));
let head = chain.head().unwrap();
assert_eq!(head.height, n as u64);
assert_eq!(head.height, (n + 1) as u64);
assert_eq!(head.last_block_h, bhash);
assert_eq!(head.prev_block_h, prev.hash());

// build another block with higher difficulty
let mut b = core::core::Block::new(&prev, vec![], reward_key).unwrap();
b.header.timestamp = prev.timestamp + time::Duration::seconds(60);
b.header.total_difficulty = Difficulty::from_num(2 * n + 1);
let bhash = b.hash();
chain.process_block(b, chain::SKIP_POW).unwrap();
// process the 2nd block to build a fork with more work
let bhash = b2.hash();
chain.process_block(b2, chain::SKIP_POW).unwrap();

// checking head switch
thread::sleep(::std::time::Duration::from_millis(50));
let head = chain.head().unwrap();
assert_eq!(head.height, n as u64);
assert_eq!(head.height, (n + 1) as u64);
assert_eq!(head.last_block_h, bhash);
assert_eq!(head.prev_block_h, prev.hash());
}
}
#[test]
fn mine_losing_fork() {
let chain = setup(".grin3");

// add a first block we'll be forking from
let prev = chain.head_header().unwrap();
let b1 = prepare_block(&prev, &chain, 2);
let b1head = b1.header.clone();
chain.process_block(b1, chain::SKIP_POW).unwrap();

// prepare the 2 successor, sibling blocks, one with lower diff
let b2 = prepare_block(&b1head, &chain, 4);
let b2head = b2.header.clone();
let bfork = prepare_block(&b1head, &chain, 3);

// add higher difficulty first, prepare its successor, then fork
// with lower diff
chain.process_block(b2, chain::SKIP_POW).unwrap();
assert_eq!(chain.head_header().unwrap().hash(), b2head.hash());
let b3 = prepare_block(&b2head, &chain, 5);
chain.process_block(bfork, chain::SKIP_POW).unwrap();

// adding the successor
let b3head = b3.header.clone();
chain.process_block(b3, chain::SKIP_POW).unwrap();
assert_eq!(chain.head_header().unwrap().hash(), b3head.hash());
}

#[test]
fn longer_fork() {
// to make it easier to compute the sumtree roots in the test, we
// prepare 2 chains, the 2nd will have the forked blocks we can
// then send back on the 1st
let chain = setup(".grin4");
let chain_fork = setup(".grin5");

// add blocks to both chains, 10 on the main one, only the first 5
// for the forked chain
let mut prev = chain.head_header().unwrap();
let forking_header: BlockHeader;
for n in 0..10 {
let b = prepare_block(&prev, &chain, n + 2);
let bh = b.header.clone();

if n < 5 {
let b_fork = b.clone();
chain_fork.process_block(b_fork, chain::SKIP_POW).unwrap();
}

chain.process_block(b, chain::SKIP_POW).unwrap();
prev = bh;
}

// check both chains are in the expected state
let head = chain.head_header().unwrap();
assert_eq!(head.height, 10);
assert_eq!(head.hash(), prev.hash());
let head_fork = chain_fork.head_header().unwrap();
assert_eq!(head_fork.height, 5);

let mut prev_fork = head_fork.clone();
for n in 0..7 {
let b_fork = prepare_block(&prev_fork, &chain_fork, n + 7);
let bh_fork = b_fork.header.clone();

let b = b_fork.clone();
let bh = b.header.clone();
chain.process_block(b, chain::SKIP_POW).unwrap();

chain_fork.process_block(b_fork, chain::SKIP_POW).unwrap();
prev_fork = bh_fork;
}
}
fn prepare_block(prev: &BlockHeader, chain: &Chain, diff: u64) -> Block {
let mut b = prepare_block_nosum(prev, diff);
chain.set_sumtree_roots(&mut b).unwrap();
b
}

fn prepare_block_nosum(prev: &BlockHeader, diff: u64) -> Block {
let mut rng = OsRng::new().unwrap();
let secp = secp::Secp256k1::with_caps(secp::ContextFlag::Commit);
let reward_key = secp::key::SecretKey::new(&secp, &mut rng);
let mut b = core::core::Block::new(prev, vec![], reward_key).unwrap();
b.header.timestamp = prev.timestamp + time::Duration::seconds(60);
b.header.total_difficulty = Difficulty::from_num(diff);
b
}
@@ -69,6 +69,7 @@ fn test_coinbase_maturity() {

let difficulty = consensus::next_difficulty(chain.difficulty_iter()).unwrap();
block.header.difficulty = difficulty.clone();
chain.set_sumtree_roots(&mut block).unwrap();

pow::pow_size(
&mut cuckoo_miner,

@@ -98,6 +99,7 @@ fn test_coinbase_maturity() {

let difficulty = consensus::next_difficulty(chain.difficulty_iter()).unwrap();
block.header.difficulty = difficulty.clone();
chain.set_sumtree_roots(&mut block).unwrap();

pow::pow_size(
&mut cuckoo_miner,

@@ -123,6 +125,7 @@ fn test_coinbase_maturity() {

let difficulty = consensus::next_difficulty(chain.difficulty_iter()).unwrap();
block.header.difficulty = difficulty.clone();
chain.set_sumtree_roots(&mut block).unwrap();

pow::pow_size(
&mut cuckoo_miner,

@@ -143,6 +146,7 @@ fn test_coinbase_maturity() {

let difficulty = consensus::next_difficulty(chain.difficulty_iter()).unwrap();
block.header.difficulty = difficulty.clone();
chain.set_sumtree_roots(&mut block).unwrap();

pow::pow_size(
&mut cuckoo_miner,
@@ -21,7 +21,6 @@ use std::collections::HashSet;

use core::Committed;
use core::{Input, Output, Proof, TxKernel, Transaction, COINBASE_KERNEL, COINBASE_OUTPUT};
use core::transaction::merkle_inputs_outputs;
use consensus::REWARD;
use consensus::MINIMUM_DIFFICULTY;
use core::hash::{Hash, Hashed, ZERO_HASH};

@@ -46,10 +45,12 @@ pub struct BlockHeader {
pub previous: Hash,
/// Timestamp at which the block was built.
pub timestamp: time::Tm,
/// Merkle root of the UTXO set
pub utxo_merkle: Hash,
/// Merkle tree of hashes for all inputs, outputs and kernels in the block
pub tx_merkle: Hash,
/// Merklish root of all the commitments in the UTXO set
pub utxo_root: Hash,
/// Merklish root of all range proofs in the UTXO set
pub range_proof_root: Hash,
/// Merklish root of all transaction kernels in the UTXO set
pub kernel_root: Hash,
/// Features specific to this block, allowing possible future extensions
pub features: BlockFeatures,
/// Nonce increment used to mine this block.
@@ -71,8 +72,9 @@ impl Default for BlockHeader {
timestamp: time::at_utc(time::Timespec { sec: 0, nsec: 0 }),
difficulty: Difficulty::from_num(MINIMUM_DIFFICULTY),
total_difficulty: Difficulty::from_num(MINIMUM_DIFFICULTY),
utxo_merkle: ZERO_HASH,
tx_merkle: ZERO_HASH,
utxo_root: ZERO_HASH,
range_proof_root: ZERO_HASH,
kernel_root: ZERO_HASH,
features: DEFAULT_BLOCK,
nonce: 0,
pow: Proof::zero(proof_size),

@@ -87,8 +89,9 @@ impl Writeable for BlockHeader {
[write_u64, self.height],
[write_fixed_bytes, &self.previous],
[write_i64, self.timestamp.to_timespec().sec],
[write_fixed_bytes, &self.utxo_merkle],
[write_fixed_bytes, &self.tx_merkle],
[write_fixed_bytes, &self.utxo_root],
[write_fixed_bytes, &self.range_proof_root],
[write_fixed_bytes, &self.kernel_root],
[write_u8, self.features.bits()]);

try!(writer.write_u64(self.nonce));

@@ -108,8 +111,9 @@ impl Readable for BlockHeader {
let height = try!(reader.read_u64());
let previous = try!(Hash::read(reader));
let timestamp = reader.read_i64()?;
let utxo_merkle = try!(Hash::read(reader));
let tx_merkle = try!(Hash::read(reader));
let utxo_root = try!(Hash::read(reader));
let rproof_root = try!(Hash::read(reader));
let kernel_root = try!(Hash::read(reader));
let (features, nonce) = ser_multiread!(reader, read_u8, read_u64);
let difficulty = try!(Difficulty::read(reader));
let total_difficulty = try!(Difficulty::read(reader));

@@ -122,8 +126,9 @@ impl Readable for BlockHeader {
sec: timestamp,
nsec: 0,
}),
utxo_merkle: utxo_merkle,
tx_merkle: tx_merkle,
utxo_root: utxo_root,
range_proof_root: rproof_root,
kernel_root: kernel_root,
features: BlockFeatures::from_bits(features).ok_or(ser::Error::CorruptedData)?,
pow: pow,
nonce: nonce,

@@ -137,7 +142,7 @@ impl Readable for BlockHeader {
/// non-explicit, assumed to be deducible from block height (similar to
/// bitcoin's schedule) and expressed as a global transaction fee (added v.H),
/// additive to the total of fees ever collected.
#[derive(Debug)]
#[derive(Debug, Clone)]
pub struct Block {
/// The header with metadata and commitments to the rest of the data
pub header: BlockHeader,
@@ -339,11 +344,8 @@ impl Block {
.map(|&out| out)
.collect::<Vec<_>>();

let tx_merkle = merkle_inputs_outputs(&new_inputs, &new_outputs);

Block {
header: BlockHeader {
tx_merkle: tx_merkle,
pow: self.header.pow.clone(),
difficulty: self.header.difficulty.clone(),
total_difficulty: self.header.total_difficulty.clone(),

@@ -392,20 +394,9 @@ impl Block {
pub fn validate(&self, secp: &Secp256k1) -> Result<(), secp::Error> {
self.verify_coinbase(secp)?;
self.verify_kernels(secp)?;
self.verify_merkle_inputs_outputs()?;
Ok(())
}

/// Verify the transaction Merkle root
pub fn verify_merkle_inputs_outputs(&self) -> Result<(), secp::Error> {
let tx_merkle = merkle_inputs_outputs(&self.inputs, &self.outputs);
if tx_merkle != self.header.tx_merkle {
// TODO more specific error
return Err(secp::Error::IncorrectCommitSum);
}
Ok(())
}

/// Validate the sum of input/output commitments match the sum in kernels
/// and
/// that all kernel signatures are valid.

@@ -609,7 +600,6 @@ mod test {

assert_eq!(b.verify_coinbase(&secp), Err(secp::Error::IncorrectCommitSum));
assert_eq!(b.verify_kernels(&secp), Ok(()));
assert_eq!(b.verify_merkle_inputs_outputs(), Err(secp::Error::IncorrectCommitSum));

assert_eq!(b.validate(&secp), Err(secp::Error::IncorrectCommitSum));
}

@@ -626,7 +616,6 @@ mod test {

assert_eq!(b.verify_coinbase(&secp), Err(secp::Error::IncorrectCommitSum));
assert_eq!(b.verify_kernels(&secp), Ok(()));
assert_eq!(b.verify_merkle_inputs_outputs(), Ok(()));

assert_eq!(b.validate(&secp), Err(secp::Error::IncorrectCommitSum));
}
@@ -18,7 +18,6 @@ pub mod block;
pub mod build;
pub mod hash;
pub mod pmmr;
pub mod sumtree;
pub mod target;
pub mod transaction;
//pub mod txoset;

@@ -184,52 +183,6 @@ impl Writeable for Proof {
}
}

/// Two hashes that will get hashed together in a Merkle tree to build the next
/// level up.
struct HPair(Hash, Hash);

impl Writeable for HPair {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), Error> {
try!(writer.write_bytes(&self.0));
try!(writer.write_bytes(&self.1));
Ok(())
}
}
/// An iterator over hashes in a vector that pairs them to build a row in a
/// Merkle tree. If the vector has an odd number of hashes, it appends a zero
/// hash
/// See https://bitcointalk.org/index.php?topic=102395.0 CVE-2012-2459 (block
/// merkle calculation exploit)
/// for the argument against duplication of last hash
struct HPairIter(Vec<Hash>);
impl Iterator for HPairIter {
type Item = HPair;

fn next(&mut self) -> Option<HPair> {
self.0.pop().map(|first| HPair(first, self.0.pop().unwrap_or(ZERO_HASH)))
}
}
/// A row in a Merkle tree. Can be built from a vector of hashes. Calculates
/// the next level up, or can recursively go all the way up to its root.
struct MerkleRow(Vec<HPair>);
impl MerkleRow {
fn new(hs: Vec<Hash>) -> MerkleRow {
MerkleRow(HPairIter(hs).map(|hp| hp).collect())
}
fn up(&self) -> MerkleRow {
MerkleRow::new(map_vec!(self.0, |hp| hp.hash()))
}
fn root(&self) -> Hash {
if self.0.len() == 0 {
[].hash()
} else if self.0.len() == 1 {
self.0[0].hash()
} else {
self.up().root()
}
}
}

#[cfg(test)]
mod test {
use super::*;
@@ -36,7 +36,6 @@
//! a simple Vec or a database.

use std::clone::Clone;
use std::fmt::Debug;
use std::marker::PhantomData;
use std::ops::{self, Deref};

@@ -81,7 +80,8 @@ impl Writeable for NullSum {
}

/// Wrapper for a type that allows it to be inserted in a tree without summing
pub struct NoSum<T>(T);
#[derive(Clone, Debug)]
pub struct NoSum<T>(pub T);
impl<T> Summable for NoSum<T> {
type Sum = NullSum;
fn sum(&self) -> NullSum {

@@ -91,6 +91,11 @@ impl<T> Summable for NoSum<T> {
return 0;
}
}
impl<T> Writeable for NoSum<T> where T: Writeable {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
self.0.write(writer)
}
}

/// A utility type to handle (Hash, Sum) pairs more conveniently. The addition
/// of two HashSums is the (Hash(h1|h2), h1 + h2) HashSum.

@@ -102,10 +107,10 @@ pub struct HashSum<T> where T: Summable {
pub sum: T::Sum,
}

impl<T> HashSum<T> where T: Summable + Writeable {
impl<T> HashSum<T> where T: Summable + Hashed {
/// Create a hash sum from a summable
pub fn from_summable(idx: u64, elmt: &T) -> HashSum<T> {
let hash = Hashed::hash(elmt);
let hash = elmt.hash();
let sum = elmt.sum();
let node_hash = (idx, &sum, hash).hash();
HashSum {
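The addition rule stated in the HashSum doc comment, in miniature: adding two HashSums hashes the two child hashes together and adds the two sums, which is how sums aggregate up to the MMR peaks. A self-contained toy with 64-bit stand-ins for grin's Hash and sum types:

#[derive(Clone, Copy)]
struct HashSum { hash: u64, sum: u64 }

fn toy_hash(a: u64, b: u64) -> u64 {
    // stand-in for Hash(h1|h2); any deterministic mix will do here
    a.wrapping_mul(0x9e3779b97f4a7c15).rotate_left(31) ^ b
}

impl std::ops::Add for HashSum {
    type Output = HashSum;
    fn add(self, other: HashSum) -> HashSum {
        HashSum {
            hash: toy_hash(self.hash, other.hash), // Hash(h1|h2)
            sum: self.sum + other.sum,             // h1 + h2
        }
    }
}

fn main() {
    let left = HashSum { hash: 0xaaaa, sum: 5 };
    let right = HashSum { hash: 0xbbbb, sum: 7 };
    let parent = left + right;
    assert_eq!(parent.sum, 12); // sums aggregate up the tree
    // the parent hash commits to both children
    assert_eq!(parent.hash, toy_hash(0xaaaa, 0xbbbb));
}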
@@ -142,18 +147,26 @@ impl<T> ops::Add for HashSum<T> where T: Summable {
}

/// Storage backend for the MMR, just needs to be indexed by order of insertion.
/// The remove operation can be a no-op for unoptimized backends.
/// The PMMR itself does not need the Backend to be accurate on the existence
/// of an element (i.e. remove could be a no-op) but layers above can
/// depend on an accurate Backend to check existence.
pub trait Backend<T> where T: Summable {

/// Append the provided HashSums to the backend storage. The position of the
/// first element of the Vec in the MMR is provided to help the
/// implementation.
fn append(&mut self, position: u64, data: Vec<HashSum<T>>) -> Result<(), String>;

fn rewind(&mut self, position: u64, index: u32) -> Result<(), String>;

/// Get a HashSum by insertion position
fn get(&self, position: u64) -> Option<HashSum<T>>;

/// Remove HashSums by insertion position
fn remove(&mut self, positions: Vec<u64>) -> Result<(), String>;
/// Remove HashSums by insertion position. An index is also provided so the
/// underlying backend can implement some rollback of positions up to a
/// given index (practically the index is the height of a block that
/// triggered removal).
fn remove(&mut self, positions: Vec<u64>, index: u32) -> Result<(), String>;
}

/// Prunable Merkle Mountain Range implementation. All positions within the tree

@@ -170,7 +183,7 @@ pub struct PMMR<'a, T, B> where T: Summable, B: 'a + Backend<T> {
summable: PhantomData<T>,
}

impl<'a, T, B> PMMR<'a, T, B> where T: Summable + Writeable + Debug + Clone, B: 'a + Backend<T> {
impl<'a, T, B> PMMR<'a, T, B> where T: Summable + Hashed + Clone, B: 'a + Backend<T> {

/// Build a new prunable Merkle Mountain Range using the provided backend.
pub fn new(backend: &'a mut B) -> PMMR<T, B> {

@@ -210,7 +223,7 @@ impl<'a, T, B> PMMR<'a, T, B> where T: Summable + Writeable + Debug + Clone, B:

/// Push a new Summable element in the MMR. Computes new related peaks at
/// the same time if applicable.
pub fn push(&mut self, elmt: T) -> u64 {
pub fn push(&mut self, elmt: T) -> Result<u64, String> {
let elmt_pos = self.last_pos + 1;
let mut current_hashsum = HashSum::from_summable(elmt_pos, &elmt);
let mut to_append = vec![current_hashsum.clone()];

@@ -233,20 +246,40 @@ impl<'a, T, B> PMMR<'a, T, B> where T: Summable + Writeable + Debug + Clone, B:
}

// append all the new nodes and update the MMR index
self.backend.append(elmt_pos, to_append);
self.backend.append(elmt_pos, to_append)?;
self.last_pos = pos;
elmt_pos
Ok(elmt_pos)
}

/// Prune an element from the tree given its index. Note that to be able to
/// Rewind the PMMR to a previous position, as if all push operations after
/// that had been canceled. Expects a position in the PMMR to rewind to as
/// well as the consumer-provided index of when the change occurred.
pub fn rewind(&mut self, position: u64, index: u32) -> Result<(), String> {
// identify which actual position we should rewind to as the provided
// position is a leaf, which may have had some parent that needs to exist
// afterward for the MMR to be valid
let mut pos = position;
while bintree_postorder_height(pos + 1) > 0 {
pos += 1;
}

self.backend.rewind(pos, index)?;
self.last_pos = pos;
Ok(())
}
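The while loop in rewind deserves a worked example: rewinding to a leaf must also keep any parent nodes that the leaf completed, so the position climbs while the next postorder position is a parent. The sketch below hardcodes the heights of the first 7 positions (0 0 1 0 0 1 2, matching the first_50_mmr_heights test further down) rather than reimplementing bintree_postorder_height:

fn height_at(pos: u64) -> u64 {
    const HEIGHTS: [u64; 7] = [0, 0, 1, 0, 0, 1, 2];
    HEIGHTS[(pos - 1) as usize]
}

fn rewind_pos(leaf_pos: u64) -> u64 {
    let mut pos = leaf_pos;
    // the pos < 7 cap only keeps the toy table in range; the real loop
    // relies on bintree_postorder_height alone
    while pos < 7 && height_at(pos + 1) > 0 {
        pos += 1; // next position is a parent: it must survive the rewind
    }
    pos
}

fn main() {
    // rewinding to leaf 2 keeps node 3, the parent that leaves 1 and 2 built
    assert_eq!(rewind_pos(2), 3);
    // rewinding to leaf 5 keeps 6 (parent of 4,5) and 7 (root over 1..=6)
    assert_eq!(rewind_pos(5), 7);
    // leaf 4 completes no parent yet: the MMR ends right at the leaf
    assert_eq!(rewind_pos(4), 4);
}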
|
||||
/// Prune an element from the tree given its position. Note that to be able to
|
||||
/// provide that position and prune, consumers of this API are expected to
|
||||
/// keep an index of elements to positions in the tree. Prunes parent
|
||||
/// nodes as well when they become childless.
|
||||
pub fn prune(&mut self, position: u64) {
|
||||
pub fn prune(&mut self, position: u64, index: u32) -> Result<bool, String> {
|
||||
if let None = self.backend.get(position) {
|
||||
return Ok(false)
|
||||
}
|
||||
let prunable_height = bintree_postorder_height(position);
|
||||
if prunable_height > 0 {
|
||||
// only leaves can be pruned
|
||||
return;
|
||||
return Err(format!("Node at {} is not a leaf, can't prune.", position));
|
||||
}
|
||||
|
||||
// loop going up the tree, from node to parent, as long as we stay inside
|
||||
|
@ -270,7 +303,14 @@ impl<'a, T, B> PMMR<'a, T, B> where T: Summable + Writeable + Debug + Clone, B:
|
|||
}
|
||||
}
|
||||
|
||||
self.backend.remove(to_prune);
|
||||
self.backend.remove(to_prune, index)?;
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
/// Helper function to get the HashSum of a node at a given position from
|
||||
/// the backend.
|
||||
pub fn get(&self, position: u64) -> Option<HashSum<T>> {
|
||||
self.backend.get(position)
|
||||
}
|
||||
|
||||
    /// Total size of the tree, including intermediary nodes and ignoring any
@@ -285,10 +325,11 @@ impl<'a, T, B> PMMR<'a, T, B> where T: Summable + Writeable + Debug + Clone, B:
/// underlying HashSum.
#[derive(Clone)]
pub struct VecBackend<T> where T: Summable + Clone {
    elems: Vec<Option<HashSum<T>>>,
    pub elems: Vec<Option<HashSum<T>>>,
}

impl<T> Backend<T> for VecBackend<T> where T: Summable + Clone {
    #[allow(unused_variables)]
    fn append(&mut self, position: u64, data: Vec<HashSum<T>>) -> Result<(), String> {
        self.elems.append(&mut map_vec!(data, |d| Some(d.clone())));
        Ok(())

@@ -296,12 +337,17 @@ impl<T> Backend<T> for VecBackend<T> where T: Summable + Clone {
    fn get(&self, position: u64) -> Option<HashSum<T>> {
        self.elems[(position - 1) as usize].clone()
    }

    fn remove(&mut self, positions: Vec<u64>) -> Result<(), String> {
    fn remove(&mut self, positions: Vec<u64>, index: u32) -> Result<(), String> {
        for n in positions {
            self.elems[(n - 1) as usize] = None
        }
        Ok(())
    }

    #[allow(unused_variables)]
    fn rewind(&mut self, position: u64, index: u32) -> Result<(), String> {
        self.elems = self.elems[0..(position as usize) + 1].to_vec();
        Ok(())
    }
}

impl<T> VecBackend<T> where T: Summable + Clone {

@@ -344,10 +390,12 @@ impl<T> VecBackend<T> where T: Summable + Clone {
/// backend storage anymore. The PruneList accounts for that mismatch and does
/// the position translation.
pub struct PruneList {
    /// Vector of pruned nodes positions
    pub pruned_nodes: Vec<u64>,
}

impl PruneList {
    /// Instantiate a new empty prune list
    pub fn new() -> PruneList {
        PruneList{pruned_nodes: vec![]}
    }
@@ -639,6 +687,7 @@ mod test {
    }

    #[test]
    #[allow(unused_variables)]
    fn first_50_mmr_heights() {
        let first_100_str =
            "0 0 1 0 0 1 2 0 0 1 0 0 1 2 3 0 0 1 0 0 1 2 0 0 1 0 0 1 2 3 4 \

@@ -654,6 +703,7 @@ mod test {
    }

    #[test]
    #[allow(unused_variables)]
    fn some_peaks() {
        let empty: Vec<u64> = vec![];
        assert_eq!(peaks(1), vec![1]);

@@ -692,6 +742,7 @@ mod test {
    }

    #[test]
    #[allow(unused_variables)]
    fn pmmr_push_root() {
        let elems = [
            TestElem([0, 0, 0, 1]),

@@ -709,7 +760,7 @@ mod test {
        let mut pmmr = PMMR::new(&mut ba);

        // one element
        pmmr.push(elems[0]);
        pmmr.push(elems[0]).unwrap();
        let hash = Hashed::hash(&elems[0]);
        let sum = elems[0].sum();
        let node_hash = (1 as u64, &sum, hash).hash();

@@ -717,55 +768,56 @@ mod test {
        assert_eq!(pmmr.unpruned_size(), 1);

        // two elements
        pmmr.push(elems[1]);
        pmmr.push(elems[1]).unwrap();
        let sum2 = HashSum::from_summable(1, &elems[0]) + HashSum::from_summable(2, &elems[1]);
        assert_eq!(pmmr.root(), sum2);
        assert_eq!(pmmr.unpruned_size(), 3);

        // three elements
        pmmr.push(elems[2]);
        pmmr.push(elems[2]).unwrap();
        let sum3 = sum2.clone() + HashSum::from_summable(4, &elems[2]);
        assert_eq!(pmmr.root(), sum3);
        assert_eq!(pmmr.unpruned_size(), 4);

        // four elements
        pmmr.push(elems[3]);
        pmmr.push(elems[3]).unwrap();
        let sum4 = sum2 + (HashSum::from_summable(4, &elems[2]) + HashSum::from_summable(5, &elems[3]));
        assert_eq!(pmmr.root(), sum4);
        assert_eq!(pmmr.unpruned_size(), 7);

        // five elements
        pmmr.push(elems[4]);
        pmmr.push(elems[4]).unwrap();
        let sum5 = sum4.clone() + HashSum::from_summable(8, &elems[4]);
        assert_eq!(pmmr.root(), sum5);
        assert_eq!(pmmr.unpruned_size(), 8);

        // six elements
        pmmr.push(elems[5]);
        pmmr.push(elems[5]).unwrap();
        let sum6 = sum4.clone() + (HashSum::from_summable(8, &elems[4]) + HashSum::from_summable(9, &elems[5]));
        assert_eq!(pmmr.root(), sum6.clone());
        assert_eq!(pmmr.unpruned_size(), 10);

        // seven elements
        pmmr.push(elems[6]);
        pmmr.push(elems[6]).unwrap();
        let sum7 = sum6 + HashSum::from_summable(11, &elems[6]);
        assert_eq!(pmmr.root(), sum7);
        assert_eq!(pmmr.unpruned_size(), 11);

        // eight elements
        pmmr.push(elems[7]);
        pmmr.push(elems[7]).unwrap();
        let sum8 = sum4 + ((HashSum::from_summable(8, &elems[4]) + HashSum::from_summable(9, &elems[5])) + (HashSum::from_summable(11, &elems[6]) + HashSum::from_summable(12, &elems[7])));
        assert_eq!(pmmr.root(), sum8);
        assert_eq!(pmmr.unpruned_size(), 15);

        // nine elements
        pmmr.push(elems[8]);
        pmmr.push(elems[8]).unwrap();
        let sum9 = sum8 + HashSum::from_summable(16, &elems[8]);
        assert_eq!(pmmr.root(), sum9);
        assert_eq!(pmmr.unpruned_size(), 16);
    }
    #[test]
    #[allow(unused_variables)]
    fn pmmr_prune() {
        let elems = [
            TestElem([0, 0, 0, 1]),

@@ -785,7 +837,7 @@ mod test {
        {
            let mut pmmr = PMMR::new(&mut ba);
            for elem in &elems[..] {
                pmmr.push(*elem);
                pmmr.push(*elem).unwrap();
            }
            orig_root = pmmr.root();
            sz = pmmr.unpruned_size();

@@ -794,7 +846,7 @@ mod test {
        // pruning a leaf with no parent should do nothing
        {
            let mut pmmr = PMMR::at(&mut ba, sz);
            pmmr.prune(16);
            pmmr.prune(16, 0).unwrap();
            assert_eq!(orig_root, pmmr.root());
        }
        assert_eq!(ba.used_size(), 16);

@@ -802,14 +854,14 @@ mod test {
        // pruning leaves with no shared parent just removes 1 element
        {
            let mut pmmr = PMMR::at(&mut ba, sz);
            pmmr.prune(2);
            pmmr.prune(2, 0).unwrap();
            assert_eq!(orig_root, pmmr.root());
        }
        assert_eq!(ba.used_size(), 15);

        {
            let mut pmmr = PMMR::at(&mut ba, sz);
            pmmr.prune(4);
            pmmr.prune(4, 0).unwrap();
            assert_eq!(orig_root, pmmr.root());
        }
        assert_eq!(ba.used_size(), 14);

@@ -817,7 +869,7 @@ mod test {
        // pruning a non-leaf node has no effect
        {
            let mut pmmr = PMMR::at(&mut ba, sz);
            pmmr.prune(3);
            pmmr.prune(3, 0).unwrap_err();
            assert_eq!(orig_root, pmmr.root());
        }
        assert_eq!(ba.used_size(), 14);

@@ -825,7 +877,7 @@ mod test {
        // pruning sibling removes subtree
        {
            let mut pmmr = PMMR::at(&mut ba, sz);
            pmmr.prune(5);
            pmmr.prune(5, 0).unwrap();
            assert_eq!(orig_root, pmmr.root());
        }
        assert_eq!(ba.used_size(), 12);

@@ -833,7 +885,7 @@ mod test {
        // pruning all leaves under level >1 removes all subtree
        {
            let mut pmmr = PMMR::at(&mut ba, sz);
            pmmr.prune(1);
            pmmr.prune(1, 0).unwrap();
            assert_eq!(orig_root, pmmr.root());
        }
        assert_eq!(ba.used_size(), 9);

@@ -842,7 +894,7 @@ mod test {
        {
            let mut pmmr = PMMR::at(&mut ba, sz);
            for n in 1..16 {
                pmmr.prune(n);
                pmmr.prune(n, 0);
            }
            assert_eq!(orig_root, pmmr.root());
        }
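For reference, a minimal sketch of how the now-fallible PMMR API above chains together, reusing the test module's `TestElem` and the in-memory `VecBackend` (illustrative only, not part of the commit):

    // Illustrative only: push, rewind and prune with the new signatures.
    fn pmmr_api_sketch() -> Result<(), String> {
        let mut ba = VecBackend::new();
        let mut pmmr = PMMR::new(&mut ba);

        // push now surfaces backend failures instead of panicking
        let pos1 = pmmr.push(TestElem([0, 0, 0, 1]))?;
        pmmr.push(TestElem([0, 0, 0, 2]))?;

        // rewind back to the first element, canceling the later push; the
        // index argument (e.g. a block height) records when the change happened
        pmmr.rewind(pos1, 1)?;

        // prune tombstones a leaf and reports whether anything was removed
        let pruned = pmmr.prune(pos1, 2)?;
        assert!(pruned);
        Ok(())
    }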
@@ -1,999 +0,0 @@
// Copyright 2016 The Grin Developers
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Sum-Merkle Trees
//!
//! Generic sum-merkle tree. See `doc/merkle.md` for design and motivation for
//! this structure. Most trees in Grin are stored and transmitted as either
//! (a) only a root, or (b) the entire unpruned tree. For these it is sufficient
//! to have a root-calculating function, for which the `compute_root` function
//! should be used.
//!
//! The output set structure has much stronger requirements, as it is updated
//! and pruned in place, and needs to be efficiently storable even when it is
//! very sparse.
//!

use core::hash::{Hash, Hashed};
use ser::{self, Readable, Reader, Writeable, Writer};
use std::collections::HashMap;
use std::{self, mem, ops};

/// Trait describing an object that has a well-defined sum that the tree can
/// sum over
pub trait Summable {
    /// The type of an object's sum
    type Sum: Clone + ops::Add<Output = Self::Sum> + Readable + Writeable;

    /// Obtain the sum of the object
    fn sum(&self) -> Self::Sum;
}
/// An empty sum that takes no space
#[derive(Copy, Clone)]
pub struct NullSum;
impl ops::Add for NullSum {
    type Output = NullSum;
    fn add(self, _: NullSum) -> NullSum {
        NullSum
    }
}

impl Readable for NullSum {
    fn read(_: &mut Reader) -> Result<NullSum, ser::Error> {
        Ok(NullSum)
    }
}

impl Writeable for NullSum {
    fn write<W: Writer>(&self, _: &mut W) -> Result<(), ser::Error> {
        Ok(())
    }
}

/// Wrapper for a type that allows it to be inserted in a tree without summing
pub struct NoSum<T>(T);
impl<T> Summable for NoSum<T> {
    type Sum = NullSum;
    fn sum(&self) -> NullSum {
        NullSum
    }
}

#[derive(Clone)]
enum Node<T: Summable> {
    /// Node with 2^n children which are not stored with the tree
    Pruned(T::Sum),
    /// Actual data
    Leaf(T::Sum),
    /// Node with 2^n children
    Internal {
        lchild: Box<NodeData<T>>,
        rchild: Box<NodeData<T>>,
        sum: T::Sum,
    },
}

impl<T: Summable> Summable for Node<T> {
    type Sum = T::Sum;
    fn sum(&self) -> T::Sum {
        match *self {
            Node::Pruned(ref sum) => sum.clone(),
            Node::Leaf(ref sum) => sum.clone(),
            Node::Internal { ref sum, .. } => sum.clone(),
        }
    }
}

#[derive(Clone)]
struct NodeData<T: Summable> {
    full: bool,
    node: Node<T>,
    hash: Hash,
    depth: u8,
}

impl<T: Summable> Summable for NodeData<T> {
    type Sum = T::Sum;
    fn sum(&self) -> T::Sum {
        self.node.sum()
    }
}

impl<T: Summable> NodeData<T> {
    /// Get the root hash and sum of the node
    fn root_sum(&self) -> (Hash, T::Sum) {
        (self.hash, self.sum())
    }

    fn n_leaves(&self) -> usize {
        if self.full {
            1 << self.depth
        } else {
            if let Node::Internal {
                ref lchild,
                ref rchild,
                ..
            } = self.node
            {
                lchild.n_leaves() + rchild.n_leaves()
            } else {
                unreachable!()
            }
        }
    }
}
/// An insertion ordered merkle sum tree.
#[derive(Clone)]
pub struct SumTree<T: Summable + Writeable> {
    /// Index mapping data to its index in the tree
    index: HashMap<Hash, usize>,
    /// Tree contents
    root: Option<NodeData<T>>,
}

impl<T> SumTree<T>
where
    T: Summable + Writeable,
{
    /// Create a new empty tree
    pub fn new() -> SumTree<T> {
        SumTree {
            index: HashMap::new(),
            root: None,
        }
    }

    /// Accessor for the tree's root
    pub fn root_sum(&self) -> Option<(Hash, T::Sum)> {
        self.root.as_ref().map(|node| node.root_sum())
    }

    fn insert_right_of(mut old: NodeData<T>, new: NodeData<T>) -> NodeData<T> {
        assert!(old.depth >= new.depth);

        // If we are inserting next to a full node, make a parent. If we're
        // inserting a tree of equal depth then we get a full node, otherwise
        // we get a partial node. Leaves and pruned data both count as full
        // nodes.
        if old.full {
            let parent_depth = old.depth + 1;
            let parent_sum = old.sum() + new.sum();
            let parent_hash = (parent_depth, &parent_sum, old.hash, new.hash).hash();
            let parent_full = old.depth == new.depth;
            let parent_node = Node::Internal {
                lchild: Box::new(old),
                rchild: Box::new(new),
                sum: parent_sum,
            };

            NodeData {
                full: parent_full,
                node: parent_node,
                hash: parent_hash,
                depth: parent_depth,
            }
        // If we are inserting next to a partial node, we should actually be
        // inserting under the node, so we recurse. The right child of a partial
        // node is always another partial node or a leaf.
        } else {
            if let Node::Internal {
                ref lchild,
                ref mut rchild,
                ref mut sum,
            } = old.node
            {
                // Recurse
                let dummy_child = NodeData {
                    full: true,
                    node: Node::Pruned(sum.clone()),
                    hash: old.hash,
                    depth: 0,
                };
                let moved_rchild = mem::replace(&mut **rchild, dummy_child);
                mem::replace(&mut **rchild, SumTree::insert_right_of(moved_rchild, new));
                // Update this node's states to reflect the new right child
                if rchild.full && rchild.depth == old.depth - 1 {
                    old.full = rchild.full;
                }
                *sum = lchild.sum() + rchild.sum();
                old.hash = (old.depth, &*sum, lchild.hash, rchild.hash).hash();
            } else {
                unreachable!()
            }
            old
        }
    }
    /// Accessor for number of elements (leaves) in the tree, not including
    /// pruned ones.
    pub fn len(&self) -> usize {
        self.index.len()
    }

    /// Accessor for number of elements (leaves) in the tree, including pruned
    /// ones.
    pub fn unpruned_len(&self) -> usize {
        match self.root {
            None => 0,
            Some(ref node) => node.n_leaves(),
        }
    }

    /// Add an element to the tree. Returns true if the element was added,
    /// false if it already existed in the tree.
    pub fn push(&mut self, elem: T) -> bool {
        // Compute element hash and depth-0 node hash
        let index_hash = Hashed::hash(&elem);
        let elem_sum = elem.sum();
        let elem_hash = (0u8, &elem_sum, index_hash).hash();

        if self.index.contains_key(&index_hash) {
            return false;
        }

        // Special-case the first element
        if self.root.is_none() {
            self.root = Some(NodeData {
                full: true,
                node: Node::Leaf(elem_sum),
                hash: elem_hash,
                depth: 0,
            });
            self.index.insert(index_hash, 0);
            return true;
        }

        // Next, move the old root out of the structure so that we are allowed to
        // move it. We will move a new root back in at the end of the function
        let old_root = mem::replace(&mut self.root, None).unwrap();

        // Insert into tree, compute new root
        let new_node = NodeData {
            full: true,
            node: Node::Leaf(elem_sum),
            hash: elem_hash,
            depth: 0,
        };

        // Put new root in place and record insertion
        let index = old_root.n_leaves();
        self.root = Some(SumTree::insert_right_of(old_root, new_node));
        self.index.insert(index_hash, index);
        true
    }

    fn replace_recurse(node: &mut NodeData<T>, index: usize, new_elem: T) {
        assert!(index < (1 << node.depth));

        if node.depth == 0 {
            assert!(node.full);
            node.hash = (0u8, new_elem.sum(), Hashed::hash(&new_elem)).hash();
            node.node = Node::Leaf(new_elem.sum());
        } else {
            match node.node {
                Node::Internal {
                    ref mut lchild,
                    ref mut rchild,
                    ref mut sum,
                } => {
                    let bit = index & (1 << (node.depth - 1));
                    if bit > 0 {
                        SumTree::replace_recurse(rchild, index - bit, new_elem);
                    } else {
                        SumTree::replace_recurse(lchild, index, new_elem);
                    }
                    *sum = lchild.sum() + rchild.sum();
                    node.hash = (node.depth, &*sum, lchild.hash, rchild.hash).hash();
                }
                // Pruned data would not have been in the index
                Node::Pruned(_) => unreachable!(),
                Node::Leaf(_) => unreachable!(),
            }
        }
    }
    /// Replaces an element in the tree. Returns true if the element existed
    /// and was replaced. Returns false if the old element did not exist or
    /// if the new element already existed
    pub fn replace(&mut self, elem: &T, new_elem: T) -> bool {
        let index_hash = Hashed::hash(elem);

        let root = match self.root {
            Some(ref mut node) => node,
            None => {
                return false;
            }
        };

        match self.index.remove(&index_hash) {
            None => false,
            Some(index) => {
                let new_index_hash = Hashed::hash(&new_elem);
                if self.index.contains_key(&new_index_hash) {
                    false
                } else {
                    SumTree::replace_recurse(root, index, new_elem);
                    self.index.insert(new_index_hash, index);
                    true
                }
            }
        }
    }

    /// Determine whether an element exists in the tree.
    /// If so, return its index
    pub fn contains(&self, elem: &T) -> Option<usize> {
        let index_hash = Hashed::hash(elem);
        self.index.get(&index_hash).map(|x| *x)
    }

    fn prune_recurse(node: &mut NodeData<T>, index: usize) {
        assert!(index < (1 << node.depth));

        if node.depth == 0 {
            let sum = if let Node::Leaf(ref sum) = node.node {
                sum.clone()
            } else {
                unreachable!()
            };
            node.node = Node::Pruned(sum);
        } else {
            let mut prune_me = None;
            match node.node {
                Node::Internal {
                    ref mut lchild,
                    ref mut rchild,
                    ..
                } => {
                    let bit = index & (1 << (node.depth - 1));
                    if bit > 0 {
                        SumTree::prune_recurse(rchild, index - bit);
                    } else {
                        SumTree::prune_recurse(lchild, index);
                    }
                    if let (&Node::Pruned(ref lsum), &Node::Pruned(ref rsum)) =
                        (&lchild.node, &rchild.node)
                    {
                        if node.full {
                            prune_me = Some(lsum.clone() + rsum.clone());
                        }
                    }
                }
                Node::Pruned(_) => {
                    // Already pruned. Ok.
                }
                Node::Leaf(_) => unreachable!(),
            }
            if let Some(sum) = prune_me {
                node.node = Node::Pruned(sum);
            }
        }
    }
    /// Removes an element from storage, not affecting the tree
    /// Returns true if the element was actually in the tree
    pub fn prune(&mut self, elem: &T) -> bool {
        let index_hash = Hashed::hash(elem);

        let root = match self.root {
            Some(ref mut node) => node,
            None => {
                return false;
            }
        };

        match self.index.remove(&index_hash) {
            None => false,
            Some(index) => {
                SumTree::prune_recurse(root, index);
                true
            }
        }
    }

    fn clone_pruned_recurse(node: &NodeData<T>) -> NodeData<T> {
        if node.full {
            // replaces full internal nodes; leaves and already pruned nodes are full
            // as well
            NodeData {
                full: true,
                node: Node::Pruned(node.sum()),
                hash: node.hash,
                depth: node.depth,
            }
        } else {
            if let Node::Internal { ref lchild, ref rchild, ref sum } = node.node {
                // just recurse on each side to get the pruned version
                NodeData {
                    full: false,
                    node: Node::Internal {
                        lchild: Box::new(SumTree::clone_pruned_recurse(lchild)),
                        rchild: Box::new(SumTree::clone_pruned_recurse(rchild)),
                        sum: sum.clone(),
                    },
                    hash: node.hash,
                    depth: node.depth,
                }
            } else {
                unreachable!()
            }
        }
    }

    /// Minimal clone of this tree, replacing all full nodes with a pruned node,
    /// therefore only copying non-full subtrees.
    pub fn clone_pruned(&self) -> SumTree<T> {
        match self.root {
            Some(ref node) => {
                SumTree {
                    index: HashMap::new(),
                    root: Some(SumTree::clone_pruned_recurse(node)),
                }
            },
            None => SumTree::new(),
        }
    }

    // TODO push_many, truncate to allow bulk updates
}
// A SumTree is encoded as follows: an empty tree is the single byte 0x00.
// A nonempty tree is encoded recursively by encoding its root node. Each
// node is encoded as follows:
//   flag: two bits, 01 for partial, 10 for full, 11 for pruned
//         00 is reserved so that the 0 byte can uniquely specify an empty tree
//   depth: six bits, zero indicates a leaf
//   hash: 32 bytes
//   sum: <length of sum encoding>
//
// For a leaf, this is followed by an encoding of the element. For an
// internal node, the left child is encoded followed by the right child.
// For a pruned internal node, it is followed by nothing.
//
impl<T> Writeable for SumTree<T>
where
    T: Summable + Writeable,
{
    fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
        match self.root {
            None => writer.write_u8(0),
            Some(ref node) => node.write(writer),
        }
    }
}

impl<T> Writeable for NodeData<T>
where
    T: Summable + Writeable,
{
    fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
        assert!(self.depth < 64);

        // Compute depth byte: 0x80 means full, 0xc0 means unpruned
        let mut depth = 0;
        if self.full {
            depth |= 0x80;
        }
        if let Node::Pruned(_) = self.node {
        } else {
            depth |= 0xc0;
        }
        depth |= self.depth;
        // Encode node
        try!(writer.write_u8(depth));
        try!(self.hash.write(writer));
        match self.node {
            Node::Pruned(ref sum) => sum.write(writer),
            Node::Leaf(ref sum) => sum.write(writer),
            Node::Internal {
                ref lchild,
                ref rchild,
                ref sum,
            } => {
                try!(sum.write(writer));
                try!(lchild.write(writer));
                rchild.write(writer)
            }
        }
    }
}
fn node_read_recurse<T>(
    reader: &mut Reader,
    index: &mut HashMap<Hash, usize>,
    tree_index: &mut usize,
) -> Result<NodeData<T>, ser::Error>
where
    T: Summable + Readable + Hashed,
{
    // Read depth byte
    let depth = try!(reader.read_u8());
    let full = depth & 0x80 == 0x80;
    let pruned = depth & 0xc0 != 0xc0;
    let depth = depth & 0x3f;

    // Sanity-check for zero byte
    if pruned && !full {
        return Err(ser::Error::CorruptedData);
    }

    // Read remainder of node
    let hash = try!(Readable::read(reader));
    let sum = try!(Readable::read(reader));
    let data = match (depth, pruned) {
        (_, true) => {
            *tree_index += 1 << depth as usize;
            Node::Pruned(sum)
        }
        (0, _) => {
            index.insert(hash, *tree_index);
            *tree_index += 1;
            Node::Leaf(sum)
        }
        (_, _) => {
            Node::Internal {
                lchild: Box::new(try!(node_read_recurse(reader, index, tree_index))),
                rchild: Box::new(try!(node_read_recurse(reader, index, tree_index))),
                sum: sum,
            }
        }
    };

    Ok(NodeData {
        full: full,
        node: data,
        hash: hash,
        depth: depth,
    })
}

impl<T> Readable for SumTree<T>
where
    T: Summable + Writeable + Readable + Hashed,
{
    fn read(reader: &mut Reader) -> Result<SumTree<T>, ser::Error> {
        // Read depth byte of root node
        let depth = try!(reader.read_u8());
        let full = depth & 0x80 == 0x80;
        let pruned = depth & 0xc0 != 0xc0;
        let depth = depth & 0x3f;

        // Special-case the zero byte
        if pruned && !full {
            return Ok(SumTree {
                index: HashMap::new(),
                root: None,
            });
        }

        // Otherwise continue reading it
        let mut index = HashMap::new();

        let hash = try!(Readable::read(reader));
        let sum = try!(Readable::read(reader));
        let data = match (depth, pruned) {
            (_, true) => Node::Pruned(sum),
            (0, _) => Node::Leaf(sum),
            (_, _) => {
                let mut tree_index = 0;
                Node::Internal {
                    lchild: Box::new(try!(node_read_recurse(reader, &mut index, &mut tree_index))),
                    rchild: Box::new(try!(node_read_recurse(reader, &mut index, &mut tree_index))),
                    sum: sum,
                }
            }
        };

        Ok(SumTree {
            index: index,
            root: Some(NodeData {
                full: full,
                node: data,
                hash: hash,
                depth: depth,
            }),
        })
    }
}
/// This is used as a scratch space during root calculation so that we can
/// keep everything on the stack in a fixed-size array. It reflects a maximum
/// tree capacity of 2^48, which is not practically reachable.
const MAX_MMR_HEIGHT: usize = 48;

/// This algorithm is based on Peter Todd's in
/// https://github.com/opentimestamps/opentimestamps-server/blob/master/python-opentimestamps/opentimestamps/core/timestamp.py#L324
///
fn compute_peaks<S, I>(iter: I, peaks: &mut [Option<(u8, Hash, S)>])
where
    S: Clone + ops::Add<Output = S> + Writeable,
    I: Iterator<Item = (u8, Hash, S)>,
{
    for peak in peaks.iter_mut() {
        *peak = None;
    }
    for (mut new_depth, mut new_hash, mut new_sum) in iter {
        let mut index = 0;
        while let Some((old_depth, old_hash, old_sum)) = peaks[index].take() {
            // Erase current peak (done by `take()` above), then combine
            // it with the new addition, to be inserted one higher
            index += 1;
            new_depth = old_depth + 1;
            new_sum = old_sum.clone() + new_sum.clone();
            new_hash = (new_depth, &new_sum, old_hash, new_hash).hash();
        }
        peaks[index] = Some((new_depth, new_hash, new_sum));
    }
}

/// Directly compute the Merkle root of a sum-tree whose contents are given
/// explicitly in the passed iterator.
pub fn compute_root<'a, T, I>(iter: I) -> Option<(Hash, T::Sum)>
where
    T: 'a + Summable + Writeable,
    I: Iterator<Item = &'a T>,
{
    let mut peaks = vec![None; MAX_MMR_HEIGHT];
    compute_peaks(
        iter.map(|elem| {
            let depth = 0u8;
            let sum = elem.sum();
            let hash = (depth, &sum, Hashed::hash(elem)).hash();
            (depth, hash, sum)
        }),
        &mut peaks,
    );

    let mut ret = None;
    for peak in peaks {
        ret = match (peak, ret) {
            (None, x) => x,
            (Some((_, hash, sum)), None) => Some((hash, sum)),
            (Some((depth, lhash, lsum)), Some((rhash, rsum))) => {
                let sum = lsum + rsum;
                let hash = (depth + 1, &sum, lhash, rhash).hash();
                Some((hash, sum))
            }
        };
    }
    ret
}
// a couple functions that help debugging
#[allow(dead_code)]
fn print_node<T>(node: &NodeData<T>, tab_level: usize)
where
    T: Summable + Writeable,
    T::Sum: std::fmt::Debug,
{
    for _ in 0..tab_level {
        print!("  ");
    }
    print!("[{:03}] {} {:?}", node.depth, node.hash, node.sum());
    match node.node {
        Node::Pruned(_) => println!(" X"),
        Node::Leaf(_) => println!(" L"),
        Node::Internal {
            ref lchild,
            ref rchild,
            ..
        } => {
            println!(":");
            print_node(lchild, tab_level + 1);
            print_node(rchild, tab_level + 1);
        }
    }
}

#[allow(dead_code)]
#[allow(missing_docs)]
pub fn print_tree<T>(tree: &SumTree<T>)
where
    T: Summable + Writeable,
    T::Sum: std::fmt::Debug,
{
    match tree.root {
        None => println!("[empty tree]"),
        Some(ref node) => {
            print_node(node, 0);
        }
    }
}

#[cfg(test)]
mod test {
    use rand::{thread_rng, Rng};
    use core::hash::Hashed;
    use ser;
    use super::*;

    #[derive(Copy, Clone, Debug)]
    struct TestElem([u32; 4]);
    impl Summable for TestElem {
        type Sum = u64;
        fn sum(&self) -> u64 {
            // sums are not allowed to overflow, so we use this simple
            // non-injective "sum" function that will still be homomorphic
            self.0[0] as u64 * 0x1000 + self.0[1] as u64 * 0x100 + self.0[2] as u64 * 0x10 +
                self.0[3] as u64
        }
    }

    impl Writeable for TestElem {
        fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
            try!(writer.write_u32(self.0[0]));
            try!(writer.write_u32(self.0[1]));
            try!(writer.write_u32(self.0[2]));
            writer.write_u32(self.0[3])
        }
    }
    fn sumtree_create_(prune: bool) {
        let mut tree = SumTree::new();

        macro_rules! leaf {
            ($data: expr) => ({
                (0u8, $data.sum(), $data.hash())
            })
        };

        macro_rules! node {
            ($left: expr, $right: expr) => (
                ($left.0 + 1, $left.1 + $right.1, $left.hash(), $right.hash())
            )
        };

        macro_rules! prune {
            ($prune: expr, $tree: expr, $elem: expr) => {
                if $prune {
                    assert_eq!($tree.len(), 1);
                    $tree.prune(&$elem);
                    assert_eq!($tree.len(), 0);
                    // double-pruning shouldn't hurt anything
                    $tree.prune(&$elem);
                    assert_eq!($tree.len(), 0);
                } else {
                    assert_eq!($tree.len(), $tree.unpruned_len());
                }
            }
        };

        let mut elems = [
            TestElem([0, 0, 0, 1]),
            TestElem([0, 0, 0, 2]),
            TestElem([0, 0, 0, 3]),
            TestElem([0, 0, 0, 4]),
            TestElem([0, 0, 0, 5]),
            TestElem([0, 0, 0, 6]),
            TestElem([0, 0, 0, 7]),
            TestElem([1, 0, 0, 0]),
        ];

        assert_eq!(tree.root_sum(), None);
        assert_eq!(tree.root_sum(), compute_root(elems[0..0].iter()));
        assert_eq!(tree.len(), 0);
        assert_eq!(tree.contains(&elems[0]), None);
        assert!(tree.push(elems[0]));
        assert_eq!(tree.contains(&elems[0]), Some(0));

        // One element
        let expected = leaf!(elems[0]).hash();
        assert_eq!(tree.root_sum(), Some((expected, 1)));
        assert_eq!(tree.root_sum(), compute_root(elems[0..1].iter()));
        assert_eq!(tree.unpruned_len(), 1);
        prune!(prune, tree, elems[0]);

        // Two elements
        assert_eq!(tree.contains(&elems[1]), None);
        assert!(tree.push(elems[1]));
        assert_eq!(tree.contains(&elems[1]), Some(1));
        let expected = node!(leaf!(elems[0]), leaf!(elems[1])).hash();
        assert_eq!(tree.root_sum(), Some((expected, 3)));
        assert_eq!(tree.root_sum(), compute_root(elems[0..2].iter()));
        assert_eq!(tree.unpruned_len(), 2);
        prune!(prune, tree, elems[1]);

        // Three elements
        assert_eq!(tree.contains(&elems[2]), None);
        assert!(tree.push(elems[2]));
        assert_eq!(tree.contains(&elems[2]), Some(2));
        let expected = node!(node!(leaf!(elems[0]), leaf!(elems[1])), leaf!(elems[2])).hash();
        assert_eq!(tree.root_sum(), Some((expected, 6)));
        assert_eq!(tree.root_sum(), compute_root(elems[0..3].iter()));
        assert_eq!(tree.unpruned_len(), 3);
        prune!(prune, tree, elems[2]);

        // Four elements
        assert_eq!(tree.contains(&elems[3]), None);
        assert!(tree.push(elems[3]));
        assert_eq!(tree.contains(&elems[3]), Some(3));
        let expected = node!(
            node!(leaf!(elems[0]), leaf!(elems[1])),
            node!(leaf!(elems[2]), leaf!(elems[3]))
        ).hash();
        assert_eq!(tree.root_sum(), Some((expected, 10)));
        assert_eq!(tree.root_sum(), compute_root(elems[0..4].iter()));
        assert_eq!(tree.unpruned_len(), 4);
        prune!(prune, tree, elems[3]);

        // Five elements
        assert_eq!(tree.contains(&elems[4]), None);
        assert!(tree.push(elems[4]));
        assert_eq!(tree.contains(&elems[4]), Some(4));
        let expected = node!(
            node!(
                node!(leaf!(elems[0]), leaf!(elems[1])),
                node!(leaf!(elems[2]), leaf!(elems[3]))
            ),
            leaf!(elems[4])
        ).hash();
        assert_eq!(tree.root_sum(), Some((expected, 15)));
        assert_eq!(tree.root_sum(), compute_root(elems[0..5].iter()));
        assert_eq!(tree.unpruned_len(), 5);
        prune!(prune, tree, elems[4]);

        // Six elements
        assert_eq!(tree.contains(&elems[5]), None);
        assert!(tree.push(elems[5]));
        assert_eq!(tree.contains(&elems[5]), Some(5));
        let expected = node!(
            node!(
                node!(leaf!(elems[0]), leaf!(elems[1])),
                node!(leaf!(elems[2]), leaf!(elems[3]))
            ),
            node!(leaf!(elems[4]), leaf!(elems[5]))
        ).hash();
        assert_eq!(tree.root_sum(), Some((expected, 21)));
        assert_eq!(tree.root_sum(), compute_root(elems[0..6].iter()));
        assert_eq!(tree.unpruned_len(), 6);
        prune!(prune, tree, elems[5]);

        // Seven elements
        assert_eq!(tree.contains(&elems[6]), None);
        assert!(tree.push(elems[6]));
        assert_eq!(tree.contains(&elems[6]), Some(6));
        let expected = node!(
            node!(
                node!(leaf!(elems[0]), leaf!(elems[1])),
                node!(leaf!(elems[2]), leaf!(elems[3]))
            ),
            node!(node!(leaf!(elems[4]), leaf!(elems[5])), leaf!(elems[6]))
        ).hash();
        assert_eq!(tree.root_sum(), Some((expected, 28)));
        assert_eq!(tree.root_sum(), compute_root(elems[0..7].iter()));
        assert_eq!(tree.unpruned_len(), 7);
        prune!(prune, tree, elems[6]);

        // Eight elements
        assert_eq!(tree.contains(&elems[7]), None);
        assert!(tree.push(elems[7]));
        assert_eq!(tree.contains(&elems[7]), Some(7));
        let expected = node!(
            node!(
                node!(leaf!(elems[0]), leaf!(elems[1])),
                node!(leaf!(elems[2]), leaf!(elems[3]))
            ),
            node!(
                node!(leaf!(elems[4]), leaf!(elems[5])),
                node!(leaf!(elems[6]), leaf!(elems[7]))
            )
        ).hash();
        assert_eq!(tree.root_sum(), Some((expected, 28 + 0x1000)));
        assert_eq!(tree.root_sum(), compute_root(elems[0..8].iter()));
        assert_eq!(tree.unpruned_len(), 8);
        prune!(prune, tree, elems[7]);
        // If we weren't pruning, try changing some elements
        if !prune {
            for i in 0..8 {
                let old_elem = elems[i];
                elems[i].0[2] += 1 + i as u32;
                assert_eq!(tree.contains(&old_elem), Some(i));
                assert_eq!(tree.contains(&elems[i]), None);
                assert!(tree.replace(&old_elem, elems[i]));
                assert_eq!(tree.contains(&elems[i]), Some(i));
                assert_eq!(tree.contains(&old_elem), None);
            }
            let expected = node!(
                node!(
                    node!(leaf!(elems[0]), leaf!(elems[1])),
                    node!(leaf!(elems[2]), leaf!(elems[3]))
                ),
                node!(
                    node!(leaf!(elems[4]), leaf!(elems[5])),
                    node!(leaf!(elems[6]), leaf!(elems[7]))
                )
            ).hash();
            assert_eq!(tree.root_sum(), Some((expected, 28 + 36 * 0x10 + 0x1000)));
            assert_eq!(tree.root_sum(), compute_root(elems[0..8].iter()));
            assert_eq!(tree.unpruned_len(), 8);
        }

        let mut rng = thread_rng();
        // If we weren't pruning as we went, try pruning everything now
        // and make sure nothing breaks.
        if !prune {
            rng.shuffle(&mut elems);
            let mut expected_count = 8;
            let expected_root_sum = tree.root_sum();
            for elem in &elems {
                assert_eq!(tree.root_sum(), expected_root_sum);
                assert_eq!(tree.len(), expected_count);
                assert_eq!(tree.unpruned_len(), 8);
                tree.prune(elem);
                expected_count -= 1;
            }
        }

        // Build a large random tree and check its root against that computed
        // by `compute_root`.
        let mut big_elems: Vec<TestElem> = vec![];
        let mut big_tree = SumTree::new();
        for i in 0..1000 {
            // To avoid RNG overflow we generate random elements that are small.
            // Though to avoid repeat elements they have to be reasonably big.
            let new_elem;
            let word1 = rng.gen::<u16>() as u32;
            let word2 = rng.gen::<u16>() as u32;
            if rng.gen() {
                if rng.gen() {
                    new_elem = TestElem([word1, word2, 0, 0]);
                } else {
                    new_elem = TestElem([word1, 0, word2, 0]);
                }
            } else {
                if rng.gen() {
                    new_elem = TestElem([0, word1, 0, word2]);
                } else {
                    new_elem = TestElem([0, 0, word1, word2]);
                }
            }

            big_elems.push(new_elem);
            assert!(big_tree.push(new_elem));
            if i % 25 == 0 {
                // Verify root
                println!("{}", i);
                assert_eq!(big_tree.root_sum(), compute_root(big_elems.iter()));
                // Do serialization roundtrip
            }
        }
    }

    #[test]
    fn sumtree_create() {
        sumtree_create_(false);
        sumtree_create_(true);
    }

    #[test]
    fn sumtree_double_add() {
        let elem = TestElem([10, 100, 1000, 10000]);

        let mut tree = SumTree::new();
        // Cannot prune a nonexistent element
        assert!(!tree.prune(&elem));
        // Can add
        assert!(tree.push(elem));
        // Cannot double-add
        assert!(!tree.push(elem));
        // Can prune but not double-prune
        assert!(tree.prune(&elem));
        assert!(!tree.prune(&elem));
        // Can re-add
        assert!(tree.push(elem));
    }
}
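Worth noting before moving on: the heart of the deleted module was the agreement between incremental `push` and the standalone `compute_root`. A minimal sketch of that invariant, using the module's own `TestElem` (illustrative only, not part of the commit):

    // Sketch of the invariant the deleted sumtree tests exercised: the
    // incrementally built root always matches the directly computed one.
    fn root_consistency_sketch() {
        let elems = vec![TestElem([0, 0, 0, 1]), TestElem([0, 0, 0, 2])];
        let mut tree = SumTree::new();
        for e in &elems {
            assert!(tree.push(*e));
        }
        assert_eq!(tree.root_sum(), compute_root(elems.iter()));
    }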
@@ -17,10 +17,10 @@
use byteorder::{ByteOrder, BigEndian};
use secp::{self, Secp256k1, Message, Signature};
use secp::pedersen::{RangeProof, Commitment};
use std::ops;

use core::Committed;
use core::MerkleRow;
use core::hash::{Hash, Hashed};
use core::pmmr::Summable;
use ser::{self, Reader, Writer, Readable, Writeable};

bitflags! {

@@ -286,6 +286,10 @@ bitflags! {
/// transferred. The commitment is a blinded value for the output while the
/// range proof guarantees the commitment includes a positive value without
/// overflow and the ownership of the private key.
///
/// The hash of an output only covers its features and commitment. The range
/// proof is expected to have its own hash and is stored and committed to
/// separately.
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
pub struct Output {
    /// Options for an output's structure or use

@@ -343,12 +347,60 @@ impl Output {
    }
}

/// Utility function to calculate the Merkle root of vectors of inputs and
/// outputs.
pub fn merkle_inputs_outputs(inputs: &Vec<Input>, outputs: &Vec<Output>) -> Hash {
    let mut all_hs = map_vec!(inputs, |inp| inp.hash());
    all_hs.append(&mut map_vec!(outputs, |out| out.hash()));
    MerkleRow::new(all_hs).root()
/// Wrapper for Output commitments to provide the Summable trait.
#[derive(Clone, Debug)]
pub struct SumCommit {
    /// Output commitment
    pub commit: Commitment,
    /// Secp256k1 used to sum
    pub secp: Secp256k1,
}

/// Outputs get summed through their commitments.
impl Summable for SumCommit {
    type Sum = SumCommit;

    fn sum(&self) -> SumCommit {
        SumCommit {
            commit: self.commit.clone(),
            secp: self.secp.clone(),
        }
    }

    fn sum_len() -> usize {
        secp::constants::PEDERSEN_COMMITMENT_SIZE
    }
}

impl Writeable for SumCommit {
    fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
        self.commit.write(writer)
    }
}

impl Readable for SumCommit {
    fn read(reader: &mut Reader) -> Result<SumCommit, ser::Error> {
        let secp = secp::Secp256k1::with_caps(secp::ContextFlag::Commit);
        Ok(SumCommit {
            commit: Commitment::read(reader)?,
            secp: secp,
        })
    }
}

impl ops::Add for SumCommit {
    type Output = SumCommit;

    fn add(self, other: SumCommit) -> SumCommit {
        let sum = match self.secp.commit_sum(vec![self.commit.clone(), other.commit.clone()], vec![]) {
            Ok(s) => s,
            Err(_) => Commitment::from_vec(vec![1; 33]),
        };
        SumCommit {
            commit: sum,
            secp: self.secp,
        }
    }
}

fn u64_to_32bytes(n: u64) -> [u8; 32] {
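As a usage sketch (hypothetical helper, not in the commit): summing two output commitments through the `Add` impl above. The sum is meaningful because Pedersen commitments are additively homomorphic.

    // Hypothetical helper showing SumCommit addition; delegates to
    // secp.commit_sum under the hood.
    fn add_output_commits(a: Commitment, b: Commitment) -> Commitment {
        let secp = secp::Secp256k1::with_caps(secp::ContextFlag::Commit);
        let left = SumCommit { commit: a, secp: secp.clone() };
        let right = SumCommit { commit: b, secp: secp };
        (left + right).commit
    }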
@@ -26,6 +26,7 @@ use global;
/// fees and a height of zero.
pub fn genesis() -> core::Block {
    let proof_size = global::proofsize();
    let empty_hash = [].hash();
    core::Block {
        header: core::BlockHeader {
            height: 0,

@@ -38,8 +39,9 @@ pub fn genesis() -> core::Block {
            },
            difficulty: Difficulty::from_num(MINIMUM_DIFFICULTY),
            total_difficulty: Difficulty::from_num(MINIMUM_DIFFICULTY),
            utxo_merkle: [].hash(),
            tx_merkle: [].hash(),
            utxo_root: empty_hash,
            range_proof_root: empty_hash,
            kernel_root: empty_hash,
            features: core::DEFAULT_BLOCK,
            nonce: global::get_genesis_nonce(),
            pow: core::Proof::zero(proof_size), // TODO get actual PoW solution
@@ -261,7 +261,6 @@ impl<'a> Reader for BinReader<'a> {
    }
}

impl Readable for Commitment {
    fn read(reader: &mut Reader) -> Result<Commitment, Error> {
        let a = try!(reader.read_fixed_bytes(PEDERSEN_COMMITMENT_SIZE));

@@ -273,6 +272,18 @@ impl Readable for Commitment {
    }
}

impl Writeable for Commitment {
    fn write<W: Writer>(&self, writer: &mut W) -> Result<(), Error> {
        writer.write_fixed_bytes(self)
    }
}

impl Writeable for RangeProof {
    fn write<W: Writer>(&self, writer: &mut W) -> Result<(), Error> {
        writer.write_fixed_bytes(self)
    }
}

impl Readable for RangeProof {
    fn read(reader: &mut Reader) -> Result<RangeProof, Error> {
        let p = try!(reader.read_limited_vec(MAX_PROOF_SIZE));
115	doc/merkle.md

@@ -174,118 +174,3 @@ only binary tree, we can find a key in the tree by its insertion position. So a
full index of keys inserted in the tree (i.e. an output commitment) to their
insertion positions is also required.

### Sum Tree Disk Storage

The sum tree is split in chunks that are handled independently and stored in
separate files.

    3              G
                 /   \
    2           M     \
              /   \    \
    1        X     Y    \     ---- cutoff height H=1
            / \   / \    \
    0      A   B C   D    E

           [----] [----]
           chunk1 chunk2

Each chunk is a full tree rooted at height H, lower than R, the height of the
tree root. Because our MMR is append-only, each chunk is guaranteed to never
change on additions. The remaining nodes are captured in a root chunk that
contains the top nodes (above H) in the MMR as well as the leftover nodes on
its right side.

In the example above, we have 2 chunks X[A,B] and Y[C,D] and a root chunk
G[M,E]. The cutoff height H=1 and the root height R=3.

Note that each non-root chunk is a complete and fully valid MMR sum tree in
itself. The root chunk, with each chunk replaced with a single pruned node,
is also a complete and fully valid MMR.

As new leaves get inserted in the tree, more chunks get extracted, reducing the
size of the root chunk.

Assuming a cutoff height of H and a root height of R, the size (in nodes) of
each chunk is:

    chunk_size = 2^(H+1)-1

The maximum size of the root chunk is:

    max_root_size = 2^(R-H)-1 + 2^(H+1)-2

If we set the cutoff height H=15 and assume a node size of 50 bytes, for a tree
with a root at height 26 (capable of containing all Bitcoin UTXOs at this time)
we obtain a chunk size of about 3.3MB (without pruning) and a maximum root chunk
size of about 3.4MB.
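For a quick sanity check of those numbers, a small sketch in Rust (illustrative only; the 50-byte node size is the assumption used above):

    // Chunk and maximum root chunk sizes in bytes, for cutoff height h,
    // root height r and a fixed per-node size (the formulas above).
    fn chunk_sizes(h: u32, r: u32, node_size: u64) -> (u64, u64) {
        let chunk_nodes = (1u64 << (h + 1)) - 1;
        let max_root_nodes = ((1u64 << (r - h)) - 1) + ((1u64 << (h + 1)) - 2);
        (chunk_nodes * node_size, max_root_nodes * node_size)
    }

    // chunk_sizes(15, 26, 50) == (3_276_750, 3_379_050), i.e. ~3.3MB and ~3.4MB.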
### Tombstone Log

Deleting a leaf in a given tree can be expensive if done naively, especially
if spread on multiple chunks that aren't stored in memory. It would require
loading the affected chunks, removing the node (and possibly pruning parents)
and re-saving the whole chunks back.

To avoid this, we maintain a simple append-only log of deletion operations that
tombstone a given leaf node. When the tombstone log becomes too large, we can
easily, in the background, apply it as a whole on affected chunks.

Note that our sum MMR never actually fully deletes a key (i.e. output
commitment) as subsequent leaf nodes aren't shifted and parents don't need
rebalancing. Deleting a node just makes its storage in the tree unnecessary,
allowing for potential additional pruning of parent nodes.
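To make the idea concrete, a minimal sketch of such a log (hypothetical type, much simpler than the real implementation in the store crate):

    // Append-only list of tombstoned leaf positions. Deletions are cheap
    // because nothing is rewritten; the log is folded into the chunk files
    // lazily, in the background.
    struct TombstoneLog {
        removed: Vec<u64>,
    }

    impl TombstoneLog {
        fn delete(&mut self, pos: u64) {
            self.removed.push(pos);
        }
        fn includes(&self, pos: u64) -> bool {
            self.removed.contains(&pos)
        }
        // once the log grows past a threshold, apply it to the chunks and reset
        fn needs_compaction(&self, max_nodes: usize) -> bool {
            self.removed.len() > max_nodes
        }
    }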
### Key to Tree Insertion Position Index

For its operation, our sum MMR needs an index from key (i.e. an output
commitment) to the position of that key in insertion order. From that
position, the tree can be walked down to find the corresponding leaf node.

To hold that index without having to keep it all in memory, we store it in a
fast KV store (rocksdb, a leveldb fork). This reduces the implementation effort
while still keeping great performance. In the future we may adopt a more
specialized storage to hold this index.

### Design Notes

We chose explicitly to not try to save the whole tree into a KV store. While
this may sound attractive, mapping a sum tree structure onto a KV store is
non-trivial. Having a separate storage mechanism for the MMR introduces
multiple advantages:

* Storing all nodes in a KV store makes it impossible to fully separate
the implementation of the tree and its persistence. The tree implementation
gets more complex to include persistence concerns, making the whole system
much harder to understand, debug and maintain.
* The state of the tree is consensus critical. We want to minimize the
dependency on 3rd party storages whose change in behavior could impact our
consensus (the position index is less critical than the tree, being layered
above).
* The overall system can be simpler and faster: because of some particular
properties of our MMR (append-only, same size keys, composable), the storage
solution is actually rather straightforward and allows us to do multiple
optimizations (i.e. bulk operations, no updates, etc.).

### Operations

We list here the main operations that the combined sum tree structure and its
storage logic have to implement; a sketch of this contract follows the list.
Operations that have side-effects (push, prune, truncate) need to be
reversible in case the result of the modification is deemed invalid (root or
sum don't match).

* Bulk Push (new block):
  1. Partially clone last in-memory chunk (full subtrees will not change).
  2. Append all new hashes to the clone.
  3. New root hash and sum can be checked immediately.
  4. On commit, insert new hashes to position index, merge the clone in the
  latest in-memory chunk, save.
* Prune (new block):
  1. On commit, delete from position index, add to append-only tombstone file.
  2. When the append-only tombstone file becomes too large, apply fully and
  delete (in background).
* Exists (new block or tx): directly check the key/position index.
* Truncate (fork): usually combined with a bulk push.
  1. Partially clone truncated last (or before last) in-memory chunk (again,
  full subtrees before the truncation position will not change).
  2. Proceed with bulk push as normal.
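Expressed as a Rust trait, the contract could look roughly like this (hypothetical, for illustration only; the actual backend lives in the grin_store crate and differs in detail):

    // Hypothetical trait capturing the storage operations listed above.
    trait SumTreeStorage<T> {
        // bulk push for a new block; the returned root is validated before commit
        fn push_all(&mut self, elems: Vec<T>) -> Result<Hash, String>;
        // tombstone a leaf; physically applied lazily, in the background
        fn prune(&mut self, pos: u64) -> Result<(), String>;
        // direct lookup in the key/position index
        fn exists(&self, key: &Hash) -> bool;
        // drop everything after pos, typically on a fork before a bulk push
        fn truncate(&mut self, pos: u64) -> Result<(), String>;
        // make pending changes durable, or throw them away
        fn commit(&mut self) -> Result<(), String>;
        fn discard(&mut self);
    }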
@@ -141,9 +141,8 @@ impl Miner {
        }
    }

    /// Keeping this optional so setting in a separate funciton
    /// Keeping this optional so setting in a separate function
    /// instead of in the new function
    pub fn set_debug_output_id(&mut self, debug_output_id: String) {
        self.debug_output_id = debug_output_id;
    }

@@ -538,6 +537,7 @@ impl Miner {
        b.header.nonce = rng.gen();
        b.header.difficulty = difficulty;
        b.header.timestamp = time::at_utc(time::Timespec::new(now_sec, 0));
        self.chain.set_sumtree_roots(&mut b).expect("Error setting sum tree roots");
        b
    }
@@ -25,6 +25,7 @@ use rand;
use std::fmt;

use core::core;
use core::core::hash::Hashed;

/// An entry in the transaction pool.
/// These are the vertices of both of the graph structures

@@ -214,7 +215,8 @@ impl DirectedGraph {
/// proofs and any extra data the kernel may cover, but it is used initially
/// for testing purposes.
pub fn transaction_identifier(tx: &core::transaction::Transaction) -> core::hash::Hash {
    core::transaction::merkle_inputs_outputs(&tx.inputs, &tx.outputs)
    // core::transaction::merkle_inputs_outputs(&tx.inputs, &tx.outputs)
    tx.hash()
}

#[cfg(test)]
@@ -8,6 +8,7 @@ workspace = ".."
byteorder = "^0.5"
env_logger = "^0.3.5"
log = "^0.3"
libc = "^0.2"
memmap = { git = "https://github.com/danburkert/memmap-rs" }
rocksdb = "^0.7.0"
@@ -22,6 +22,7 @@

extern crate byteorder;
extern crate grin_core as core;
extern crate libc;
#[macro_use]
extern crate log;
extern crate env_logger;
@@ -18,9 +18,12 @@ use memmap;
use std::cmp;
use std::fs::{self, File, OpenOptions};
use std::io::{self, Write, BufReader, BufRead, ErrorKind};
use std::os::unix::io::AsRawFd;
use std::path::Path;
use std::io::Read;

use libc;

use core::core::pmmr::{self, Summable, Backend, HashSum, VecBackend};
use core::ser;

@@ -35,6 +38,10 @@ pub const RM_LOG_MAX_NODES: usize = 10000;
/// which writes are append only. Reads are backed by a memory map (mmap(2)),
/// relying on the operating system for fast access and caching. The memory
/// map is reallocated to expand it when new writes are flushed.
///
/// Despite being append-only, the file can still be pruned and truncated. The
/// former simply happens by rewriting it, ignoring some of the data. The
/// latter by truncating the underlying file and re-creating the mmap.
struct AppendOnlyFile {
    path: String,
    file: File,

@@ -54,9 +61,10 @@ impl AppendOnlyFile {
            file: file,
            mmap: None,
        };
        let file_path = Path::new(&path);
        if file_path.exists() {
            aof.sync();
        if let Ok(sz) = aof.size() {
            if sz > 0 {
                aof.sync()?;
            }
        }
        Ok(aof)
    }

@@ -127,6 +135,17 @@ impl AppendOnlyFile {
        }
    }

    /// Truncates the underlying file to the provided offset
    fn truncate(&self, offs: u64) -> io::Result<()> {
        let fd = self.file.as_raw_fd();
        let res = unsafe { libc::ftruncate64(fd, offs as i64) };
        if res == -1 {
            Err(io::Error::last_os_error())
        } else {
            Ok(())
        }
    }

    /// Current size of the file in bytes.
    fn size(&self) -> io::Result<u64> {
        fs::metadata(&self.path).map(|md| md.len())
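A sketch of the intended AppendOnlyFile lifecycle (illustrative only; it assumes `append` returns `io::Result<()>`, which is what the call sites elsewhere in this file suggest):

    // Append bytes, make them durable, then truncate after a rewind.
    fn aof_lifecycle(aof: &mut AppendOnlyFile) -> io::Result<()> {
        aof.append(&[1u8, 2, 3])?;  // written at the end of the file
        aof.sync()?;                // flush and remap so reads see the bytes
        let sz = aof.size()?;
        aof.truncate(sz - 3)?;      // drop the last record after a rewind
        Ok(())
    }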
@@ -140,9 +159,10 @@ impl AppendOnlyFile {
/// MMR data file and truncate the remove log.
struct RemoveLog {
    path: String,
    file: File,
    // Ordered vector of MMR positions that should get eventually removed.
    removed: Vec<u64>,
    removed: Vec<(u64, u32)>,
    // Holds positions temporarily until flush is called.
    removed_tmp: Vec<(u64, u32)>,
}

impl RemoveLog {

@@ -150,40 +170,65 @@ impl RemoveLog {
    /// for fast checking.
    fn open(path: String) -> io::Result<RemoveLog> {
        let removed = read_ordered_vec(path.clone())?;
        let file = OpenOptions::new().append(true).create(true).open(path.clone())?;
        Ok(RemoveLog {
            path: path,
            file: file,
            removed: removed,
            removed_tmp: vec![],
        })
    }
    /// Truncates and empties the remove log.
    fn truncate(&mut self) -> io::Result<()> {
        self.removed = vec![];
        self.file = File::create(self.path.clone())?;
    fn truncate(&mut self, last_offs: u32) -> io::Result<()> {
        // simplifying assumption: we always remove older than what's in tmp
        self.removed_tmp = vec![];

        if last_offs == 0 {
            self.removed = vec![];
        } else {
            self.removed = self.removed.iter().filter(|&&(_, idx)| { idx < last_offs }).map(|x| *x).collect();
        }
        Ok(())
    }

    /// Append a set of new positions to the remove log. Both adds those
    /// positions
    /// to the ordered in-memory set and to the file.
    fn append(&mut self, elmts: Vec<u64>) -> io::Result<()> {
    /// positions to the ordered in-memory set and to the file.
    fn append(&mut self, elmts: Vec<u64>, index: u32) -> io::Result<()> {
        for elmt in elmts {
            match self.removed.binary_search(&elmt) {
            match self.removed_tmp.binary_search(&(elmt, index)) {
                Ok(_) => continue,
                Err(idx) => {
                    self.file.write_all(&ser::ser_vec(&elmt).unwrap()[..])?;
                    self.removed.insert(idx, elmt);
                    self.removed_tmp.insert(idx, (elmt, index));
                }
            }
        }
        self.file.sync_data()
        Ok(())
    }

    /// Flush the positions to remove to file.
    fn flush(&mut self) -> io::Result<()> {
        let mut file = File::create(self.path.clone())?;
        for elmt in &self.removed_tmp {
            match self.removed.binary_search(&elmt) {
                Ok(_) => continue,
                Err(idx) => {
                    file.write_all(&ser::ser_vec(&elmt).unwrap()[..])?;
                    self.removed.insert(idx, *elmt);
                }
            }
        }
        self.removed_tmp = vec![];
        file.sync_data()
    }

    /// Discard pending changes
    fn discard(&mut self) {
        self.removed_tmp = vec![];
    }

    /// Whether the remove log currently includes the provided position.
    fn includes(&self, elmt: u64) -> bool {
        self.removed.binary_search(&elmt).is_ok()
        include_tuple(&self.removed, elmt) ||
            include_tuple(&self.removed_tmp, elmt)
    }

    /// Number of positions stored in the remove log.

@@ -192,6 +237,15 @@ impl RemoveLog {
    }
}

// Membership check on the first element of the ordered (position, index) tuples.
fn include_tuple(v: &Vec<(u64, u32)>, e: u64) -> bool {
    match v.binary_search(&(e, 0)) {
        // an entry with index 0 matches exactly
        Ok(_) => true,
        // (e, 0) is the smallest tuple for position e, so any entry for e
        // sorts right at the insertion point
        Err(pos) => pos < v.len() && v[pos].0 == e,
    }
}
/// PMMR persistent backend implementation. Relies on multiple facilities to
|
||||
/// handle writing, reading and pruning.
|
||||
///
|
||||
|
@ -213,6 +267,9 @@ where
|
|||
// buffers addition of new elements until they're fully written to disk
|
||||
buffer: VecBackend<T>,
|
||||
buffer_index: usize,
|
||||
// whether a rewind occurred since last flush, the rewind position, index
|
||||
// and buffer index are captured
|
||||
rewind: Option<(u64, u32, usize)>,
|
||||
}
|
||||
|
||||
impl<T> Backend<T> for PMMRBackend<T>
|
||||
|
@ -226,14 +283,6 @@ where
|
|||
position - (self.buffer_index as u64),
|
||||
data.clone(),
|
||||
)?;
|
||||
for hs in data {
|
||||
if let Err(e) = self.hashsum_file.append(&ser::ser_vec(&hs).unwrap()[..]) {
|
||||
return Err(format!(
|
||||
"Could not write to log storage, disk full? {:?}",
|
||||
e
|
||||
));
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
@ -241,7 +290,7 @@ where
|
|||
fn get(&self, position: u64) -> Option<HashSum<T>> {
|
||||
// First, check if it's in our temporary write buffer
|
||||
let pos_sz = position as usize;
|
||||
if pos_sz - 1 >= self.buffer_index && pos_sz - 1 < self.buffer_index + self.buffer.len() {
|
||||
if pos_sz > self.buffer_index && pos_sz - 1 < self.buffer_index + self.buffer.len() {
|
||||
return self.buffer.get((pos_sz - self.buffer_index) as u64);
|
||||
}
|
||||
|
||||
|
@ -275,12 +324,25 @@ where
|
|||
}
|
||||
}
|
||||
|
||||
fn rewind(&mut self, position: u64, index: u32) -> Result<(), String> {
|
||||
assert!(self.buffer.len() == 0, "Rewind on non empty buffer.");
|
||||
self.remove_log.truncate(index).map_err(|e| format!("Could not truncate remove log: {}", e))?;
|
||||
self.rewind = Some((position, index, self.buffer_index));
|
||||
self.buffer_index = position as usize;
|
||||
Ok(())
|
||||
}
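
Note that rewind only records where to go back to; the actual file truncation happens in sync below, which cuts the hashsum file at position * record_len. A worked example of the offsets, assuming a 32-byte sum (so 64-byte records; sizes are illustrative):

fn main() {
	let record_len: u64 = 32 + 32;             // hash + sum per record (sum size assumed)
	let rewind_pos: u64 = 7;                   // MMR size to rewind to
	let truncate_at = rewind_pos * record_len; // where sync() cuts the hashsum file
	assert_eq!(truncate_at, 448);
}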

	/// Remove HashSums by insertion position
	fn remove(&mut self, positions: Vec<u64>) -> Result<(), String> {
	fn remove(&mut self, positions: Vec<u64>, index: u32) -> Result<(), String> {
		if self.buffer.used_size() > 0 {
			self.buffer.remove(positions.clone()).unwrap();
			for position in &positions {
				let pos_sz = *position as usize;
				if pos_sz > self.buffer_index && pos_sz - 1 < self.buffer_index + self.buffer.len() {
					self.buffer.remove(vec![*position], index).unwrap();
				}
			}
		}
		self.remove_log.append(positions).map_err(|e| {
		self.remove_log.append(positions, index).map_err(|e| {
			format!("Could not write to log storage, disk full? {:?}", e)
		})
	}
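
Both get and remove use the same window test: MMR positions are 1-based while buffer_index counts records already on disk, so position p is buffered iff buffer_index < p <= buffer_index + buffer.len(). The same predicate as a minimal standalone sketch (names are illustrative):

fn in_buffer(pos: u64, buffer_index: usize, buffer_len: usize) -> bool {
	let p = pos as usize;
	p > buffer_index && p - 1 < buffer_index + buffer_len
}

fn main() {
	// with 10 records on disk and 3 buffered, positions 11..=13 hit the buffer
	assert!(in_buffer(11, 10, 3));
	assert!(!in_buffer(10, 10, 3));
	assert!(!in_buffer(14, 10, 3));
}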

@@ -306,16 +368,55 @@ where
			buffer: VecBackend::new(),
			buffer_index: (sz as usize) / record_len,
			pruned_nodes: pmmr::PruneList{pruned_nodes: prune_list},
			rewind: None,
		})
	}

	/// Total size of the PMMR stored by this backend. Only produces the fully
	/// sync'd size.
	pub fn unpruned_size(&self) -> io::Result<u64> {
		let total_shift = self.pruned_nodes.get_shift(::std::u64::MAX).unwrap();
		let rm_len = self.remove_log.len() as u64;
		let record_len = 32 + T::sum_len() as u64;
		let sz = self.hashsum_file.size()?;
		Ok(sz / record_len + rm_len + total_shift)
	}
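
The unpruned size adds back everything the file no longer holds: records still on disk (sz / record_len), positions awaiting compaction in the remove log, and nodes already shifted out by earlier prunings. A worked example with illustrative values:

fn main() {
	let record_len: u64 = 32 + 32; // assuming a 32-byte sum
	let sz: u64 = 7 * record_len;  // 7 records currently in the hashsum file
	let rm_len: u64 = 3;           // positions in the remove log
	let total_shift: u64 = 4;      // nodes compacted away earlier
	assert_eq!(sz / record_len + rm_len + total_shift, 14); // full MMR size
}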

	/// Syncs all files to disk. A call to sync is required to ensure all the
	/// data has been successfully written to disk.
	pub fn sync(&mut self) -> io::Result<()> {
		// truncating the storage file if a rewind occurred
		if let Some((pos, _, _)) = self.rewind {
			let record_len = 32 + T::sum_len() as u64;
			self.hashsum_file.truncate(pos * record_len)?;
		}
		for elem in &self.buffer.elems {
			if let Some(ref hs) = *elem {
				if let Err(e) = self.hashsum_file.append(&ser::ser_vec(&hs).unwrap()[..]) {
					return Err(io::Error::new(
						io::ErrorKind::Interrupted,
						format!("Could not write to log storage, disk full? {:?}", e)
					));
				}
			}
		}

		self.buffer_index = self.buffer_index + self.buffer.len();
		self.buffer.clear();
		self.remove_log.flush()?;
		self.hashsum_file.sync()?;
		self.rewind = None;
		Ok(())
	}

		self.hashsum_file.sync()
	/// Discard the current, non-synced state of the backend.
	pub fn discard(&mut self) {
		if let Some((_, _, bi)) = self.rewind {
			self.buffer_index = bi;
		}
		self.buffer = VecBackend::new();
		self.remove_log.discard();
		self.rewind = None;
	}

	/// Checks the length of the remove log to see if it should get compacted.
@@ -326,6 +427,9 @@ where
	/// If a max_len strictly greater than 0 is provided, the value will be used
	/// to decide whether the remove log has reached its maximum length,
	/// otherwise the RM_LOG_MAX_NODES default value is used.
	///
	/// TODO whatever is calling this should also clean up the commit to position
	/// index in db
	pub fn check_compact(&mut self, max_len: usize) -> io::Result<()> {
		if !(max_len > 0 && self.remove_log.len() > max_len ||
			max_len == 0 && self.remove_log.len() > RM_LOG_MAX_NODES) {

@@ -335,7 +439,7 @@ where
		// 0. validate none of the nodes in the rm log are in the prune list (to
		// avoid accidental double compaction)
		for pos in &self.remove_log.removed[..] {
			if let None = self.pruned_nodes.pruned_pos(*pos) {
			if let None = self.pruned_nodes.pruned_pos(pos.0) {
				// TODO we likely can recover from this by directly jumping to 3
				error!("The remove log contains nodes that are already in the pruned \
					list, a previous compaction likely failed.");

@@ -347,15 +451,15 @@ where
		// remove list
		let tmp_prune_file = format!("{}/{}.prune", self.data_dir, PMMR_DATA_FILE);
		let record_len = (32 + T::sum_len()) as u64;
		let to_rm = self.remove_log.removed.iter().map(|pos| {
			let shift = self.pruned_nodes.get_shift(*pos);
			(*pos - 1 - shift.unwrap()) * record_len
		let to_rm = self.remove_log.removed.iter().map(|&(pos, _)| {
			let shift = self.pruned_nodes.get_shift(pos);
			(pos - 1 - shift.unwrap()) * record_len
		}).collect();
		self.hashsum_file.save_prune(tmp_prune_file.clone(), to_rm, record_len)?;

		// 2. update the prune list and save it in place
		for rm_pos in &self.remove_log.removed[..] {
			self.pruned_nodes.add(*rm_pos);
		for &(rm_pos, _) in &self.remove_log.removed[..] {
			self.pruned_nodes.add(rm_pos);
		}
		write_vec(format!("{}/{}", self.data_dir, PMMR_PRUNED_FILE), &self.pruned_nodes.pruned_nodes)?;

@@ -365,7 +469,8 @@ where
		self.hashsum_file.sync()?;

		// 4. truncate the rm log
		self.remove_log.truncate()?;
		self.remove_log.truncate(0)?;
		self.remove_log.flush()?;

		Ok(())
	}
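
The offset math in step 1 above: a node at MMR position pos sits at byte offset (pos - 1 - shift) * record_len once earlier prunings have shifted the file left by shift records. A small sketch with illustrative numbers:

fn file_offset(pos: u64, shift: u64, record_len: u64) -> u64 {
	// 1-based MMR position -> 0-based record index, minus already-pruned records
	(pos - 1 - shift) * record_len
}

fn main() {
	// position 5, with 2 records pruned before it and 64-byte records
	assert_eq!(file_offset(5, 2, 64), 128);
}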

@@ -80,9 +80,9 @@ fn sumtree_prune_compact() {
	// pruning some choice nodes
	{
		let mut pmmr = PMMR::at(&mut backend, mmr_size);
		pmmr.prune(1);
		pmmr.prune(4);
		pmmr.prune(5);
		pmmr.prune(1).unwrap();
		pmmr.prune(4).unwrap();
		pmmr.prune(5).unwrap();
	}
	backend.sync().unwrap();

@@ -118,8 +118,8 @@ fn sumtree_reload() {
	{
		let mut pmmr = PMMR::at(&mut backend, mmr_size);
		root = pmmr.root();
		pmmr.prune(1);
		pmmr.prune(4);
		pmmr.prune(1).unwrap();
		pmmr.prune(4).unwrap();
	}
	backend.sync().unwrap();
	backend.check_compact(1).unwrap();

@@ -128,7 +128,7 @@ fn sumtree_reload() {
	// prune some more to get rm log data
	{
		let mut pmmr = PMMR::at(&mut backend, mmr_size);
		pmmr.prune(5);
		pmmr.prune(5).unwrap();
	}
	backend.sync().unwrap();
}

@@ -169,7 +169,7 @@ fn load(pos: u64, elems: &[TestElem],
	let mut pmmr = PMMR::at(backend, pos);
	for elem in elems {
		pmmr.push(elem.clone());
		pmmr.push(elem.clone()).unwrap();
	}
	pmmr.unpruned_size()
}