mirror of https://github.com/mimblewimble/grin.git (synced 2025-02-01 17:01:09 +03:00)

Take the 'Sum' out of 'Sumtree' (#702)

* beginning to remove sum
* continuing to remove sumtree sums
* finished removing sums from pmmr core
* renamed sumtree files, and completed changes + test updates in core and store
* updating grin/chain to include removelogs
* integration of flatfile structure, changes to chain/sumtree to start using them
* tests on chain, core and store passing
* cleaning up api and tests
* formatting
* flatfiles stored as part of PMMR backend instead
* all compiling and tests running
* documentation
* added remove + pruning to flatfiles
* remove unneeded enum
* adding sumtree root struct

This commit is contained in:
parent c2ca6ad03f
commit 05d1c6c817

17 changed files with 893 additions and 879 deletions
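The core of the change: a PMMR node used to be a HashSum { hash, sum } pair whose sums were combined up the tree; after this commit a node is a plain Hash, and leaf data (outputs, range proofs, kernels) lives in flat files managed by the backend. A rough sketch of handling the new node shape (the helper itself is hypothetical; the tuple shape comes from the Backend::get signature changed at the bottom of this diff):

	// Before: fn get(&self, position: u64) -> Option<HashSum<T>>;
	// After:  fn get(&self, position: u64, include_data: bool) -> Option<(Hash, Option<T>)>;
	fn describe_node<T: std::fmt::Debug>(node: Option<(Hash, Option<T>)>) {
		match node {
			// a leaf fetched with include_data carries its flat-file element
			Some((hash, Some(data))) => println!("leaf {:?} -> {:?}", hash, data),
			// any other node is a bare hash
			Some((hash, None)) => println!("node {:?} (hash only)", hash),
			None => println!("pruned or out of range"),
		}
	}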
@@ -206,13 +206,13 @@ impl Handler for UtxoHandler {
 }
 
 // Sum tree handler. Retrieve the roots:
-// GET /v1/sumtrees/roots
+// GET /v1/pmmrtrees/roots
 //
 // Last inserted nodes::
-// GET /v1/sumtrees/lastutxos (gets last 10)
-// GET /v1/sumtrees/lastutxos?n=5
-// GET /v1/sumtrees/lastrangeproofs
-// GET /v1/sumtrees/lastkernels
+// GET /v1/pmmrtrees/lastutxos (gets last 10)
+// GET /v1/pmmrtrees/lastutxos?n=5
+// GET /v1/pmmrtrees/lastrangeproofs
+// GET /v1/pmmrtrees/lastkernels
 struct SumTreeHandler {
 	chain: Weak<chain::Chain>,
 }

@@ -224,18 +224,18 @@ impl SumTreeHandler {
 	}
 
 	// gets last n utxos inserted in to the tree
-	fn get_last_n_utxo(&self, distance: u64) -> Vec<SumTreeNode> {
-		SumTreeNode::get_last_n_utxo(w(&self.chain), distance)
+	fn get_last_n_utxo(&self, distance: u64) -> Vec<PmmrTreeNode> {
+		PmmrTreeNode::get_last_n_utxo(w(&self.chain), distance)
 	}
 
 	// gets last n utxos inserted in to the tree
-	fn get_last_n_rangeproof(&self, distance: u64) -> Vec<SumTreeNode> {
-		SumTreeNode::get_last_n_rangeproof(w(&self.chain), distance)
+	fn get_last_n_rangeproof(&self, distance: u64) -> Vec<PmmrTreeNode> {
+		PmmrTreeNode::get_last_n_rangeproof(w(&self.chain), distance)
 	}
 
 	// gets last n utxos inserted in to the tree
-	fn get_last_n_kernel(&self, distance: u64) -> Vec<SumTreeNode> {
-		SumTreeNode::get_last_n_kernel(w(&self.chain), distance)
+	fn get_last_n_kernel(&self, distance: u64) -> Vec<PmmrTreeNode> {
+		PmmrTreeNode::get_last_n_kernel(w(&self.chain), distance)
 	}
 }
 

@@ -620,10 +620,10 @@ pub fn start_rest_apis<T>(
 		"get chain".to_string(),
 		"get chain/utxos".to_string(),
 		"get status".to_string(),
-		"get sumtrees/roots".to_string(),
-		"get sumtrees/lastutxos?n=10".to_string(),
-		"get sumtrees/lastrangeproofs".to_string(),
-		"get sumtrees/lastkernels".to_string(),
+		"get pmmrtrees/roots".to_string(),
+		"get pmmrtrees/lastutxos?n=10".to_string(),
+		"get pmmrtrees/lastrangeproofs".to_string(),
+		"get pmmrtrees/lastkernels".to_string(),
 		"get pool".to_string(),
 		"post pool/push".to_string(),
 		"post peers/a.b.c.d:p/ban".to_string(),

@@ -641,7 +641,7 @@ pub fn start_rest_apis<T>(
 		chain_tip: get "/chain" => chain_tip_handler,
 		chain_utxos: get "/chain/utxos/*" => utxo_handler,
 		status: get "/status" => status_handler,
-		sumtree_roots: get "/sumtrees/*" => sumtree_handler,
+		sumtree_roots: get "/pmmrtrees/*" => sumtree_handler,
 		pool_info: get "/pool" => pool_info_handler,
 		pool_push: post "/pool/push" => pool_push_handler,
 		peers_all: get "/peers/all" => peers_all_handler,
@@ -16,7 +16,6 @@ use std::sync::Arc;
 
 use core::{core, ser};
 use core::core::hash::Hashed;
-use core::core::SumCommit;
 use core::core::SwitchCommitHash;
 use chain;
 use p2p;

@@ -89,8 +88,6 @@ impl Status {
 pub struct SumTrees {
 	/// UTXO Root Hash
 	pub utxo_root_hash: String,
-	// UTXO Root Sum
-	pub utxo_root_sum: String,
 	// Rangeproof root hash
 	pub range_proof_root_hash: String,
 	// Kernel set root hash

@@ -101,10 +98,9 @@ impl SumTrees {
 	pub fn from_head(head: Arc<chain::Chain>) -> SumTrees {
 		let roots = head.get_sumtree_roots();
 		SumTrees {
-			utxo_root_hash: roots.0.hash.to_hex(),
-			utxo_root_sum: roots.0.sum.to_hex(),
-			range_proof_root_hash: roots.1.hash.to_hex(),
-			kernel_root_hash: roots.2.hash.to_hex(),
+			utxo_root_hash: roots.0.to_hex(),
+			range_proof_root_hash: roots.1.to_hex(),
+			kernel_root_hash: roots.2.to_hex(),
 		}
 	}
 }

@@ -112,45 +108,40 @@ impl SumTrees {
 /// Wrapper around a list of sumtree nodes, so it can be
 /// presented properly via json
 #[derive(Serialize, Deserialize, Debug, Clone)]
-pub struct SumTreeNode {
+pub struct PmmrTreeNode {
 	// The hash
 	pub hash: String,
-	// SumCommit (features|commitment), optional (only for utxos)
-	pub sum: Option<SumCommit>,
 }
 
-impl SumTreeNode {
-	pub fn get_last_n_utxo(chain: Arc<chain::Chain>, distance: u64) -> Vec<SumTreeNode> {
+impl PmmrTreeNode {
+	pub fn get_last_n_utxo(chain: Arc<chain::Chain>, distance: u64) -> Vec<PmmrTreeNode> {
 		let mut return_vec = Vec::new();
 		let last_n = chain.get_last_n_utxo(distance);
 		for x in last_n {
-			return_vec.push(SumTreeNode {
-				hash: util::to_hex(x.hash.to_vec()),
-				sum: Some(x.sum),
+			return_vec.push(PmmrTreeNode {
+				hash: util::to_hex(x.0.to_vec()),
 			});
 		}
 		return_vec
 	}
 
-	pub fn get_last_n_rangeproof(head: Arc<chain::Chain>, distance: u64) -> Vec<SumTreeNode> {
+	pub fn get_last_n_rangeproof(head: Arc<chain::Chain>, distance: u64) -> Vec<PmmrTreeNode> {
 		let mut return_vec = Vec::new();
 		let last_n = head.get_last_n_rangeproof(distance);
 		for elem in last_n {
-			return_vec.push(SumTreeNode {
-				hash: util::to_hex(elem.hash.to_vec()),
-				sum: None,
+			return_vec.push(PmmrTreeNode {
+				hash: util::to_hex(elem.0.to_vec()),
 			});
 		}
 		return_vec
 	}
 
-	pub fn get_last_n_kernel(head: Arc<chain::Chain>, distance: u64) -> Vec<SumTreeNode> {
+	pub fn get_last_n_kernel(head: Arc<chain::Chain>, distance: u64) -> Vec<PmmrTreeNode> {
 		let mut return_vec = Vec::new();
 		let last_n = head.get_last_n_kernel(distance);
 		for elem in last_n {
-			return_vec.push(SumTreeNode {
-				hash: util::to_hex(elem.hash.to_vec()),
-				sum: None,
+			return_vec.push(PmmrTreeNode {
+				hash: util::to_hex(elem.0.to_vec()),
 			});
 		}
 		return_vec
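A visible effect of dropping the sum field above: the pmmrtrees endpoints now serve bare hashes. A hypothetical response sketch (hash values invented) for comparison:

	// GET /v1/pmmrtrees/lastutxos?n=2 would now serialize to something like:
	// [
	//   { "hash": "09c0d2..." },
	//   { "hash": "2f7a41..." }
	// ]
	// whereas each node previously also carried a "sum" (features|commitment).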
@@ -20,21 +20,18 @@ use std::fs::File;
 use std::sync::{Arc, Mutex, RwLock};
 use std::time::{Duration, Instant};
 
-use util::secp::pedersen::RangeProof;
-use core::core::{Input, OutputIdentifier, SumCommit};
-use core::core::hash::Hashed;
-use core::core::pmmr::{HashSum, NoSum};
+use core::core::{Input, OutputIdentifier, OutputStoreable, TxKernel};
+use core::core::hash::{Hash, Hashed};
 use core::global;
 
-use core::core::{Block, BlockHeader, TxKernel};
+use core::core::{Block, BlockHeader};
 use core::core::target::Difficulty;
-use core::core::hash::Hash;
 use grin_store::Error::NotFoundErr;
 use pipe;
 use store;
 use sumtree;
 use types::*;
+use util::secp::pedersen::RangeProof;
 use util::LOGGER;
 

@@ -428,9 +425,9 @@ impl Chain {
 			Ok(extension.roots())
 		})?;
 
-		b.header.utxo_root = roots.0.hash;
-		b.header.range_proof_root = roots.1.hash;
-		b.header.kernel_root = roots.2.hash;
+		b.header.utxo_root = roots.utxo_root;
+		b.header.range_proof_root = roots.rproof_root;
+		b.header.kernel_root = roots.kernel_root;
 		Ok(())
 	}
 

@@ -438,9 +435,9 @@ impl Chain {
 	pub fn get_sumtree_roots(
 		&self,
 	) -> (
-		HashSum<SumCommit>,
-		HashSum<NoSum<RangeProof>>,
-		HashSum<NoSum<TxKernel>>,
+		Hash,
+		Hash,
+		Hash,
 	) {
 		let mut sumtrees = self.sumtrees.write().unwrap();
 		sumtrees.roots()

@@ -507,7 +504,7 @@ impl Chain {
 		{
 			let mut head = self.head.lock().unwrap();
 			*head = Tip::from_block(&header);
-			self.store.save_body_head(&head)?;
+			let _ = self.store.save_body_head(&head);
 			self.store.save_header_height(&header)?;
 		}
 

@@ -517,19 +514,19 @@ impl Chain {
 	}
 
 	/// returns the last n nodes inserted into the utxo sum tree
-	pub fn get_last_n_utxo(&self, distance: u64) -> Vec<HashSum<SumCommit>> {
+	pub fn get_last_n_utxo(&self, distance: u64) -> Vec<(Hash, Option<OutputStoreable>)> {
 		let mut sumtrees = self.sumtrees.write().unwrap();
 		sumtrees.last_n_utxo(distance)
 	}
 
 	/// as above, for rangeproofs
-	pub fn get_last_n_rangeproof(&self, distance: u64) -> Vec<HashSum<NoSum<RangeProof>>> {
+	pub fn get_last_n_rangeproof(&self, distance: u64) -> Vec<(Hash, Option<RangeProof>)> {
 		let mut sumtrees = self.sumtrees.write().unwrap();
 		sumtrees.last_n_rangeproof(distance)
 	}
 
 	/// as above, for kernels
-	pub fn get_last_n_kernel(&self, distance: u64) -> Vec<HashSum<NoSum<TxKernel>>> {
+	pub fn get_last_n_kernel(&self, distance: u64) -> Vec<(Hash, Option<TxKernel>)> {
 		let mut sumtrees = self.sumtrees.write().unwrap();
 		sumtrees.last_n_kernel(distance)
 	}
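Since the last_n accessors now hand back (Hash, Option<…>) tuples instead of HashSums, a caller iterates them roughly like this (a sketch; `chain` is a chain::Chain as above, hex helper as used in the API hunks earlier):

	for (hash, data) in chain.get_last_n_utxo(10) {
		// hash-only nodes carry None; leaves fetched with their flat-file
		// element carry Some(OutputStoreable)
		if let Some(out) = data {
			println!("{} -> {:?}", util::to_hex(hash.to_vec()), out.commit);
		}
	}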
@@ -305,28 +305,28 @@ fn validate_block(
 		// apply the new block to the MMR trees and check the new root hashes
 		ext.apply_block(&b)?;
 
-		let (utxo_root, rproof_root, kernel_root) = ext.roots();
-		if utxo_root.hash != b.header.utxo_root || rproof_root.hash != b.header.range_proof_root
-			|| kernel_root.hash != b.header.kernel_root
+		let roots = ext.roots();
+		if roots.utxo_root != b.header.utxo_root || roots.rproof_root != b.header.range_proof_root
+			|| roots.kernel_root != b.header.kernel_root
 		{
 			ext.dump(false);
 
 			debug!(
 				LOGGER,
 				"validate_block: utxo roots - {:?}, {:?}",
-				utxo_root.hash,
+				roots.utxo_root,
 				b.header.utxo_root,
 			);
 			debug!(
 				LOGGER,
 				"validate_block: rproof roots - {:?}, {:?}",
-				rproof_root.hash,
+				roots.rproof_root,
 				b.header.range_proof_root,
 			);
 			debug!(
 				LOGGER,
 				"validate_block: kernel roots - {:?}, {:?}",
-				kernel_root.hash,
+				roots.kernel_root,
 				b.header.kernel_root,
 			);
 
@@ -1,4 +1,3 @@
-
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.

@@ -26,26 +25,26 @@ use util::static_secp_instance;
 use util::secp::pedersen::{RangeProof, Commitment};
 
 use core::consensus::reward;
-use core::core::{Block, BlockHeader, SumCommit, Input, Output, OutputIdentifier, OutputFeatures, TxKernel};
-use core::core::pmmr::{self, HashSum, NoSum, Summable, PMMR};
-use core::core::hash::Hashed;
-use core::ser;
+use core::core::{Block, BlockHeader, Input, Output, OutputIdentifier,
+	OutputFeatures, OutputStoreable, TxKernel};
+use core::core::pmmr::{self, PMMR};
+use core::core::hash::{Hash, Hashed};
+use core::ser::{self, PMMRable};
 
 use grin_store;
-use grin_store::sumtree::{PMMRBackend, AppendOnlyFile};
-use types::ChainStore;
-use types::Error;
+use grin_store::pmmr::PMMRBackend;
+use types::{ChainStore, SumTreeRoots, Error};
 use util::{LOGGER, zip};
 
 const SUMTREES_SUBDIR: &'static str = "sumtrees";
 const UTXO_SUBDIR: &'static str = "utxo";
 const RANGE_PROOF_SUBDIR: &'static str = "rangeproof";
 const KERNEL_SUBDIR: &'static str = "kernel";
-const KERNEL_FILE: &'static str = "kernel_full_data.bin";
 const SUMTREES_ZIP: &'static str = "sumtrees_snapshot.zip";
 
 struct PMMRHandle<T>
 where
-	T: Summable + Clone,
+	T: PMMRable,
 {
 	backend: PMMRBackend<T>,
 	last_pos: u64,
@@ -53,7 +52,7 @@ where
 
 impl<T> PMMRHandle<T>
 where
-	T: Summable + Clone,
+	T: PMMRable,
 {
 	fn new(root_dir: String, file_name: &str) -> Result<PMMRHandle<T>, Error> {
 		let path = Path::new(&root_dir).join(SUMTREES_SUBDIR).join(file_name);

@@ -70,20 +69,17 @@ where
 /// An easy to manipulate structure holding the 3 sum trees necessary to
 /// validate blocks and capturing the UTXO set, the range proofs and the
 /// kernels. Also handles the index of Commitments to positions in the
-/// output and range proof sum trees.
+/// output and range proof pmmr trees.
 ///
 /// Note that the index is never authoritative, only the trees are
 /// guaranteed to indicate whether an output is spent or not. The index
 /// may have commitments that have already been spent, even with
 /// pruning enabled.
-///
-/// In addition of the sumtrees, this maintains the full list of kernel
-/// data so it can be easily packaged for sync or validation.
 pub struct SumTrees {
-	output_pmmr_h: PMMRHandle<SumCommit>,
-	rproof_pmmr_h: PMMRHandle<NoSum<RangeProof>>,
-	kernel_pmmr_h: PMMRHandle<NoSum<TxKernel>>,
-	kernel_file: AppendOnlyFile,
+	utxo_pmmr_h: PMMRHandle<OutputStoreable>,
+	rproof_pmmr_h: PMMRHandle<RangeProof>,
+	kernel_pmmr_h: PMMRHandle<TxKernel>,
 
 	// chain store used as index of commitments to MMR positions
 	commit_index: Arc<ChainStore>,
@@ -92,16 +88,20 @@ pub struct SumTrees {
 impl SumTrees {
 	/// Open an existing or new set of backends for the SumTrees
 	pub fn open(root_dir: String, commit_index: Arc<ChainStore>) -> Result<SumTrees, Error> {
-		let mut kernel_file_path: PathBuf = [&root_dir, SUMTREES_SUBDIR, KERNEL_SUBDIR].iter().collect();
+		let utxo_file_path: PathBuf = [&root_dir, SUMTREES_SUBDIR, UTXO_SUBDIR].iter().collect();
+		fs::create_dir_all(utxo_file_path.clone())?;
+
+		let rproof_file_path: PathBuf = [&root_dir, SUMTREES_SUBDIR, RANGE_PROOF_SUBDIR].iter().collect();
+		fs::create_dir_all(rproof_file_path.clone())?;
+
+		let kernel_file_path: PathBuf = [&root_dir, SUMTREES_SUBDIR, KERNEL_SUBDIR].iter().collect();
 		fs::create_dir_all(kernel_file_path.clone())?;
-		kernel_file_path.push(KERNEL_FILE);
-		let kernel_file = AppendOnlyFile::open(kernel_file_path.to_str().unwrap().to_owned())?;
 
 		Ok(SumTrees {
-			output_pmmr_h: PMMRHandle::new(root_dir.clone(), UTXO_SUBDIR)?,
+			utxo_pmmr_h: PMMRHandle::new(root_dir.clone(), UTXO_SUBDIR)?,
 			rproof_pmmr_h: PMMRHandle::new(root_dir.clone(), RANGE_PROOF_SUBDIR)?,
 			kernel_pmmr_h: PMMRHandle::new(root_dir.clone(), KERNEL_SUBDIR)?,
-			kernel_file: kernel_file,
 			commit_index: commit_index,
 		})
 	}
@@ -109,19 +109,19 @@ impl SumTrees {
 	/// Check is an output is unspent.
 	/// We look in the index to find the output MMR pos.
 	/// Then we check the entry in the output MMR and confirm the hash matches.
-	pub fn is_unspent(&mut self, output: &OutputIdentifier) -> Result<(), Error> {
-		match self.commit_index.get_output_pos(&output.commit) {
+	pub fn is_unspent(&mut self, output_id: &OutputIdentifier) -> Result<(), Error> {
+		match self.commit_index.get_output_pos(&output_id.commit) {
 			Ok(pos) => {
-				let output_pmmr = PMMR::at(
-					&mut self.output_pmmr_h.backend,
-					self.output_pmmr_h.last_pos,
+				let output_pmmr:PMMR<OutputStoreable, _> = PMMR::at(
+					&mut self.utxo_pmmr_h.backend,
+					self.utxo_pmmr_h.last_pos,
 				);
-				if let Some(HashSum { hash, sum: _ }) = output_pmmr.get(pos) {
-					let sum_commit = output.as_sum_commit();
-					let hash_sum = HashSum::from_summable(pos, &sum_commit);
-					if hash == hash_sum.hash {
+				if let Some((hash, _)) = output_pmmr.get(pos, false) {
+					println!("Getting output ID hash");
+					if hash == output_id.hash() {
 						Ok(())
 					} else {
+						println!("MISMATCH BECAUSE THE BLOODY THING MISMATCHES");
 						Err(Error::SumTreeErr(format!("sumtree hash mismatch")))
 					}
 				} else {
@@ -164,20 +164,21 @@ impl SumTrees {
 
 	/// returns the last N nodes inserted into the tree (i.e. the 'bottom'
 	/// nodes at level 0
-	pub fn last_n_utxo(&mut self, distance: u64) -> Vec<HashSum<SumCommit>> {
-		let output_pmmr = PMMR::at(&mut self.output_pmmr_h.backend, self.output_pmmr_h.last_pos);
-		output_pmmr.get_last_n_insertions(distance)
+	/// TODO: These need to return the actual data from the flat-files instead of hashes now
+	pub fn last_n_utxo(&mut self, distance: u64) -> Vec<(Hash, Option<OutputStoreable>)> {
+		let utxo_pmmr:PMMR<OutputStoreable, _> = PMMR::at(&mut self.utxo_pmmr_h.backend, self.utxo_pmmr_h.last_pos);
+		utxo_pmmr.get_last_n_insertions(distance)
 	}
 
 	/// as above, for range proofs
-	pub fn last_n_rangeproof(&mut self, distance: u64) -> Vec<HashSum<NoSum<RangeProof>>> {
-		let rproof_pmmr = PMMR::at(&mut self.rproof_pmmr_h.backend, self.rproof_pmmr_h.last_pos);
+	pub fn last_n_rangeproof(&mut self, distance: u64) -> Vec<(Hash, Option<RangeProof>)> {
+		let rproof_pmmr:PMMR<RangeProof, _> = PMMR::at(&mut self.rproof_pmmr_h.backend, self.rproof_pmmr_h.last_pos);
 		rproof_pmmr.get_last_n_insertions(distance)
 	}
 
 	/// as above, for kernels
-	pub fn last_n_kernel(&mut self, distance: u64) -> Vec<HashSum<NoSum<TxKernel>>> {
-		let kernel_pmmr = PMMR::at(&mut self.kernel_pmmr_h.backend, self.kernel_pmmr_h.last_pos);
+	pub fn last_n_kernel(&mut self, distance: u64) -> Vec<(Hash, Option<TxKernel>)> {
+		let kernel_pmmr:PMMR<TxKernel, _> = PMMR::at(&mut self.kernel_pmmr_h.backend, self.kernel_pmmr_h.last_pos);
 		kernel_pmmr.get_last_n_insertions(distance)
 	}
 

@@ -186,17 +187,19 @@ impl SumTrees {
 		indexes_at(block, self.commit_index.deref())
 	}
 
 
 	/// Get sum tree roots
+	/// TODO: Return data instead of hashes
 	pub fn roots(
 		&mut self,
 	) -> (
-		HashSum<SumCommit>,
-		HashSum<NoSum<RangeProof>>,
-		HashSum<NoSum<TxKernel>>,
+		Hash,
+		Hash,
+		Hash,
 	) {
-		let output_pmmr = PMMR::at(&mut self.output_pmmr_h.backend, self.output_pmmr_h.last_pos);
-		let rproof_pmmr = PMMR::at(&mut self.rproof_pmmr_h.backend, self.rproof_pmmr_h.last_pos);
-		let kernel_pmmr = PMMR::at(&mut self.kernel_pmmr_h.backend, self.kernel_pmmr_h.last_pos);
+		let output_pmmr:PMMR<OutputStoreable, _> = PMMR::at(&mut self.utxo_pmmr_h.backend, self.utxo_pmmr_h.last_pos);
+		let rproof_pmmr:PMMR<RangeProof, _> = PMMR::at(&mut self.rproof_pmmr_h.backend, self.rproof_pmmr_h.last_pos);
+		let kernel_pmmr:PMMR<TxKernel, _> = PMMR::at(&mut self.kernel_pmmr_h.backend, self.kernel_pmmr_h.last_pos);
 		(output_pmmr.root(), rproof_pmmr.root(), kernel_pmmr.root())
 	}
 }
@@ -231,26 +234,23 @@
 	match res {
 		Err(e) => {
 			debug!(LOGGER, "Error returned, discarding sumtree extension.");
-			trees.output_pmmr_h.backend.discard();
+			trees.utxo_pmmr_h.backend.discard();
 			trees.rproof_pmmr_h.backend.discard();
 			trees.kernel_pmmr_h.backend.discard();
-			trees.kernel_file.discard();
 			Err(e)
 		}
 		Ok(r) => {
 			if rollback {
 				debug!(LOGGER, "Rollbacking sumtree extension.");
-				trees.output_pmmr_h.backend.discard();
+				trees.utxo_pmmr_h.backend.discard();
 				trees.rproof_pmmr_h.backend.discard();
 				trees.kernel_pmmr_h.backend.discard();
-				trees.kernel_file.discard();
 			} else {
 				debug!(LOGGER, "Committing sumtree extension.");
-				trees.output_pmmr_h.backend.sync()?;
+				trees.utxo_pmmr_h.backend.sync()?;
 				trees.rproof_pmmr_h.backend.sync()?;
 				trees.kernel_pmmr_h.backend.sync()?;
-				trees.kernel_file.flush()?;
-				trees.output_pmmr_h.last_pos = sizes.0;
+				trees.utxo_pmmr_h.last_pos = sizes.0;
 				trees.rproof_pmmr_h.last_pos = sizes.1;
 				trees.kernel_pmmr_h.last_pos = sizes.2;
 			}
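For reference, the discard/sync logic above backs the `extending` unit of work; the chain code earlier in this diff drives it like so (condensed from the @ -428,9 hunk, not new code):

	let roots = sumtree::extending(&mut sumtrees, |extension| {
		// any Err here causes all three PMMR backends to be discarded
		extension.apply_block(&b)?;
		Ok(extension.roots())
	})?;

	// on success the backends were synced and last_pos updated
	b.header.utxo_root = roots.utxo_root;
	b.header.range_proof_root = roots.rproof_root;
	b.header.kernel_root = roots.kernel_root;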
@@ -265,11 +265,10 @@
 /// reversible manner within a unit of work provided by the `extending`
 /// function.
 pub struct Extension<'a> {
-	output_pmmr: PMMR<'a, SumCommit, PMMRBackend<SumCommit>>,
-	rproof_pmmr: PMMR<'a, NoSum<RangeProof>, PMMRBackend<NoSum<RangeProof>>>,
-	kernel_pmmr: PMMR<'a, NoSum<TxKernel>, PMMRBackend<NoSum<TxKernel>>>,
+	utxo_pmmr: PMMR<'a, OutputStoreable, PMMRBackend<OutputStoreable>>,
+	rproof_pmmr: PMMR<'a, RangeProof, PMMRBackend<RangeProof>>,
+	kernel_pmmr: PMMR<'a, TxKernel, PMMRBackend<TxKernel>>,
 
-	kernel_file: &'a mut AppendOnlyFile,
 	commit_index: Arc<ChainStore>,
 	new_output_commits: HashMap<Commitment, u64>,
 	new_kernel_excesses: HashMap<Commitment, u64>,

@@ -284,9 +283,9 @@ impl<'a> Extension<'a> {
 	) -> Extension<'a> {
 
 		Extension {
-			output_pmmr: PMMR::at(
-				&mut trees.output_pmmr_h.backend,
-				trees.output_pmmr_h.last_pos,
+			utxo_pmmr: PMMR::at(
+				&mut trees.utxo_pmmr_h.backend,
+				trees.utxo_pmmr_h.last_pos,
 			),
 			rproof_pmmr: PMMR::at(
 				&mut trees.rproof_pmmr_h.backend,

@@ -296,10 +295,10 @@ impl<'a> Extension<'a> {
 				&mut trees.kernel_pmmr_h.backend,
 				trees.kernel_pmmr_h.last_pos,
 			),
-			kernel_file: &mut trees.kernel_file,
 			commit_index: commit_index,
 			new_output_commits: HashMap::new(),
 			new_kernel_excesses: HashMap::new(),
 
 			rollback: false,
 		}
 	}
@@ -354,15 +353,14 @@ impl<'a> Extension<'a> {
 	fn apply_input(&mut self, input: &Input, height: u64) -> Result<(), Error> {
 		let commit = input.commitment();
 		let pos_res = self.get_output_pos(&commit);
+		let output_id_hash = OutputIdentifier::from_input(input).hash();
 		if let Ok(pos) = pos_res {
-			if let Some(HashSum { hash, sum: _ }) = self.output_pmmr.get(pos) {
-				let sum_commit = SumCommit::from_input(&input);
-
-				// check hash from pmmr matches hash from input
+			if let Some((read_hash, read_elem)) = self.utxo_pmmr.get(pos, true) {
+				// check hash from pmmr matches hash from input (or corresponding output)
 				// if not then the input is not being honest about
 				// what it is attempting to spend...
-				let hash_sum = HashSum::from_summable(pos, &sum_commit);
-				if hash != hash_sum.hash {
+				if output_id_hash != read_hash ||
+					output_id_hash != read_elem.expect("no output at position").hash() {
 					return Err(Error::SumTreeErr(format!("output pmmr hash mismatch")));
 				}
 

@@ -379,9 +377,10 @@ impl<'a> Extension<'a> {
 			}
 		}
 
-		// Now prune the output_pmmr and rproof_pmmr.
+		// Now prune the utxo_pmmr, rproof_pmmr and their storage.
 		// Input is not valid if we cannot prune successfully (to spend an unspent output).
-		match self.output_pmmr.prune(pos, height as u32) {
+		// TODO: rm log, skip list for utxo and range proofs
+		match self.utxo_pmmr.prune(pos, height as u32) {
 			Ok(true) => {
 				self.rproof_pmmr
 					.prune(pos, height as u32)

@@ -398,7 +397,6 @@ impl<'a> Extension<'a> {
 
 	fn apply_output(&mut self, out: &Output) -> Result<(), Error> {
 		let commit = out.commitment();
-		let sum_commit = SumCommit::from_output(out);
 
 		if let Ok(pos) = self.get_output_pos(&commit) {
 			// we need to check whether the commitment is in the current MMR view
@@ -406,27 +404,24 @@ impl<'a> Extension<'a> {
 			// (non-historical node will have a much smaller one)
 			// note that this doesn't show the commitment *never* existed, just
 			// that this is not an existing unspent commitment right now
-			if let Some(c) = self.output_pmmr.get(pos) {
-				let hash_sum = HashSum::from_summable(pos, &sum_commit);
+			if let Some((hash, _)) = self.utxo_pmmr.get(pos, false) {
 
 				// processing a new fork so we may get a position on the old
 				// fork that exists but matches a different node
 				// filtering that case out
-				if c.hash == hash_sum.hash {
+				if hash == OutputStoreable::from_output(out).hash() {
 					return Err(Error::DuplicateCommitment(commit));
 				}
 			}
 		}
-		// push new outputs commitments in their MMR and save them in the index
-		let pos = self.output_pmmr
-			.push(sum_commit)
+		// push new outputs in their MMR and save them in the index
+		let pos = self.utxo_pmmr
+			.push(OutputStoreable::from_output(out))
 			.map_err(&Error::SumTreeErr)?;
 
 		self.new_output_commits.insert(out.commitment(), pos);
 
-		// push range proofs in their MMR
+		// push range proofs in their MMR and file
 		self.rproof_pmmr
-			.push(NoSum(out.proof))
+			.push(out.proof)
 			.map_err(&Error::SumTreeErr)?;
 		Ok(())
 	}

@@ -434,9 +429,8 @@
 	fn apply_kernel(&mut self, kernel: &TxKernel) -> Result<(), Error> {
 		if let Ok(pos) = self.get_kernel_pos(&kernel.excess) {
 			// same as outputs
-			if let Some(k) = self.kernel_pmmr.get(pos) {
-				let hashsum = HashSum::from_summable(pos, &NoSum(kernel));
-				if k.hash == hashsum.hash {
+			if let Some((h,_)) = self.kernel_pmmr.get(pos, false) {
+				if h == kernel.hash() {
 					return Err(Error::DuplicateKernel(kernel.excess.clone()));
 				}
 			}

@@ -444,10 +438,9 @@
 
 		// push kernels in their MMR and file
 		let pos = self.kernel_pmmr
-			.push(NoSum(kernel.clone()))
+			.push(kernel.clone())
 			.map_err(&Error::SumTreeErr)?;
 		self.new_kernel_excesses.insert(kernel.excess, pos);
-		self.kernel_file.append(&mut ser::ser_vec(&kernel).unwrap());
 
 		Ok(())
 	}
@@ -465,13 +458,6 @@
 		// rewind each MMR
 		let (out_pos_rew, kern_pos_rew) = indexes_at(block, self.commit_index.deref())?;
 		self.rewind_pos(block.header.height, out_pos_rew, kern_pos_rew)?;
 
-		// rewind the kernel file store, the position is the number of kernels
-		// multiplied by their size
-		// the number of kernels is the number of leaves in the MMR
-		let pos = pmmr::n_leaves(kern_pos_rew);
-		self.kernel_file.rewind(pos * (TxKernel::size() as u64));
-
 		Ok(())
 	}
 

@@ -485,7 +471,7 @@
 			kern_pos_rew,
 		);
 
-		self.output_pmmr
+		self.utxo_pmmr
 			.rewind(out_pos_rew, height as u32)
 			.map_err(&Error::SumTreeErr)?;
 		self.rproof_pmmr

@@ -495,7 +481,6 @@
 			.rewind(kern_pos_rew, height as u32)
 			.map_err(&Error::SumTreeErr)?;
 
-		self.dump(true);
 		Ok(())
 	}
 
@@ -515,26 +500,23 @@
 		}
 	}
 
 	/// Current root hashes and sums (if applicable) for the UTXO, range proof
 	/// and kernel sum trees.
 	pub fn roots(
 		&self,
-	) -> (
-		HashSum<SumCommit>,
-		HashSum<NoSum<RangeProof>>,
-		HashSum<NoSum<TxKernel>>,
-	) {
-		(
-			self.output_pmmr.root(),
-			self.rproof_pmmr.root(),
-			self.kernel_pmmr.root(),
-		)
+	) -> SumTreeRoots {
+		SumTreeRoots {
+			utxo_root: self.utxo_pmmr.root(),
+			rproof_root: self.rproof_pmmr.root(),
+			kernel_root: self.kernel_pmmr.root(),
+		}
 	}
 
 	/// Validate the current sumtree state against a block header
 	pub fn validate(&self, header: &BlockHeader) -> Result<(), Error> {
 		// validate all hashes and sums within the trees
-		if let Err(e) = self.output_pmmr.validate() {
+		if let Err(e) = self.utxo_pmmr.validate() {
 			return Err(Error::InvalidSumtree(e));
 		}
 		if let Err(e) = self.rproof_pmmr.validate() {

@@ -545,9 +527,9 @@
 		}
 
 		// validate the tree roots against the block header
-		let (utxo_root, rproof_root, kernel_root) = self.roots();
-		if utxo_root.hash != header.utxo_root || rproof_root.hash != header.range_proof_root
-			|| kernel_root.hash != header.kernel_root
+		let roots = self.roots();
+		if roots.utxo_root != header.utxo_root || roots.rproof_root != header.range_proof_root
+			|| roots.kernel_root != header.kernel_root
 		{
 			return Err(Error::InvalidRoot);
 		}
@@ -574,11 +556,11 @@
 	/// by iterating over the whole MMR data. This is a costly operation
 	/// performed only when we receive a full new chain state.
 	pub fn rebuild_index(&self) -> Result<(), Error> {
-		for n in 1..self.output_pmmr.unpruned_size()+1 {
+		for n in 1..self.utxo_pmmr.unpruned_size()+1 {
 			// non-pruned leaves only
 			if pmmr::bintree_postorder_height(n) == 0 {
-				if let Some(hs) = self.output_pmmr.get(n) {
-					self.commit_index.save_output_pos(&hs.sum.commit, n)?;
+				if let Some((_, out)) = self.utxo_pmmr.get(n, true) {
+					self.commit_index.save_output_pos(&out.expect("not a leaf node").commit, n)?;
 				}
 			}
 		}

@@ -594,7 +576,7 @@
 	/// version only prints the UTXO tree.
 	pub fn dump(&self, short: bool) {
 		debug!(LOGGER, "-- outputs --");
-		self.output_pmmr.dump(short);
+		self.utxo_pmmr.dump(short);
 		if !short {
 			debug!(LOGGER, "-- range proofs --");
 			self.rproof_pmmr.dump(short);

@@ -606,7 +588,7 @@
 	// Sizes of the sum trees, used by `extending` on rollback.
 	fn sizes(&self) -> (u64, u64, u64) {
 		(
-			self.output_pmmr.unpruned_size(),
+			self.utxo_pmmr.unpruned_size(),
 			self.rproof_pmmr.unpruned_size(),
 			self.kernel_pmmr.unpruned_size(),
 		)

@@ -619,7 +601,7 @@
 		let mmr_sz = self.kernel_pmmr.unpruned_size();
 		let count = pmmr::n_leaves(mmr_sz);
 
-		let mut kernel_file = File::open(self.kernel_file.path())?;
+		let mut kernel_file = File::open(self.kernel_pmmr.data_file_path())?;
 		let first: TxKernel = ser::deserialize(&mut kernel_file)?;
 		first.verify()?;
 		let mut sum_kernel = first.excess;

@@ -651,14 +633,14 @@
 		let mut sum_utxo = None;
 		let mut utxo_count = 0;
 		let secp = static_secp_instance();
-		for n in 1..self.output_pmmr.unpruned_size()+1 {
+		for n in 1..self.utxo_pmmr.unpruned_size()+1 {
 			if pmmr::bintree_postorder_height(n) == 0 {
-				if let Some(hs) = self.output_pmmr.get(n) {
+				if let Some((_,output)) = self.utxo_pmmr.get(n, true) {
 					if n == 1 {
-						sum_utxo = Some(hs.sum.commit);
+						sum_utxo = Some(output.expect("not a leaf node").commit);
 					} else {
 						let secp = secp.lock().unwrap();
-						sum_utxo = Some(secp.commit_sum(vec![sum_utxo.unwrap(), hs.sum.commit], vec![])?);
+						sum_utxo = Some(secp.commit_sum(vec![sum_utxo.unwrap(), output.expect("not a leaf node").commit], vec![])?);
 					}
 					utxo_count += 1;
 				}
@@ -669,6 +651,42 @@
 	}
 }
 
+/*fn store_element<T>(file_store: &mut FlatFileStore<T>, data: T)
+	-> Result<(), String>
+	where
+	T: ser::Readable + ser::Writeable + Clone
+{
+	file_store.append(vec![data])
+}
+
+fn read_element_at_pmmr_index<T>(file_store: &FlatFileStore<T>, pos: u64) -> Option<T>
+	where
+	T: ser::Readable + ser::Writeable + Clone
+{
+	let leaf_index = pmmr::leaf_index(pos);
+	// flat files are zero indexed
+	file_store.get(leaf_index - 1)
+}
+
+fn _remove_element_at_pmmr_index<T>(file_store: &mut FlatFileStore<T>, pos: u64)
+	-> Result<(), String>
+	where
+	T: ser::Readable + ser::Writeable + Clone
+{
+	let leaf_index = pmmr::leaf_index(pos);
+	// flat files are zero indexed
+	file_store.remove(vec![leaf_index - 1])
+}
+
+fn rewind_to_pmmr_index<T>(file_store: &mut FlatFileStore<T>, pos: u64) -> Result<(), String>
+	where
+	T: ser::Readable + ser::Writeable + Clone
+{
+	let leaf_index = pmmr::leaf_index(pos);
+	// flat files are zero indexed
+	file_store.rewind(leaf_index - 1)
+}*/
+
 /// Output and kernel MMR indexes at the end of the provided block
 fn indexes_at(block: &Block, commit_index: &ChainStore) -> Result<(u64, u64), Error> {
 	let out_idx = match block.outputs.last() {
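The commented-out helpers above all hinge on `pmmr::leaf_index`, which maps a 1-based postorder MMR position to a 1-based leaf count (the flat file then shifts it to zero-based). A standalone sketch of that mapping — a reimplementation for illustration, not the exact core function:

	// Count the leaves in an MMR of `size` nodes by peeling off the largest
	// perfect binary subtree (2^h - 1 nodes, 2^(h-1) leaves) each round.
	// For a leaf at postorder position `pos`, n_leaves(pos) is its 1-based index.
	fn n_leaves(mut size: u64) -> u64 {
		let mut leaves = 0;
		while size > 0 {
			let mut h = 1;
			while (1u64 << (h + 1)) - 1 <= size {
				h += 1;
			}
			leaves += 1 << (h - 1);
			size -= (1 << h) - 1;
		}
		leaves
	}

	fn main() {
		// positions 1, 2, 4, 5 are leaves; 3, 6, 7 are parents
		assert_eq!(n_leaves(1), 1);
		assert_eq!(n_leaves(4), 3);
		assert_eq!(n_leaves(5), 4);
		// so reading the element at pmmr position 5 hits flat-file entry 4 - 1 = 3
	}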
@@ -40,6 +40,17 @@ bitflags! {
 	}
 }
 
+/// A helper to hold the roots of the sumtrees in order to keep them
+/// readable
+pub struct SumTreeRoots {
+	/// UTXO root
+	pub utxo_root: Hash,
+	/// Range Proof root
+	pub rproof_root: Hash,
+	/// Kernel root
+	pub kernel_root: Hash,
+}
+
 /// Errors
 #[derive(Debug)]
 pub enum Error {
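With the struct above in place, root checks elsewhere in this diff read by field name instead of tuple position, e.g. (condensed from the validate() hunk in the sumtree code, not new logic):

	let roots = ext.roots();
	if roots.utxo_root != header.utxo_root
		|| roots.rproof_root != header.range_proof_root
		|| roots.kernel_root != header.kernel_root
	{
		return Err(Error::InvalidRoot);
	}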
@@ -81,6 +92,8 @@ pub enum Error {
 	InvalidSumtree(String),
 	/// Internal issue when trying to save or load data from store
 	StoreErr(grin_store::Error, String),
+	/// Internal issue when trying to save or load data from append only files
+	FileReadErr(String),
 	/// Error serializing or deserializing a type
 	SerErr(ser::Error),
 	/// Error with the sumtrees
@@ -253,6 +253,8 @@ fn spend_in_fork() {
 	fork_head = b.header.clone();
 	chain.process_block(b, chain::Options::SKIP_POW).unwrap();
 
+	println!("First block");
+
 	// now mine three further blocks
 	for n in 3..6 {
 		let b = prepare_block(&kc, &fork_head, &chain, n);

@@ -263,6 +265,8 @@
 	let lock_height = 1 + global::coinbase_maturity();
 	assert_eq!(lock_height, 4);
 
+	println!("3 Further Blocks: should have 4 blocks or 264 bytes in file ");
+
 	let tx1 = build::transaction(
 		vec![
 			build::coinbase_input(consensus::REWARD, block_hash, kc.derive_key_id(2).unwrap()),

@@ -272,10 +276,14 @@
 		&kc,
 	).unwrap();
 
+	println!("Built coinbase input and output");
+
 	let next = prepare_block_tx(&kc, &fork_head, &chain, 7, vec![&tx1]);
 	let prev_main = next.header.clone();
 	chain.process_block(next.clone(), chain::Options::SKIP_POW).unwrap();
 
+	println!("tx 1 processed, should have 6 outputs or 396 bytes in file, first skipped");
+
 	let tx2 = build::transaction(
 		vec![
 			build::input(consensus::REWARD - 20000, next.hash(), kc.derive_key_id(30).unwrap()),

@@ -289,6 +297,9 @@
 	let prev_main = next.header.clone();
 	chain.process_block(next, chain::Options::SKIP_POW).unwrap();
 
+	println!("tx 2 processed");
+	/*panic!("Stop");*/
+
 	// mine 2 forked blocks from the first
 	let fork = prepare_fork_block_tx(&kc, &fork_head, &chain, 6, vec![&tx1]);
 	let prev_fork = fork.header.clone();
@@ -20,13 +20,13 @@
 use std::cmp::min;
 use std::{fmt, ops};
 use std::convert::AsRef;
+use std::ops::Add;
 
 use blake2::blake2b::Blake2b;
 
 use consensus;
 use ser::{self, AsFixedBytes, Error, Readable, Reader, Writeable, Writer};
 use util;
-use util::LOGGER;
 
 /// A hash consisting of all zeroes, used as a sentinel. No known preimage.
 pub const ZERO_HASH: Hash = Hash([0; 32]);
@@ -147,6 +147,13 @@ impl Writeable for Hash {
 	}
 }
 
+impl Add for Hash {
+	type Output = Hash;
+	fn add(self, other: Hash) -> Hash {
+		self.hash_with(other)
+	}
+}
+
 /// Serializer that outputs a hash of the serialized object
 pub struct HashWriter {
 	state: Blake2b,
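The new Add impl is what lets the de-summed tree code keep using `+` on nodes: adding two hashes now means hashing them together via hash_with (defined in the next hunk). A tiny sketch with a hypothetical helper:

	use core::core::hash::{Hash, Hashed};

	// Parent hash of two child nodes; `left + right` is sugar for
	// left.hash_with(right) thanks to the Add impl above.
	fn parent_hash(left: Hash, right: Hash) -> Hash {
		left + right
	}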
@@ -205,7 +212,6 @@ impl<W: ser::Writeable> Hashed for W {
 	fn hash_with<T: Writeable>(&self, other: T) -> Hash {
 		let mut hasher = HashWriter::default();
 		ser::Writeable::write(self, &mut hasher).unwrap();
-		trace!(LOGGER, "Hashing with additional data");
 		ser::Writeable::write(&other, &mut hasher).unwrap();
 		let mut ret = [0; 32];
 		hasher.finalize(&mut ret);
@ -29,169 +29,30 @@
|
||||||
//! position of siblings, parents, etc. As all those functions only rely on
|
//! position of siblings, parents, etc. As all those functions only rely on
|
||||||
//! binary operations, they're extremely fast. For more information, see the
|
//! binary operations, they're extremely fast. For more information, see the
|
||||||
//! doc on bintree_jump_left_sibling.
|
//! doc on bintree_jump_left_sibling.
|
||||||
//! 2. The implementation of a prunable MMR sum tree using the above. Each leaf
|
//! 2. The implementation of a prunable MMR tree using the above. Each leaf
|
||||||
//! is required to be Summable and Hashed. Tree roots can be trivially and
|
//! is required to be Writeable (which implements Hashed). Tree roots can be trivially and
|
||||||
//! efficiently calculated without materializing the full tree. The underlying
|
//! efficiently calculated without materializing the full tree. The underlying
|
||||||
//! (Hash, Sum) pais are stored in a Backend implementation that can either be
|
//! Hashes are stored in a Backend implementation that can either be
|
||||||
//! a simple Vec or a database.
|
//! a simple Vec or a database.
|
||||||
|
|
||||||
use std::clone::Clone;
|
use std::clone::Clone;
|
||||||
|
use std::ops::Deref;
|
||||||
use std::marker::PhantomData;
|
use std::marker::PhantomData;
|
||||||
use std::ops::{self, Deref};
|
|
||||||
|
|
||||||
use core::hash::{Hash, Hashed};
|
use core::hash::{Hash, Hashed};
|
||||||
use ser::{self, Readable, Reader, Writeable, Writer};
|
use ser::PMMRable;
|
||||||
use util::LOGGER;
|
use util::LOGGER;
|
||||||
|
|
||||||
/// Trait for an element of the tree that has a well-defined sum and hash that
|
|
||||||
/// the tree can sum over
|
|
||||||
pub trait Summable {
/// The type of the sum
type Sum: Clone + ops::Add<Output = Self::Sum> + Readable + Writeable + PartialEq;

/// Obtain the sum of the element
fn sum(&self) -> Self::Sum;

/// Length of the Sum type when serialized. Can be used as a hint by
/// underlying storages.
fn sum_len() -> usize;
}

/// An empty sum that takes no space, to store elements that do not need summing
/// but can still leverage the hierarchical hashing.
#[derive(Copy, Clone, Debug)]
pub struct NullSum;
impl ops::Add for NullSum {
type Output = NullSum;
fn add(self, _: NullSum) -> NullSum {
NullSum
}
}

impl Readable for NullSum {
fn read(_: &mut Reader) -> Result<NullSum, ser::Error> {
Ok(NullSum)
}
}

impl Writeable for NullSum {
fn write<W: Writer>(&self, _: &mut W) -> Result<(), ser::Error> {
Ok(())
}
}

impl PartialEq for NullSum {
fn eq(&self, _other: &NullSum) -> bool {
true
}
}

/// Wrapper for a type that allows it to be inserted in a tree without summing
#[derive(Clone, Debug)]
pub struct NoSum<T>(pub T);
impl<T> Summable for NoSum<T> {
type Sum = NullSum;
fn sum(&self) -> NullSum {
NullSum
}
fn sum_len() -> usize {
return 0;
}
}
impl<T> Writeable for NoSum<T>
where
T: Writeable,
{
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
self.0.write(writer)
}
}

/// A utility type to handle (Hash, Sum) pairs more conveniently. The addition
/// of two HashSums is the (Hash(h1|h2), h1 + h2) HashSum.
#[derive(Debug, Clone, Eq)]
pub struct HashSum<T>
where
T: Summable,
{
/// The hash
pub hash: Hash,
/// The sum
pub sum: T::Sum,
}

impl<T> HashSum<T>
where
T: Summable + Hashed,
{
/// Create a hash sum from a summable
pub fn from_summable(idx: u64, elmt: &T) -> HashSum<T> {
let hash = elmt.hash();
let sum = elmt.sum();
let node_hash = (idx, &sum, hash).hash();
HashSum {
hash: node_hash,
sum: sum,
}
}
}

impl<T> PartialEq for HashSum<T>
where
T: Summable,
{
fn eq(&self, other: &HashSum<T>) -> bool {
self.hash == other.hash && self.sum == other.sum
}
}

impl<T> Readable for HashSum<T>
where
T: Summable,
{
fn read(r: &mut Reader) -> Result<HashSum<T>, ser::Error> {
Ok(HashSum {
hash: Hash::read(r)?,
sum: T::Sum::read(r)?,
})
}
}

impl<T> Writeable for HashSum<T>
where
T: Summable,
{
fn write<W: Writer>(&self, w: &mut W) -> Result<(), ser::Error> {
self.hash.write(w)?;
self.sum.write(w)
}
}

impl<T> ops::Add for HashSum<T>
where
T: Summable,
{
type Output = HashSum<T>;
fn add(self, other: HashSum<T>) -> HashSum<T> {
HashSum {
hash: (self.hash, other.hash).hash(),
sum: self.sum + other.sum,
}
}
}
/// Storage backend for the MMR, just needs to be indexed by order of insertion.
/// The PMMR itself does not need the Backend to be accurate on the existence
/// of an element (i.e. remove could be a no-op) but layers above can
/// depend on an accurate Backend to check existence.
pub trait Backend<T>
where
T: Summable,
{
pub trait Backend<T> where
T: PMMRable {
/// Append the provided HashSums to the backend storage. The position of the
/// first element of the Vec in the MMR is provided to help the
/// implementation.
fn append(&mut self, position: u64, data: Vec<HashSum<T>>) -> Result<(), String>;
/// Append the provided Hashes to the backend storage, and optionally an associated
/// data element to flatfile storage (for leaf nodes only). The position of the
/// first element of the Vec in the MMR is provided to help the implementation.
fn append(&mut self, position: u64, data: Vec<(Hash, Option<T>)>) -> Result<(), String>;

/// Rewind the backend state to a previous position, as if all append
/// operations after that had been canceled. Expects a position in the PMMR
@ -199,14 +60,20 @@ where
/// occurred (see remove).
fn rewind(&mut self, position: u64, index: u32) -> Result<(), String>;

/// Get a HashSum by insertion position
fn get(&self, position: u64) -> Option<HashSum<T>>;
/// Get a Hash/Element by insertion position. If include_data is true, will
/// also return the associated data element
fn get(&self, position: u64, include_data: bool) -> Option<(Hash, Option<T>)>;

/// Remove HashSums by insertion position. An index is also provided so the
/// Remove Hashes/Data by insertion position. An index is also provided so the
/// underlying backend can implement some rollback of positions up to a
/// given index (practically the index is the height of the block that
/// triggered removal).
fn remove(&mut self, positions: Vec<u64>, index: u32) -> Result<(), String>;

/// Returns the data file path. This is a bit of a hack that doesn't
/// sit well with the design, but TxKernels have to be summed and the
/// fastest way is to allow direct access to the file
fn get_data_file_path(&self) -> String;
}
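To illustrate the new contract: leaves are appended together with their data element, while internal nodes carry a hash only. A minimal sketch of the (Hash, Option<T>) convention, assuming a backend in scope and hypothetical elem, left_hash and right_hash values:

	// A leaf carries Some(data); a parent built from two children carries None.
	let leaf = (elem.hash(), Some(elem.clone()));
	let parent = (left_hash + right_hash, None);
	backend.append(position, vec![leaf, parent])?;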
/// Prunable Merkle Mountain Range implementation. All positions within the tree
@ -218,18 +85,18 @@ where
/// we are in the sequence of nodes making up the MMR.
pub struct PMMR<'a, T, B>
where
T: Summable,
T: PMMRable,
B: 'a + Backend<T>,
{
last_pos: u64,
backend: &'a mut B,
// only needed for parameterizing Backend
summable: PhantomData<T>,
writeable: PhantomData<T>,
}

impl<'a, T, B> PMMR<'a, T, B>
where
T: Summable + Hashed + Clone,
T: PMMRable,
B: 'a + Backend<T>,
{
/// Build a new prunable Merkle Mountain Range using the provided backend.
@ -237,7 +104,7 @@ where
PMMR {
last_pos: 0,
backend: backend,
summable: PhantomData,
writeable: PhantomData,
}
}

@ -248,48 +115,51 @@ where
PMMR {
last_pos: last_pos,
backend: backend,
summable: PhantomData,
writeable: PhantomData,
}
}

/// Computes the root of the MMR. Find all the peaks in the current
/// tree and "bags" them to get a single peak.
pub fn root(&self) -> HashSum<T> {
pub fn root(&self) -> Hash {
let peaks_pos = peaks(self.last_pos);
let peaks: Vec<Option<HashSum<T>>> = map_vec!(peaks_pos, |&pi| self.backend.get(pi));
let peaks: Vec<Option<(Hash, Option<T>)>> = peaks_pos.into_iter()
.map(|pi| self.backend.get(pi, false))
.collect();

let mut ret = None;
for peak in peaks {
ret = match (ret, peak) {
(None, x) => x,
(Some(hsum), None) => Some(hsum),
(Some(hash), None) => Some(hash),
(Some(lhsum), Some(rhsum)) => Some(lhsum + rhsum),
(Some(lhash), Some(rhash)) => Some((lhash.0.hash_with(rhash.0), None)),
}
}
ret.expect("no root, invalid tree")
ret.expect("no root, invalid tree").0
}
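A worked example of the bagging step, assuming an MMR with last_pos = 11: peaks(11) yields positions 7, 10 and 11, and the fold above combines them left to right with the same hash_with used in the match arm. A sketch:

	let p7 = backend.get(7, false).unwrap().0;
	let p10 = backend.get(10, false).unwrap().0;
	let p11 = backend.get(11, false).unwrap().0;
	let root = p7.hash_with(p10).hash_with(p11);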
/// Push a new Summable element in the MMR. Computes new related peaks at
/// Push a new element into the MMR. Computes new related peaks at
/// the same time if applicable.
pub fn push(&mut self, elmt: T) -> Result<u64, String> {
let elmt_pos = self.last_pos + 1;
let mut current_hashsum = HashSum::from_summable(elmt_pos, &elmt);
let mut current_hash = elmt.hash();
let mut to_append = vec![current_hashsum.clone()];
let mut to_append = vec![(current_hash, Some(elmt))];
let mut height = 0;
let mut pos = elmt_pos;

// we look ahead one position in the MMR, if the expected node has a higher
// height it means we have to build a higher peak by summing with a previous
// height it means we have to build a higher peak by hashing with a previous
// sibling. we do it iteratively in case the new peak itself allows the
// creation of another parent.
while bintree_postorder_height(pos + 1) > height {
let left_sibling = bintree_jump_left_sibling(pos);
let left_hashsum = self.backend.get(left_sibling).expect(
let left_elem = self.backend.get(left_sibling, false).expect(
"missing left sibling in tree, should not have been pruned",
);
current_hashsum = left_hashsum + current_hashsum;
current_hash = left_elem.0 + current_hash;

to_append.push(current_hashsum.clone());
to_append.push((current_hash.clone(), None));
height += 1;
pos += 1;
}
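A worked trace of the look-ahead, assuming the tree already holds three leaves (positions 1, 2 and 4, with their parent at 3):

	// last_pos == 4, so the pushed leaf lands at position 5
	// bintree_postorder_height(6) == 1 > 0: hash with left sibling 4, parent at 6
	// bintree_postorder_height(7) == 2 > 1: hash with left sibling 3, new peak at 7
	// bintree_postorder_height(8) == 0: the loop ends
	// to_append is then [(h5, Some(elmt)), (h6, None), (h7, None)]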
@ -322,7 +192,7 @@ where
/// to keep an index of elements to positions in the tree. Prunes parent
/// nodes as well when they become childless.
pub fn prune(&mut self, position: u64, index: u32) -> Result<bool, String> {
if let None = self.backend.get(position) {
if let None = self.backend.get(position, false) {
return Ok(false);
}
let prunable_height = bintree_postorder_height(position);
@ -345,7 +215,7 @@ where

// if we have a pruned sibling, we can continue up the tree
// otherwise we're done
if let None = self.backend.get(sibling) {
if let None = self.backend.get(sibling, false) {
current = parent;
} else {
break;
@ -356,26 +226,27 @@ where
Ok(true)
}

/// Helper function to get the HashSum of a node at a given position from
/// Helper function to get a node at a given position from
/// the backend.
pub fn get(&self, position: u64) -> Option<HashSum<T>> {
pub fn get(&self, position: u64, include_data: bool) -> Option<(Hash, Option<T>)> {
if position > self.last_pos {
None
} else {
self.backend.get(position)
self.backend.get(position, include_data)
}
}

/// Helper function to get the last N nodes inserted, i.e. the last
/// n nodes along the bottom of the tree
pub fn get_last_n_insertions(&self, n: u64) -> Vec<HashSum<T>> {
pub fn get_last_n_insertions(&self, n: u64) -> Vec<(Hash, Option<T>)> {
let mut return_vec = Vec::new();
let mut last_leaf = self.last_pos;
let size = self.unpruned_size();
// Special case that causes issues in bintree functions,
// just return
if size == 1 {
return_vec.push(self.backend.get(last_leaf).unwrap());
return_vec.push(self.backend.get(last_leaf, true).unwrap());
return return_vec;
}
// if size is even, we're already at the bottom, otherwise
@ -390,7 +261,7 @@ where
if bintree_postorder_height(last_leaf) > 0 {
last_leaf = bintree_rightmost(last_leaf);
}
return_vec.push(self.backend.get(last_leaf).unwrap());
return_vec.push(self.backend.get(last_leaf, true).unwrap());

last_leaf = bintree_jump_left_sibling(last_leaf);
}
@ -398,21 +269,20 @@ where
}
/// Walks all unpruned nodes in the MMR and revalidates all parent hashes
/// and sums.
pub fn validate(&self) -> Result<(), String> {
// iterate on all parent nodes
for n in 1..(self.last_pos + 1) {
if bintree_postorder_height(n) > 0 {
if let Some(hs) = self.get(n) {
if let Some(hs) = self.get(n, false) {
// take the left and right children, if they exist
let left_pos = bintree_move_down_left(n).unwrap();
let right_pos = bintree_jump_right_sibling(left_pos);

if let Some(left_child_hs) = self.get(left_pos) {
if let Some(left_child_hs) = self.get(left_pos, false) {
if let Some(right_child_hs) = self.get(right_pos) {
if let Some(right_child_hs) = self.get(right_pos, false) {
// sum and compare
// add hashes and compare
if left_child_hs + right_child_hs != hs {
if left_child_hs.0 + right_child_hs.0 != hs.0 {
return Err(format!("Invalid MMR, hashsum of parent at {} does \
return Err(format!("Invalid MMR, hash of parent at {} does \
not match children.", n));
}
}
@ -429,6 +299,11 @@ where
self.last_pos
}

/// Return the path of the data file (needed to sum kernels efficiently)
pub fn data_file_path(&self) -> String {
self.backend.get_data_file_path()
}

/// Debugging utility to print information about the MMRs. Short version
/// only prints the last 8 nodes.
pub fn dump(&self, short: bool) {
@ -445,40 +320,36 @@ where
break;
}
idx.push_str(&format!("{:>8} ", m + 1));
let ohs = self.get(m + 1);
let ohs = self.get(m + 1, false);
match ohs {
Some(hs) => hashes.push_str(&format!("{} ", hs.hash)),
Some(hs) => hashes.push_str(&format!("{} ", hs.0)),
None => hashes.push_str(&format!("{:>8} ", "??")),
}
}
debug!(LOGGER, "{}", idx);
trace!(LOGGER, "{}", idx);
debug!(LOGGER, "{}", hashes);
trace!(LOGGER, "{}", hashes);
}
}
}
/// Simple MMR backend implementation based on a Vector. Pruning does not
/// compact the Vector itself but still frees the reference to the
/// underlying HashSum.
/// underlying Hash.
#[derive(Clone)]
pub struct VecBackend<T>
where
T: Summable + Clone,
{
where T: PMMRable {
/// Backend elements
pub elems: Vec<Option<HashSum<T>>>,
pub elems: Vec<Option<(Hash, Option<T>)>>,
}

impl<T> Backend<T> for VecBackend<T>
where
T: Summable + Clone,
{
where T: PMMRable {
#[allow(unused_variables)]
fn append(&mut self, position: u64, data: Vec<HashSum<T>>) -> Result<(), String> {
fn append(&mut self, position: u64, data: Vec<(Hash, Option<T>)>) -> Result<(), String> {
self.elems.append(&mut map_vec!(data, |d| Some(d.clone())));
Ok(())
}
fn get(&self, position: u64) -> Option<HashSum<T>> {
fn get(&self, position: u64, _include_data: bool) -> Option<(Hash, Option<T>)> {
self.elems[(position - 1) as usize].clone()
}
#[allow(unused_variables)]
@ -493,18 +364,19 @@ where
self.elems = self.elems[0..(position as usize) + 1].to_vec();
Ok(())
}
fn get_data_file_path(&self) -> String {
"".to_string()
}
}
impl<T> VecBackend<T>
where
T: Summable + Clone,
{
where T: PMMRable {
/// Instantiates a new VecBackend<T>
pub fn new() -> VecBackend<T> {
VecBackend { elems: vec![] }
}

/// Current number of HashSum elements in the underlying Vec.
/// Current number of elements in the underlying Vec.
pub fn used_size(&self) -> usize {
let mut usz = self.elems.len();
for elem in self.elems.deref() {
@ -568,6 +440,28 @@ impl PruneList {
}
}
/// As above, but only returning the number of leaf nodes to skip for a
/// given leaf. Helpful if, for instance, data for each leaf is being stored
/// separately in a contiguous flat file
pub fn get_leaf_shift(&self, pos: u64) -> Option<u64> {
// get the position where the node at pos would fit in the pruned list, if
// it's already pruned, nothing to skip
match self.pruned_pos(pos) {
None => None,
Some(idx) => {
// skip by the number of leaf nodes pruned in the preceding subtrees,
// which is just 2^height
Some(
self.pruned_nodes[0..(idx as usize)]
.iter()
.map(|n| 1 << bintree_postorder_height(*n))
.sum(),
)
}
}
}
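A sketch of the leaf shift at work, assuming the subtree rooted at position 3 (height 1, covering two leaves) has been pruned:

	let pl = PruneList { pruned_nodes: vec![3] };
	// pruned_pos(4) returns Some(1); the iterator above then sums
	// 1 << bintree_postorder_height(3) == 2, so the leaf at position 4
	// skips two leaf slots in the flat data file.
	assert_eq!(pl.get_leaf_shift(4), Some(2));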
/// Push the node at the provided position in the prune list. Compacts the
/// list if pruning the additional node means a parent can get pruned as
/// well.
@ -591,7 +485,7 @@ impl PruneList {
}

/// Gets the position a new pruned node should take in the prune list.
/// If the node has already bee pruned, either directly or through one of
/// If the node has already been pruned, either directly or through one of
/// its parents contained in the prune list, returns None.
pub fn pruned_pos(&self, pos: u64) -> Option<usize> {
match self.pruned_nodes.binary_search(&pos) {
@ -833,7 +727,20 @@ fn most_significant_pos(num: u64) -> u64 {
#[cfg(test)]
mod test {
use super::*;
use core::hash::Hashed;
use ser::{Writeable, Readable, Error};
use core::{Writer, Reader};
use core::hash::{Hash};

#[test]
fn test_leaf_index() {
assert_eq!(n_leaves(1), 1);
assert_eq!(n_leaves(2), 2);
assert_eq!(n_leaves(4), 3);
assert_eq!(n_leaves(5), 4);
assert_eq!(n_leaves(8), 5);
assert_eq!(n_leaves(9), 6);
}
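Reading the assertions above: n_leaves(sz) gives the number of leaves contained in an MMR of total size sz. A tree of size 4, for instance, holds three leaves (positions 1, 2 and 4), position 3 being the parent of the first two.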
#[test]
fn some_all_ones() {
@ -890,23 +797,17 @@ mod test {
assert_eq!(peaks(42), vec![31, 38, 41, 42]);
}

#[derive(Copy, Clone, Debug, PartialEq, Eq)]
struct TestElem([u32; 4]);
impl Summable for TestElem {
type Sum = u64;
fn sum(&self) -> u64 {
// sums are not allowed to overflow, so we use this simple
// non-injective "sum" function that will still be homomorphic
self.0[0] as u64 * 0x1000 + self.0[1] as u64 * 0x100 + self.0[2] as u64 * 0x10 +
self.0[3] as u64
}
fn sum_len() -> usize {
8
}
}
impl PMMRable for TestElem {
fn len() -> usize {
16
}
}

impl Writeable for TestElem {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), Error> {
try!(writer.write_u32(self.0[0]));
try!(writer.write_u32(self.0[1]));
try!(writer.write_u32(self.0[2]));
@ -914,6 +815,19 @@ mod test {
}
}

impl Readable for TestElem {
fn read(reader: &mut Reader) -> Result<TestElem, Error> {
Ok(TestElem(
[
reader.read_u32()?,
reader.read_u32()?,
reader.read_u32()?,
reader.read_u32()?,
]
))
}
}
#[test]
#[allow(unused_variables)]
fn pmmr_push_root() {
@ -934,72 +848,67 @@ mod test {

// one element
pmmr.push(elems[0]).unwrap();
let hash = Hashed::hash(&elems[0]);
let sum = elems[0].sum();
let node_hash = (1 as u64, &sum, hash).hash();
let node_hash = elems[0].hash();
assert_eq!(
pmmr.root(),
HashSum {
hash: node_hash,
sum: sum,
}
node_hash,
);
assert_eq!(pmmr.unpruned_size(), 1);
pmmr.dump(false);

// two elements
pmmr.push(elems[1]).unwrap();
let sum2 = HashSum::from_summable(1, &elems[0]) +
HashSum::from_summable(2, &elems[1]);
let sum2 = elems[0].hash() + elems[1].hash();
pmmr.dump(false);
assert_eq!(pmmr.root(), sum2);
assert_eq!(pmmr.unpruned_size(), 3);

// three elements
pmmr.push(elems[2]).unwrap();
let sum3 = sum2.clone() + HashSum::from_summable(4, &elems[2]);
let sum3 = sum2 + elems[2].hash();
pmmr.dump(false);
assert_eq!(pmmr.root(), sum3);
assert_eq!(pmmr.unpruned_size(), 4);

// four elements
pmmr.push(elems[3]).unwrap();
let sum4 = sum2 +
(HashSum::from_summable(4, &elems[2]) +
HashSum::from_summable(5, &elems[3]));
let sum_one = elems[2].hash() + elems[3].hash();
let sum4 = sum2 + sum_one;
pmmr.dump(false);
assert_eq!(pmmr.root(), sum4);
assert_eq!(pmmr.unpruned_size(), 7);

// five elements
pmmr.push(elems[4]).unwrap();
let sum5 = sum4.clone() + HashSum::from_summable(8, &elems[4]);
let sum5 = sum4 + elems[4].hash();
pmmr.dump(false);
assert_eq!(pmmr.root(), sum5);
assert_eq!(pmmr.unpruned_size(), 8);

// six elements
pmmr.push(elems[5]).unwrap();
let sum6 = sum4.clone() +
(HashSum::from_summable(8, &elems[4]) +
HashSum::from_summable(9, &elems[5]));
let sum6 = sum4 +
(elems[4].hash() + elems[5].hash());
assert_eq!(pmmr.root(), sum6.clone());
assert_eq!(pmmr.unpruned_size(), 10);

// seven elements
pmmr.push(elems[6]).unwrap();
let sum7 = sum6 + HashSum::from_summable(11, &elems[6]);
let sum7 = sum6 + elems[6].hash();
assert_eq!(pmmr.root(), sum7);
assert_eq!(pmmr.unpruned_size(), 11);

// eight elements
pmmr.push(elems[7]).unwrap();
let sum8 = sum4 +
((HashSum::from_summable(8, &elems[4]) +
HashSum::from_summable(9, &elems[5])) +
(HashSum::from_summable(11, &elems[6]) +
HashSum::from_summable(12, &elems[7])));
((elems[4].hash() + elems[5].hash()) +
(elems[6].hash() + elems[7].hash()));
assert_eq!(pmmr.root(), sum8);
assert_eq!(pmmr.unpruned_size(), 15);

// nine elements
pmmr.push(elems[8]).unwrap();
let sum9 = sum8 + HashSum::from_summable(16, &elems[8]);
let sum9 = sum8 + elems[8].hash();
assert_eq!(pmmr.root(), sum9);
assert_eq!(pmmr.unpruned_size(), 16);
}
@ -1015,8 +924,9 @@ mod test {
TestElem([0, 0, 0, 6]),
TestElem([0, 0, 0, 7]),
TestElem([0, 0, 0, 8]),
TestElem([0, 0, 0, 9]),
TestElem([1, 0, 0, 0]),
];

let mut ba = VecBackend::new();
let mut pmmr = PMMR::new(&mut ba);

@ -1026,23 +936,23 @@ mod test {

pmmr.push(elems[0]).unwrap();
let res = pmmr.get_last_n_insertions(19);
assert!(res.len() == 1 && res[0].sum == 1);
assert!(res.len() == 1);

pmmr.push(elems[1]).unwrap();

let res = pmmr.get_last_n_insertions(12);
assert!(res[0].sum == 2 && res[1].sum == 1);
assert!(res.len() == 2);

pmmr.push(elems[2]).unwrap();

let res = pmmr.get_last_n_insertions(2);
assert!(res[0].sum == 3 && res[1].sum == 2);
assert!(res.len() == 2);

pmmr.push(elems[3]).unwrap();

let res = pmmr.get_last_n_insertions(19);
assert!(
res[0].sum == 4 && res[1].sum == 3 && res[2].sum == 2 && res[3].sum == 1 && res.len() == 4
res.len() == 4
);

pmmr.push(elems[5]).unwrap();
@ -1052,7 +962,7 @@ mod test {

let res = pmmr.get_last_n_insertions(7);
assert!(
res[0].sum == 9 && res[1].sum == 8 && res[2].sum == 7 && res[3].sum == 6 && res.len() == 7
res.len() == 7
);
}
@ -1071,7 +981,7 @@ mod test {
TestElem([1, 0, 0, 0]),
];

let orig_root: HashSum<TestElem>;
let orig_root: Hash;
let sz: u64;
let mut ba = VecBackend::new();
{
@ -1085,7 +995,7 @@ mod test {

// pruning a leaf with no parent should do nothing
{
let mut pmmr = PMMR::at(&mut ba, sz);
let mut pmmr: PMMR<TestElem, _> = PMMR::at(&mut ba, sz);
pmmr.prune(16, 0).unwrap();
assert_eq!(orig_root, pmmr.root());
}
@ -1093,14 +1003,14 @@ mod test {

// pruning leaves with no shared parent just removes 1 element
{
let mut pmmr = PMMR::at(&mut ba, sz);
let mut pmmr: PMMR<TestElem, _> = PMMR::at(&mut ba, sz);
pmmr.prune(2, 0).unwrap();
assert_eq!(orig_root, pmmr.root());
}
assert_eq!(ba.used_size(), 15);

{
let mut pmmr = PMMR::at(&mut ba, sz);
let mut pmmr: PMMR<TestElem, _> = PMMR::at(&mut ba, sz);
pmmr.prune(4, 0).unwrap();
assert_eq!(orig_root, pmmr.root());
}
@ -1108,7 +1018,7 @@ mod test {

// pruning a non-leaf node has no effect
{
let mut pmmr = PMMR::at(&mut ba, sz);
let mut pmmr: PMMR<TestElem, _> = PMMR::at(&mut ba, sz);
pmmr.prune(3, 0).unwrap_err();
assert_eq!(orig_root, pmmr.root());
}
@ -1116,7 +1026,7 @@ mod test {

// pruning sibling removes subtree
{
let mut pmmr = PMMR::at(&mut ba, sz);
let mut pmmr: PMMR<TestElem, _> = PMMR::at(&mut ba, sz);
pmmr.prune(5, 0).unwrap();
assert_eq!(orig_root, pmmr.root());
}
@ -1124,7 +1034,7 @@ mod test {

// pruning all leaves under level >1 removes the whole subtree
{
let mut pmmr = PMMR::at(&mut ba, sz);
let mut pmmr: PMMR<TestElem, _> = PMMR::at(&mut ba, sz);
pmmr.prune(1, 0).unwrap();
assert_eq!(orig_root, pmmr.root());
}
@ -1132,7 +1042,7 @@ mod test {

// pruning everything should only leave us the peaks
{
let mut pmmr = PMMR::at(&mut ba, sz);
let mut pmmr: PMMR<TestElem, _> = PMMR::at(&mut ba, sz);
for n in 1..16 {
let _ = pmmr.prune(n, 0);
}
@ -19,16 +19,14 @@ use util::{static_secp_instance, kernel_sig_msg};
use util::secp::pedersen::{Commitment, RangeProof};
use std::cmp::{min, max};
use std::cmp::Ordering;
use std::ops;

use consensus;
use consensus::VerifySortOrder;
use core::Committed;
use core::hash::{Hash, Hashed, ZERO_HASH};
use core::pmmr::Summable;
use keychain;
use keychain::{Identifier, Keychain, BlindingFactor};
use ser::{self, read_and_verify_sorted, Readable, Reader, Writeable, WriteableSorted, Writer};
use keychain;
use ser::{self, read_and_verify_sorted, PMMRable, Readable, Reader, Writeable, WriteableSorted, Writer};
use util;

/// The size of the blake2 hash of a switch commitment (256 bits)
@ -217,9 +215,10 @@ impl TxKernel {
..self
}
}
}

/// Size in bytes of a kernel, necessary for binary storage
pub fn size() -> usize {
impl PMMRable for TxKernel {
fn len() -> usize {
17 + // features plus fee and lock_height
secp::constants::PEDERSEN_COMMITMENT_SIZE +
secp::constants::AGG_SIGNATURE_SIZE
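The 17 here presumably breaks down as one features byte plus the two u64 fields, fee and lock_height: 1 + 8 + 8 = 17, with the Pedersen commitment and aggregate signature sizes added on top.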
@ -671,17 +670,13 @@ impl SwitchCommitHash {
/// provides future-proofing against quantum-based attacks, as well as providing
/// wallet implementations with a way to identify their outputs for wallet
/// reconstruction.
///
/// The hash of an output only covers its features, commitment,
/// and switch commitment. The range proof is expected to have its own hash
/// and is stored and committed to separately.
#[derive(Debug, Copy, Clone, Serialize, Deserialize)]
pub struct Output {
/// Options for an output's structure or use
pub features: OutputFeatures,
/// The homomorphic commitment representing the output amount
pub commit: Commitment,
/// The switch commitment hash, a 160 bit length blake2 hash of blind*J
/// The switch commitment hash, a 256 bit length blake2 hash of blind*J
pub switch_commit_hash: SwitchCommitHash,
/// A proof that the commitment is in the right range
pub proof: RangeProof,
@ -704,9 +699,13 @@ impl Writeable for Output {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
writer.write_u8(self.features.bits())?;
writer.write_fixed_bytes(&self.commit)?;
writer.write_fixed_bytes(&self.switch_commit_hash)?;
// Hash of an output doesn't cover the switch commit, it should
// be wound into the range proof separately
if writer.serialization_mode() != ser::SerializationMode::Hash {
writer.write_fixed_bytes(&self.switch_commit_hash)?;
}
// The hash of an output doesn't include the range proof, which
// is committed to separately
if writer.serialization_mode() == ser::SerializationMode::Full {
writer.write_bytes(&self.proof)?
}
@ -818,21 +817,6 @@ impl OutputIdentifier {
util::to_hex(self.commit.0.to_vec()),
)
}

/// Convert an output_identifier to a sum_commit representation
/// so we can use it to query the output MMR
pub fn as_sum_commit(&self) -> SumCommit {
SumCommit {
features: self.features,
commit: self.commit,
switch_commit_hash: SwitchCommitHash::zero(),
}
}

/// Convert a sum_commit back to an output_identifier.
pub fn from_sum_commit(sum_commit: &SumCommit) -> OutputIdentifier {
OutputIdentifier::new(sum_commit.features, &sum_commit.commit)
}
}

impl Writeable for OutputIdentifier {
@ -855,140 +839,73 @@ impl Readable for OutputIdentifier {
}
}
/// Wrapper to Output commitments to provide the Summable trait.
/// Yet another output version, used to read from and write to disk. It ends up
/// being far too awkward to reuse the write serialization modes for this.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub struct SumCommit {
pub struct OutputStoreable {
/// Output features (coinbase vs. regular transaction output)
/// We need to include this when hashing to ensure coinbase maturity can be enforced.
pub features: OutputFeatures,
/// Output commitment
pub commit: Commitment,
/// The corresponding switch commit hash
/// Switch commit hash
pub switch_commit_hash: SwitchCommitHash,
}

impl SumCommit {
impl OutputStoreable {
/// Build a new sum_commit.
pub fn new(
features: OutputFeatures,
commit: &Commitment,
switch_commit_hash: &SwitchCommitHash,
) -> SumCommit {
SumCommit {
features: features.clone(),
commit: commit.clone(),
switch_commit_hash: switch_commit_hash.clone(),
}
}

/// Build a new sum_commit from an existing output.
pub fn from_output(output: &Output) -> SumCommit {
SumCommit {
/// Build an OutputStoreable from an existing output.
pub fn from_output(output: &Output) -> OutputStoreable {
OutputStoreable {
features: output.features,
commit: output.commit,
switch_commit_hash: output.switch_commit_hash,
}
}

/// Build a new sum_commit from an existing input.
pub fn from_input(input: &Input) -> SumCommit {
SumCommit {
features: input.features,
commit: input.commit,
switch_commit_hash: SwitchCommitHash::zero(),
}
}
/// Return a regular output
pub fn to_output(self) -> Output {
Output {
features: self.features,
commit: self.commit,
switch_commit_hash: self.switch_commit_hash,
proof: RangeProof {
proof: [0; secp::constants::MAX_PROOF_SIZE],
plen: 0,
},
}
}

/// Hex string representation of a sum_commit.
pub fn to_hex(&self) -> String {
format!(
"{:b}{}{}",
self.features.bits(),
util::to_hex(self.commit.0.to_vec()),
self.switch_commit_hash.to_hex(),
)
}
}
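Note that to_output fills the proof field with a zeroed, zero-length RangeProof placeholder; the real proof lives in its own PMMR, so a caller that needs to verify the output must fetch the proof separately rather than rely on the reconstructed value.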
/// Outputs get summed through their commitments.
impl Summable for SumCommit {
type Sum = SumCommit;

fn sum(&self) -> SumCommit {
SumCommit {
commit: self.commit.clone(),
features: self.features.clone(),
switch_commit_hash: self.switch_commit_hash.clone(),
}
}

fn sum_len() -> usize {
secp::constants::PEDERSEN_COMMITMENT_SIZE + SWITCH_COMMIT_HASH_SIZE + 1
}
}
impl PMMRable for OutputStoreable {
fn len() -> usize {
1 + secp::constants::PEDERSEN_COMMITMENT_SIZE + SWITCH_COMMIT_HASH_SIZE
}
}

impl Writeable for SumCommit {
impl Writeable for OutputStoreable {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
writer.write_u8(self.features.bits())?;
self.commit.write(writer)?;
if writer.serialization_mode() == ser::SerializationMode::Full {
if writer.serialization_mode() != ser::SerializationMode::Hash {
self.switch_commit_hash.write(writer)?;
}
Ok(())
}
}

impl Readable for SumCommit {
impl Readable for OutputStoreable {
fn read(reader: &mut Reader) -> Result<SumCommit, ser::Error> {
fn read(reader: &mut Reader) -> Result<OutputStoreable, ser::Error> {
let features = OutputFeatures::from_bits(reader.read_u8()?).ok_or(
ser::Error::CorruptedData,
)?;
Ok(SumCommit {
Ok(OutputStoreable {
features: features,
commit: Commitment::read(reader)?,
switch_commit_hash: SwitchCommitHash::read(reader)?,
features: features,
})
}
}
impl ops::Add for SumCommit {
type Output = SumCommit;

fn add(self, other: SumCommit) -> SumCommit {
// Build a new commitment by summing the two commitments.
let secp = static_secp_instance();
let sum = match secp.lock().unwrap().commit_sum(
vec![
self.commit.clone(),
other.commit.clone(),
],
vec![],
) {
Ok(s) => s,
Err(_) => Commitment::from_vec(vec![1; 33]),
};

// Now build a new switch_commit_hash by concatenating the two switch_commit_hash values
// and hashing the result.
let mut bytes = self.switch_commit_hash.0.to_vec();
bytes.extend(other.switch_commit_hash.0.iter().cloned());
let key = SwitchCommitHashKey::zero();
let hash = blake2b(SWITCH_COMMIT_HASH_SIZE, &key.0, &bytes);
let hash = hash.as_bytes();
let mut h = [0; SWITCH_COMMIT_HASH_SIZE];
for i in 0..SWITCH_COMMIT_HASH_SIZE {
h[i] = hash[i];
}
let switch_commit_hash_sum = SwitchCommitHash(h);

SumCommit {
features: self.features | other.features,
commit: sum,
switch_commit_hash: switch_commit_hash_sum,
}
}
}
#[cfg(test)]
mod test {
use super::*;

@ -37,6 +37,8 @@ use util::secp::constants::{
SECRET_KEY_SIZE,
};

const BULLET_PROOF_SIZE: usize = 674;

/// Possible errors deriving from serializing or deserializing.
#[derive(Debug)]
pub enum Error {
@ -119,6 +121,9 @@ pub enum SerializationMode {
Hash,
/// Serialize everything that a signer of the object should know
SigHash,
/// Serialize for local storage, for instance in the case where
/// an output doesn't wish to store its range proof
Storage,
}
/// Implementations define how different numbers and binary structures are
@ -255,6 +260,7 @@ pub fn ser_vec<W: Writeable>(thing: &W) -> Result<Vec<u8>, Error> {
Ok(vec)
}

/// Utility to read from a binary source
struct BinReader<'a> {
source: &'a mut Read,
}
@ -364,7 +370,7 @@ impl Writeable for RangeProof {

impl Readable for RangeProof {
fn read(reader: &mut Reader) -> Result<RangeProof, Error> {
let p = try!(reader.read_limited_vec(MAX_PROOF_SIZE));
let p = try!(reader.read_limited_vec(BULLET_PROOF_SIZE));
let mut a = [0; MAX_PROOF_SIZE];
for i in 0..p.len() {
a[i] = p[i];
@ -376,6 +382,12 @@ impl Readable for RangeProof {
}
}

impl PMMRable for RangeProof {
fn len() -> usize {
BULLET_PROOF_SIZE
}
}
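The fixed BULLET_PROOF_SIZE matters here because PMMRable::len() (declared further below) is used to compute flat-file offsets, so every stored record of a given type must occupy the same number of bytes on disk.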
impl Readable for Signature {
fn read(reader: &mut Reader) -> Result<Signature, Error> {
let a = try!(reader.read_fixed_bytes(AGG_SIGNATURE_SIZE));
@ -542,6 +554,12 @@ impl Writeable for [u8; 4] {
}
}

/// Trait for types that can serialize and report their size
pub trait PMMRable: Readable + Writeable + Hashed + Clone {
/// Length in bytes
fn len() -> usize;
}
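As a sketch of what an implementation looks like, mirroring the TestElem used in the pmmr tests above (the Readable, Writeable and Hashed impls are elided):

	#[derive(Clone, Debug)]
	struct FourInts([u32; 4]);

	impl PMMRable for FourInts {
		fn len() -> usize {
			16 // four u32 values; must match the serialized byte size exactly
		}
	}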
/// Useful marker trait on types that can be sized byte slices
pub trait AsFixedBytes: Sized + AsRef<[u8]> {
/// The length in bytes
@ -283,38 +283,38 @@ fn get_utxos_by_height(base_addr: &String, api_server_port: u16, start_height: u

// Sumtree handler functions
fn get_sumtree_roots(base_addr: &String, api_server_port: u16) -> Result<api::SumTrees, Error> {
let url = format!("http://{}:{}/v1/sumtrees/roots", base_addr, api_server_port);
let url = format!("http://{}:{}/v1/pmmrtrees/roots", base_addr, api_server_port);
api::client::get::<api::SumTrees>(url.as_str()).map_err(|e| Error::API(e))
}

fn get_sumtree_lastutxos(base_addr: &String, api_server_port: u16, n: u64) -> Result<Vec<api::SumTreeNode>, Error> {
fn get_sumtree_lastutxos(base_addr: &String, api_server_port: u16, n: u64) -> Result<Vec<api::PmmrTreeNode>, Error> {
let url: String;
if n == 0 {
url = format!("http://{}:{}/v1/sumtrees/lastutxos", base_addr, api_server_port);
url = format!("http://{}:{}/v1/pmmrtrees/lastutxos", base_addr, api_server_port);
} else {
url = format!("http://{}:{}/v1/sumtrees/lastutxos?n={}", base_addr, api_server_port, n);
url = format!("http://{}:{}/v1/pmmrtrees/lastutxos?n={}", base_addr, api_server_port, n);
}
api::client::get::<Vec<api::SumTreeNode>>(url.as_str()).map_err(|e| Error::API(e))
api::client::get::<Vec<api::PmmrTreeNode>>(url.as_str()).map_err(|e| Error::API(e))
}

fn get_sumtree_lastrangeproofs(base_addr: &String, api_server_port: u16, n: u64) -> Result<Vec<api::SumTreeNode>, Error> {
fn get_sumtree_lastrangeproofs(base_addr: &String, api_server_port: u16, n: u64) -> Result<Vec<api::PmmrTreeNode>, Error> {
let url: String;
if n == 0 {
url = format!("http://{}:{}/v1/sumtrees/lastrangeproofs", base_addr, api_server_port);
url = format!("http://{}:{}/v1/pmmrtrees/lastrangeproofs", base_addr, api_server_port);
} else {
url = format!("http://{}:{}/v1/sumtrees/lastrangeproofs?n={}", base_addr, api_server_port, n);
url = format!("http://{}:{}/v1/pmmrtrees/lastrangeproofs?n={}", base_addr, api_server_port, n);
}
api::client::get::<Vec<api::SumTreeNode>>(url.as_str()).map_err(|e| Error::API(e))
api::client::get::<Vec<api::PmmrTreeNode>>(url.as_str()).map_err(|e| Error::API(e))
}

fn getsumtree_lastkernels(base_addr: &String, api_server_port: u16, n: u64) -> Result<Vec<api::SumTreeNode>, Error> {
fn getsumtree_lastkernels(base_addr: &String, api_server_port: u16, n: u64) -> Result<Vec<api::PmmrTreeNode>, Error> {
let url: String;
if n == 0 {
url = format!("http://{}:{}/v1/sumtrees/lastkernels", base_addr, api_server_port);
url = format!("http://{}:{}/v1/pmmrtrees/lastkernels", base_addr, api_server_port);
} else {
url = format!("http://{}:{}/v1/sumtrees/lastkernels?n={}", base_addr, api_server_port, n);
url = format!("http://{}:{}/v1/pmmrtrees/lastkernels?n={}", base_addr, api_server_port, n);
}
api::client::get::<Vec<api::SumTreeNode>>(url.as_str()).map_err(|e| Error::API(e))
api::client::get::<Vec<api::PmmrTreeNode>>(url.as_str()).map_err(|e| Error::API(e))
}

// Helper function to get a vec of commitment output ids from a vec of block outputs
@ -29,9 +29,9 @@ use blind::{BlindSum, BlindingFactor};
use extkey::{self, Identifier};

#[cfg(feature = "use-bullet-proofs")]
const USE_BULLET_PROOFS: bool = true;
pub const USE_BULLET_PROOFS: bool = true;
#[cfg(not(feature = "use-bullet-proofs"))]
const USE_BULLET_PROOFS: bool = false;
pub const USE_BULLET_PROOFS: bool = false;

#[derive(PartialEq, Eq, Clone, Debug)]
pub enum Error {
@ -30,7 +30,8 @@ extern crate rocksdb;
#[macro_use]
extern crate slog;

pub mod sumtree;
pub mod pmmr;
pub mod types;

const SEP: u8 = ':' as u8;

store/src/pmmr.rs (new file, 318 lines)
@ -0,0 +1,318 @@
// Copyright 2017 The Grin Developers
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Implementation of the persistent Backend for the prunable MMR tree.

use std::fs::{self};
use std::io::{self};
use std::marker::PhantomData;

use core::core::pmmr::{self, Backend};
use core::ser::{self, PMMRable};
use core::core::hash::Hash;
use util::LOGGER;
use types::{AppendOnlyFile, RemoveLog, read_ordered_vec, write_vec};

const PMMR_HASH_FILE: &'static str = "pmmr_hash.bin";
const PMMR_DATA_FILE: &'static str = "pmmr_data.bin";
const PMMR_RM_LOG_FILE: &'static str = "pmmr_rm_log.bin";
const PMMR_PRUNED_FILE: &'static str = "pmmr_pruned.bin";

/// Maximum number of nodes in the remove log before it gets flushed
pub const RM_LOG_MAX_NODES: usize = 10000;
/// PMMR persistent backend implementation. Relies on multiple facilities to
/// handle writing, reading and pruning.
///
/// * A main storage file appends Hash instances as they come. This
/// AppendOnlyFile is also backed by a mmap for reads.
/// * An in-memory backend buffers the latest batch of writes to ensure the
/// PMMR can always read recent values even if they haven't been flushed to
/// disk yet.
/// * A remove log tracks the positions that need to be pruned from the
/// main storage file.
pub struct PMMRBackend<T>
where
T: PMMRable,
{
data_dir: String,
hash_file: AppendOnlyFile,
data_file: AppendOnlyFile,
rm_log: RemoveLog,
pruned_nodes: pmmr::PruneList,
phantom: PhantomData<T>,
}

impl<T> Backend<T> for PMMRBackend<T>
where
T: PMMRable,
{
/// Append the provided Hashes to the backend storage.
#[allow(unused_variables)]
fn append(&mut self, position: u64, data: Vec<(Hash, Option<T>)>) -> Result<(), String> {
for d in data {
self.hash_file.append(&mut ser::ser_vec(&d.0).unwrap());
if let Some(elem) = d.1 {
self.data_file.append(&mut ser::ser_vec(&elem).unwrap());
}
}
Ok(())
}

/// Get a Hash by insertion position
fn get(&self, position: u64, include_data: bool) -> Option<(Hash, Option<T>)> {
// Check if this position has been pruned in the remove log or the
// pruned list
if self.rm_log.includes(position) {
return None;
}
let shift = self.pruned_nodes.get_shift(position);
if let None = shift {
return None;
}

// Read PMMR
// The MMR starts at 1, our binary backend starts at 0
let pos = position - 1;

// Must be on disk, doing a read at the correct position
let hash_record_len = 32;
let file_offset = ((pos - shift.unwrap()) as usize) * hash_record_len;
let data = self.hash_file.read(file_offset, hash_record_len);
let hash_val = match ser::deserialize(&mut &data[..]) {
Ok(h) => h,
Err(e) => {
error!(
LOGGER,
"Corrupted storage, could not read an entry from hash store: {:?}",
e
);
return None;
}
};

if !include_data {
return Some((hash_val, None));
}

// Optionally read flatfile storage to get data element
let flatfile_pos = pmmr::n_leaves(position)
- 1 - self.pruned_nodes.get_leaf_shift(position).unwrap();
let record_len = T::len();
let file_offset = flatfile_pos as usize * record_len;
let data = self.data_file.read(file_offset, record_len);
let data = match ser::deserialize(&mut &data[..]) {
Ok(elem) => Some(elem),
Err(e) => {
error!(
LOGGER,
"Corrupted storage, could not read an entry from backend flatfile store: {:?}",
e
);
None
}
};

Some((hash_val, data))
}
|
|
||||||
|
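    // Worked example for the offset arithmetic in get() above, with
    // hypothetical numbers: reading MMR position 5 when get_shift(5)
    // reports 2 pruned positions before it puts the hash record at byte
    // offset (5 - 1 - 2) * 32 = 64 of the compacted hash file. The data
    // element is leaf-indexed instead, located through n_leaves() and
    // get_leaf_shift().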
    fn rewind(&mut self, position: u64, index: u32) -> Result<(), String> {
        self.rm_log
            .rewind(index)
            .map_err(|e| format!("Could not truncate remove log: {}", e))?;

        let shift = self.pruned_nodes.get_shift(position).unwrap_or(0);
        let record_len = 32;
        let file_pos = (position - shift) * (record_len as u64);
        self.hash_file.rewind(file_pos);

        // Data file
        let flatfile_pos = pmmr::n_leaves(position) - 1;
        let file_pos = (flatfile_pos as usize + 1) * T::len();
        self.data_file.rewind(file_pos as u64);
        Ok(())
    }

    /// Remove Hashes by insertion position
    fn remove(&mut self, positions: Vec<u64>, index: u32) -> Result<(), String> {
        self.rm_log.append(positions, index).map_err(|e| {
            format!("Could not write to log storage, disk full? {:?}", e)
        })
    }

    /// Return data file path
    fn get_data_file_path(&self) -> String {
        self.data_file.path()
    }
}

impl<T> PMMRBackend<T>
where
    T: PMMRable,
{
    /// Instantiates a new PMMR backend that will use the provided directory
    /// to store its files.
    pub fn new(data_dir: String) -> io::Result<PMMRBackend<T>> {
        let hash_file = AppendOnlyFile::open(format!("{}/{}", data_dir, PMMR_HASH_FILE))?;
        let rm_log = RemoveLog::open(format!("{}/{}", data_dir, PMMR_RM_LOG_FILE))?;
        let prune_list = read_ordered_vec(format!("{}/{}", data_dir, PMMR_PRUNED_FILE), 8)?;
        let data_file = AppendOnlyFile::open(format!("{}/{}", data_dir, PMMR_DATA_FILE))?;

        Ok(PMMRBackend {
            data_dir: data_dir,
            hash_file: hash_file,
            data_file: data_file,
            rm_log: rm_log,
            pruned_nodes: pmmr::PruneList {
                pruned_nodes: prune_list,
            },
            phantom: PhantomData,
        })
    }

    /// Total size of the PMMR stored by this backend. Only produces the fully
    /// sync'd size.
    pub fn unpruned_size(&self) -> io::Result<u64> {
        let total_shift = self.pruned_nodes.get_shift(::std::u64::MAX).unwrap();
        let record_len = 32;
        let sz = self.hash_file.size()?;
        Ok(sz / record_len + total_shift)
    }

    /// Syncs all files to disk. A call to sync is required to ensure all the
    /// data has been successfully written to disk.
    pub fn sync(&mut self) -> io::Result<()> {
        if let Err(e) = self.hash_file.flush() {
            return Err(io::Error::new(
                io::ErrorKind::Interrupted,
                format!("Could not write to log hash storage, disk full? {:?}", e),
            ));
        }
        if let Err(e) = self.data_file.flush() {
            return Err(io::Error::new(
                io::ErrorKind::Interrupted,
                format!("Could not write to log data storage, disk full? {:?}", e),
            ));
        }
        self.rm_log.flush()?;
        Ok(())
    }

    /// Discard the current, non synced state of the backend.
    pub fn discard(&mut self) {
        self.hash_file.discard();
        self.rm_log.discard();
        self.data_file.discard();
    }

    /// Return the data file path
    pub fn data_file_path(&self) -> String {
        self.get_data_file_path()
    }
    /// Checks the length of the remove log to see if it should get compacted.
    /// If so, the remove log is flushed into the pruned list, which itself
    /// gets saved, and the main hash and data files are rewritten, cutting
    /// the removed data.
    ///
    /// If a max_len strictly greater than 0 is provided, the value will be
    /// used to decide whether the remove log has reached its maximum length,
    /// otherwise the RM_LOG_MAX_NODES default value is used.
    ///
    /// TODO whatever is calling this should also clean up the commit to
    /// position index in db
    pub fn check_compact(&mut self, max_len: usize) -> io::Result<()> {
        if !(max_len > 0 && self.rm_log.len() > max_len
            || max_len == 0 && self.rm_log.len() > RM_LOG_MAX_NODES)
        {
            return Ok(());
        }

        // 0. validate none of the nodes in the rm log are in the prune list (to
        // avoid accidental double compaction)
        for pos in &self.rm_log.removed[..] {
            if let None = self.pruned_nodes.pruned_pos(pos.0) {
                // TODO we likely can recover from this by directly jumping to 3
                error!(
                    LOGGER,
                    "The remove log contains nodes that are already in the pruned \
                     list, a previous compaction likely failed."
                );
                return Ok(());
            }
        }

        // 1. save the hash file to a compact copy, skipping data that's in the
        // remove list
        let tmp_prune_file_hash = format!("{}/{}.hashprune", self.data_dir, PMMR_HASH_FILE);
        let record_len = 32;
        let to_rm = self.rm_log
            .removed
            .iter()
            .map(|&(pos, _)| {
                let shift = self.pruned_nodes.get_shift(pos);
                (pos - 1 - shift.unwrap()) * record_len
            })
            .collect();
        self.hash_file
            .save_prune(tmp_prune_file_hash.clone(), to_rm, record_len)?;

        // 2. and the same with the data file
        let tmp_prune_file_data = format!("{}/{}.dataprune", self.data_dir, PMMR_DATA_FILE);
        let record_len = T::len() as u64;
        let to_rm = self.rm_log
            .removed
            .clone()
            .into_iter()
            .filter(|&(pos, _)| pmmr::bintree_postorder_height(pos) == 0)
            .map(|(pos, _)| {
                let shift = self.pruned_nodes.get_leaf_shift(pos).unwrap();
                let pos = pmmr::n_leaves(pos as u64);
                (pos - 1 - shift) * record_len
            })
            .collect();
        self.data_file
            .save_prune(tmp_prune_file_data.clone(), to_rm, record_len)?;

        // 3. update the prune list and save it in place
        for &(rm_pos, _) in &self.rm_log.removed[..] {
            self.pruned_nodes.add(rm_pos);
        }
        write_vec(
            format!("{}/{}", self.data_dir, PMMR_PRUNED_FILE),
            &self.pruned_nodes.pruned_nodes,
        )?;

        // 4. move the compact copy of hashes to the hash file and re-open it
        fs::rename(
            tmp_prune_file_hash.clone(),
            format!("{}/{}", self.data_dir, PMMR_HASH_FILE),
        )?;
        self.hash_file = AppendOnlyFile::open(format!("{}/{}", self.data_dir, PMMR_HASH_FILE))?;

        // 5. and the same with the data file
        fs::rename(
            tmp_prune_file_data.clone(),
            format!("{}/{}", self.data_dir, PMMR_DATA_FILE),
        )?;
        self.data_file = AppendOnlyFile::open(format!("{}/{}", self.data_dir, PMMR_DATA_FILE))?;

        // 6. truncate the rm log
        self.rm_log.rewind(0)?;
        self.rm_log.flush()?;

        Ok(())
    }
}
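For orientation, a minimal sketch of how this backend is driven, modeled on the tests further down in this diff (the directory name is made up; `TestElem` is the test element type defined at the bottom):

    fn pmmr_backend_sketch() -> std::io::Result<()> {
        // open (or create) the backing files under the given directory
        let mut backend: PMMRBackend<TestElem> =
            PMMRBackend::new("target/pmmr_sketch".to_string())?;

        // appends accumulate in memory until sync() flushes the hash file,
        // the data flatfile and the remove log to disk
        backend.sync()?;

        // read the hash at MMR position 1, skipping the data element
        let _hash_only = backend.get(1, false);

        // compact once the remove log exceeds RM_LOG_MAX_NODES (max_len == 0)
        backend.check_compact(0)?;
        Ok(())
    }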
@@ -1,4 +1,4 @@
-// Copyright 2017 The Grin Developers
+// Copyright 2018 The Grin Developers
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
@@ -11,33 +11,22 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-//! Implementation of the persistent Backend for the prunable MMR sum-tree.
+//! Common storage-related types

 use memmap;

 use std::cmp;
 use std::fs::{self, File, OpenOptions};
 use std::io::{self, BufRead, BufReader, ErrorKind, Write};
-use std::marker::PhantomData;
 use std::os::unix::io::AsRawFd;
-use std::path::Path;
 use std::io::Read;
+use std::path::Path;

 #[cfg(any(target_os = "linux"))]
 use libc::{ftruncate64, off64_t};
 #[cfg(not(any(target_os = "linux", target_os = "android")))]
 use libc::{ftruncate as ftruncate64, off_t as off64_t};

-use core::core::pmmr::{self, Backend, HashSum, Summable};
 use core::ser;
-use util::LOGGER;

-const PMMR_DATA_FILE: &'static str = "pmmr_dat.bin";
-const PMMR_RM_LOG_FILE: &'static str = "pmmr_rm_log.bin";
-const PMMR_PRUNED_FILE: &'static str = "pmmr_pruned.bin";

-/// Maximum number of nodes in the remove log before it gets flushed
-pub const RM_LOG_MAX_NODES: usize = 10000;

 /// Wrapper for a file that can be read at any position (random read) but for
 /// which writes are append only. Reads are backed by a memory map (mmap(2)),
@@ -54,6 +43,7 @@ pub struct AppendOnlyFile {
     buffer_start: usize,
     buffer: Vec<u8>,
     buffer_start_bak: usize,
+    unflushed_data_size: usize,
 }

 impl AppendOnlyFile {
@@ -71,6 +61,7 @@ impl AppendOnlyFile {
         buffer_start: 0,
         buffer: vec![],
         buffer_start_bak: 0,
+        unflushed_data_size: 0,
     };
     if let Ok(sz) = aof.size() {
         if sz > 0 {
@@ -84,6 +75,7 @@ impl AppendOnlyFile {
     /// Append data to the file. Until the append-only file is synced, data is
     /// only written to memory.
     pub fn append(&mut self, buf: &mut Vec<u8>) {
+        self.unflushed_data_size += buf.len();
         self.buffer.append(buf);
     }

@@ -110,6 +102,7 @@ impl AppendOnlyFile {
         self.file.sync_data()?;
         self.buffer = vec![];
         self.mmap = Some(unsafe { memmap::Mmap::map(&self.file)? });
+        self.unflushed_data_size = 0;
         Ok(())
     }

@@ -121,11 +114,12 @@ impl AppendOnlyFile {
             self.buffer_start_bak = 0;
         }
         self.buffer = vec![];
+        self.unflushed_data_size = 0;
     }

     /// Read length bytes of data at offset from the file. Leverages the memory
     /// map.
-    fn read(&self, offset: usize, length: usize) -> Vec<u8> {
+    pub fn read(&self, offset: usize, length: usize) -> Vec<u8> {
         if offset >= self.buffer_start {
             let offset = offset - self.buffer_start;
             return self.buffer[offset..(offset + length)].to_vec();
@@ -138,7 +132,7 @@ impl AppendOnlyFile {
     }

     /// Truncates the underlying file to the provided offset
-    fn truncate(&self, offs: usize) -> io::Result<()> {
+    pub fn truncate(&self, offs: usize) -> io::Result<()> {
         let fd = self.file.as_raw_fd();
         let res = unsafe { ftruncate64(fd, offs as off64_t) };
         if res == -1 {
@@ -150,7 +144,7 @@ impl AppendOnlyFile {

     /// Saves a copy of the current file content, skipping data at the provided
     /// prune indices. The prune Vec must be ordered.
-    fn save_prune(&self, target: String, prune_offs: Vec<u64>, prune_len: u64) -> io::Result<()> {
+    pub fn save_prune(&self, target: String, prune_offs: Vec<u64>, prune_len: u64) -> io::Result<()> {
         let mut reader = File::open(self.path.clone())?;
         let mut writer = File::create(target)?;

@@ -188,10 +182,15 @@ impl AppendOnlyFile {
     }

     /// Current size of the file in bytes.
-    fn size(&self) -> io::Result<u64> {
+    pub fn size(&self) -> io::Result<u64> {
         fs::metadata(&self.path).map(|md| md.len())
     }

+    /// Current size of file in bytes + size of unsaved data
+    pub fn size_with_unsaved(&self) -> u64 {
+        self.size().unwrap() + self.unflushed_data_size as u64
+    }
+
     /// Path of the underlying file
     pub fn path(&self) -> String {
         self.path.clone()
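The new `unflushed_data_size` counter exists so callers can size the file including data still sitting in the write buffer. A small sketch of the intended behaviour (file name made up):

    let mut aof = AppendOnlyFile::open("target/aof_sketch.bin".to_string())?;
    let on_disk = aof.size()?;              // bytes already flushed
    aof.append(&mut vec![0u8; 32]);         // buffered in memory only
    assert_eq!(aof.size_with_unsaved(), on_disk + 32);
    aof.flush()?;                           // flushing resets the counter
    assert_eq!(aof.size_with_unsaved(), aof.size()?);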
@@ -203,10 +202,10 @@ impl AppendOnlyFile {
 /// checking of whether a piece of data has been marked for deletion. When the
 /// log becomes too long, the MMR backend will actually remove chunks from the
 /// MMR data file and truncate the remove log.
-struct RemoveLog {
+pub struct RemoveLog {
     path: String,
-    // Ordered vector of MMR positions that should get eventually removed.
-    removed: Vec<(u64, u32)>,
+    /// Ordered vector of MMR positions that should get eventually removed.
+    pub removed: Vec<(u64, u32)>,
     // Holds positions temporarily until flush is called.
     removed_tmp: Vec<(u64, u32)>,
     // Holds truncated removed temporarily until discarded or committed
@@ -216,7 +215,7 @@ struct RemoveLog {
 impl RemoveLog {
     /// Open the remove log file. The content of the file will be read in memory
     /// for fast checking.
-    fn open(path: String) -> io::Result<RemoveLog> {
+    pub fn open(path: String) -> io::Result<RemoveLog> {
         let removed = read_ordered_vec(path.clone(), 12)?;
         Ok(RemoveLog {
             path: path,
@@ -227,7 +226,7 @@ impl RemoveLog {
     }

     /// Truncates and empties the remove log.
-    fn rewind(&mut self, last_offs: u32) -> io::Result<()> {
+    pub fn rewind(&mut self, last_offs: u32) -> io::Result<()> {
         // simplifying assumption: we always remove older than what's in tmp
         self.removed_tmp = vec![];

@@ -251,7 +250,7 @@ impl RemoveLog {

     /// Append a set of new positions to the remove log. Both adds those
     /// positions to the ordered in-memory set and to the file.
-    fn append(&mut self, elmts: Vec<u64>, index: u32) -> io::Result<()> {
+    pub fn append(&mut self, elmts: Vec<u64>, index: u32) -> io::Result<()> {
         for elmt in elmts {
             match self.removed_tmp.binary_search(&(elmt, index)) {
                 Ok(_) => continue,
@@ -264,7 +263,7 @@ impl RemoveLog {
     }

     /// Flush the positions to remove to file.
-    fn flush(&mut self) -> io::Result<()> {
+    pub fn flush(&mut self) -> io::Result<()> {
         let mut file = File::create(self.path.clone())?;
         for elmt in &self.removed_tmp {
             match self.removed.binary_search(&elmt) {
@@ -283,7 +282,7 @@ impl RemoveLog {
     }

     /// Discard pending changes
-    fn discard(&mut self) {
+    pub fn discard(&mut self) {
         if self.removed_bak.len() > 0 {
             self.removed = self.removed_bak.clone();
             self.removed_bak = vec![];
@@ -292,12 +291,30 @@ impl RemoveLog {
     }

     /// Whether the remove log currently includes the provided position.
-    fn includes(&self, elmt: u64) -> bool {
+    pub fn includes(&self, elmt: u64) -> bool {
         include_tuple(&self.removed, elmt) || include_tuple(&self.removed_tmp, elmt)
     }

+    /// How many removed positions exist before this particular position
+    pub fn get_shift(&self, elmt: u64) -> usize {
+        let mut complete_list = self.removed.clone();
+        for e in &self.removed_tmp {
+            match self.removed.binary_search(&e) {
+                Ok(_) => continue,
+                Err(idx) => {
+                    complete_list.insert(idx, *e);
+                }
+            }
+        }
+        let pos = match complete_list.binary_search(&(elmt, 0)) {
+            Ok(idx) => idx + 1,
+            Err(idx) => idx,
+        };
+        complete_list.split_at(pos).0.len()
+    }
+
     /// Number of positions stored in the remove log.
-    fn len(&self) -> usize {
+    pub fn len(&self) -> usize {
         self.removed.len()
     }
 }
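To make `get_shift` concrete, a short example with made-up positions: after logging positions 2, 4 and 7 for removal and flushing, querying position 5 counts the two removed positions ordered before it:

    let mut rm_log = RemoveLog::open("target/rm_log_sketch.bin".to_string())?;
    rm_log.append(vec![2, 4, 7], 1)?;
    rm_log.flush()?;
    assert_eq!(rm_log.get_shift(5), 2); // positions 2 and 4 precede 5
    assert_eq!(rm_log.get_shift(1), 0); // nothing removed before position 1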
@@ -311,216 +328,8 @@ fn include_tuple(v: &Vec<(u64, u32)>, e: u64) -> bool {
     false
 }

-/// PMMR persistent backend implementation. Relies on multiple facilities to
-/// handle writing, reading and pruning.
-///
-/// * A main storage file appends HashSum instances as they come. This
-/// AppendOnlyFile is also backed by a mmap for reads.
-/// * An in-memory backend buffers the latest batch of writes to ensure the
-/// PMMR can always read recent values even if they haven't been flushed to
-/// disk yet.
-/// * A remove log tracks the positions that need to be pruned from the
-/// main storage file.
-pub struct PMMRBackend<T>
-where
-    T: Summable + Clone,
-{
-    data_dir: String,
-    hashsum_file: AppendOnlyFile,
-    remove_log: RemoveLog,
-    pruned_nodes: pmmr::PruneList,
-    phantom: PhantomData<T>,
-}
-
-impl<T> Backend<T> for PMMRBackend<T>
-where
-    T: Summable + Clone,
-{
-    /// Append the provided HashSums to the backend storage.
-    #[allow(unused_variables)]
-    fn append(&mut self, position: u64, data: Vec<HashSum<T>>) -> Result<(), String> {
-        for d in data {
-            self.hashsum_file.append(&mut ser::ser_vec(&d).unwrap());
-        }
-        Ok(())
-    }
-
-    /// Get a HashSum by insertion position
-    fn get(&self, position: u64) -> Option<HashSum<T>> {
-        // Check if this position has been pruned in the remove log or the
-        // pruned list
-        if self.remove_log.includes(position) {
-            return None;
-        }
-        let shift = self.pruned_nodes.get_shift(position);
-        if let None = shift {
-            return None;
-        }
-
-        // The MMR starts at 1, our binary backend starts at 0
-        let pos = position - 1;
-
-        // Must be on disk, doing a read at the correct position
-        let record_len = 32 + T::sum_len();
-        let file_offset = ((pos - shift.unwrap()) as usize) * record_len;
-        let data = self.hashsum_file.read(file_offset, record_len);
-        match ser::deserialize(&mut &data[..]) {
-            Ok(hashsum) => Some(hashsum),
-            Err(e) => {
-                error!(
-                    LOGGER,
-                    "Corrupted storage, could not read an entry from sum tree store: {:?}",
-                    e
-                );
-                None
-            }
-        }
-    }
-
-    fn rewind(&mut self, position: u64, index: u32) -> Result<(), String> {
-        self.remove_log
-            .rewind(index)
-            .map_err(|e| format!("Could not truncate remove log: {}", e))?;
-
-        let shift = self.pruned_nodes.get_shift(position).unwrap_or(0);
-        let record_len = 32 + T::sum_len();
-        let file_pos = (position - shift) * (record_len as u64);
-        self.hashsum_file.rewind(file_pos);
-        Ok(())
-    }
-
-    /// Remove HashSums by insertion position
-    fn remove(&mut self, positions: Vec<u64>, index: u32) -> Result<(), String> {
-        self.remove_log.append(positions, index).map_err(|e| {
-            format!("Could not write to log storage, disk full? {:?}", e)
-        })
-    }
-}
-
-impl<T> PMMRBackend<T>
-where
-    T: Summable + Clone,
-{
-    /// Instantiates a new PMMR backend that will use the provided directory
-    /// to store its files.
-    pub fn new(data_dir: String) -> io::Result<PMMRBackend<T>> {
-        let hs_file = AppendOnlyFile::open(format!("{}/{}", data_dir, PMMR_DATA_FILE))?;
-        let rm_log = RemoveLog::open(format!("{}/{}", data_dir, PMMR_RM_LOG_FILE))?;
-        let prune_list = read_ordered_vec(format!("{}/{}", data_dir, PMMR_PRUNED_FILE), 8)?;
-
-        Ok(PMMRBackend {
-            data_dir: data_dir,
-            hashsum_file: hs_file,
-            remove_log: rm_log,
-            pruned_nodes: pmmr::PruneList {
-                pruned_nodes: prune_list,
-            },
-            phantom: PhantomData,
-        })
-    }
-
-    /// Total size of the PMMR stored by this backend. Only produces the fully
-    /// sync'd size.
-    pub fn unpruned_size(&self) -> io::Result<u64> {
-        let total_shift = self.pruned_nodes.get_shift(::std::u64::MAX).unwrap();
-        let record_len = 32 + T::sum_len() as u64;
-        let sz = self.hashsum_file.size()?;
-        Ok(sz / record_len + total_shift)
-    }
-
-    /// Syncs all files to disk. A call to sync is required to ensure all the
-    /// data has been successfully written to disk.
-    pub fn sync(&mut self) -> io::Result<()> {
-        if let Err(e) = self.hashsum_file.flush() {
-            return Err(io::Error::new(
-                io::ErrorKind::Interrupted,
-                format!("Could not write to log storage, disk full? {:?}", e),
-            ));
-        }
-
-        self.remove_log.flush()?;
-        Ok(())
-    }
-
-    /// Discard the current, non synced state of the backend.
-    pub fn discard(&mut self) {
-        self.hashsum_file.discard();
-        self.remove_log.discard();
-    }
-
-    /// Checks the length of the remove log to see if it should get compacted.
-    /// If so, the remove log is flushed into the pruned list, which itself
-    /// gets saved, and the main hashsum data file is rewritten, cutting the
-    /// removed data.
-    ///
-    /// If a max_len strictly greater than 0 is provided, the value will be
-    /// used to decide whether the remove log has reached its maximum length,
-    /// otherwise the RM_LOG_MAX_NODES default value is used.
-    ///
-    /// TODO whatever is calling this should also clean up the commit to
-    /// position index in db
-    pub fn check_compact(&mut self, max_len: usize) -> io::Result<()> {
-        if !(max_len > 0 && self.remove_log.len() > max_len
-            || max_len == 0 && self.remove_log.len() > RM_LOG_MAX_NODES)
-        {
-            return Ok(());
-        }
-
-        // 0. validate none of the nodes in the rm log are in the prune list (to
-        // avoid accidental double compaction)
-        for pos in &self.remove_log.removed[..] {
-            if let None = self.pruned_nodes.pruned_pos(pos.0) {
-                // TODO we likely can recover from this by directly jumping to 3
-                error!(
-                    LOGGER,
-                    "The remove log contains nodes that are already in the pruned \
-                     list, a previous compaction likely failed."
-                );
-                return Ok(());
-            }
-        }
-
-        // 1. save hashsum file to a compact copy, skipping data that's in the
-        // remove list
-        let tmp_prune_file = format!("{}/{}.prune", self.data_dir, PMMR_DATA_FILE);
-        let record_len = (32 + T::sum_len()) as u64;
-        let to_rm = self.remove_log
-            .removed
-            .iter()
-            .map(|&(pos, _)| {
-                let shift = self.pruned_nodes.get_shift(pos);
-                (pos - 1 - shift.unwrap()) * record_len
-            })
-            .collect();
-        self.hashsum_file
-            .save_prune(tmp_prune_file.clone(), to_rm, record_len)?;
-
-        // 2. update the prune list and save it in place
-        for &(rm_pos, _) in &self.remove_log.removed[..] {
-            self.pruned_nodes.add(rm_pos);
-        }
-        write_vec(
-            format!("{}/{}", self.data_dir, PMMR_PRUNED_FILE),
-            &self.pruned_nodes.pruned_nodes,
-        )?;
-
-        // 3. move the compact copy to the hashsum file and re-open it
-        fs::rename(
-            tmp_prune_file.clone(),
-            format!("{}/{}", self.data_dir, PMMR_DATA_FILE),
-        )?;
-        self.hashsum_file = AppendOnlyFile::open(format!("{}/{}", self.data_dir, PMMR_DATA_FILE))?;
-
-        // 4. truncate the rm log
-        self.remove_log.rewind(0)?;
-        self.remove_log.flush()?;
-
-        Ok(())
-    }
-}
-
-// Read an ordered vector of scalars from a file.
-fn read_ordered_vec<T>(path: String, elmt_len: usize) -> io::Result<Vec<T>>
+/// Read an ordered vector of scalars from a file.
+pub fn read_ordered_vec<T>(path: String, elmt_len: usize) -> io::Result<Vec<T>>
 where
     T: ser::Readable + cmp::Ord,
 {
@@ -557,7 +366,8 @@ where
     Ok(ovec)
 }

-fn write_vec<T>(path: String, v: &Vec<T>) -> io::Result<()>
+/// Writes an ordered vector to a file
+pub fn write_vec<T>(path: String, v: &Vec<T>) -> io::Result<()>
 where
     T: ser::Writeable,
 {
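With `read_ordered_vec` and `write_vec` now public, the prune list round-trips through a plain binary file; each `u64` entry serializes to the 8 bytes the backend passes as `elmt_len`. A sketch (path made up):

    let pruned: Vec<u64> = vec![1, 4, 5];
    write_vec("target/pruned_sketch.bin".to_string(), &pruned)?;
    let reloaded: Vec<u64> = read_ordered_vec("target/pruned_sketch.bin".to_string(), 8)?;
    assert_eq!(pruned, reloaded);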
@@ -20,13 +20,13 @@ extern crate time;
 use std::fs;

 use core::ser::*;
-use core::core::pmmr::{Backend, HashSum, Summable, PMMR};
-use core::core::hash::Hashed;
+use core::core::pmmr::{PMMR, Backend};
+use core::core::hash::{Hash, Hashed};

 #[test]
-fn sumtree_append() {
+fn pmmr_append() {
     let (data_dir, elems) = setup("append");
-    let mut backend = store::sumtree::PMMRBackend::new(data_dir.to_string()).unwrap();
+    let mut backend = store::pmmr::PMMRBackend::new(data_dir.to_string()).unwrap();

     // adding first set of 4 elements and sync
     let mut mmr_size = load(0, &elems[0..4], &mut backend);
@@ -37,31 +37,22 @@ fn sumtree_append() {
     backend.sync().unwrap();

     // check the resulting backend store and the computation of the root
-    let hash = Hashed::hash(&elems[0].clone());
-    let sum = elems[0].sum();
-    let node_hash = (1 as u64, &sum, hash).hash();
+    let node_hash = elems[0].hash();
     assert_eq!(
-        backend.get(1),
-        Some(HashSum {
-            hash: node_hash,
-            sum: sum,
-        })
+        backend.get(1, false).expect("").0,
+        node_hash
     );

-    let sum2 = HashSum::from_summable(1, &elems[0])
-        + HashSum::from_summable(2, &elems[1]);
+    let sum2 = elems[0].hash() + elems[1].hash();
     let sum4 = sum2
-        + (HashSum::from_summable(4, &elems[2])
-        + HashSum::from_summable(5, &elems[3]));
+        + (elems[2].hash() + elems[3].hash());
     let sum8 = sum4
-        + ((HashSum::from_summable(8, &elems[4])
-        + HashSum::from_summable(9, &elems[5]))
-        + (HashSum::from_summable(11, &elems[6])
-        + HashSum::from_summable(12, &elems[7])));
-    let sum9 = sum8 + HashSum::from_summable(16, &elems[8]);
+        + ((elems[4].hash() + elems[5].hash())
+        + (elems[6].hash() + elems[7].hash()));
+    let sum9 = sum8 + elems[8].hash();

     {
-        let pmmr = PMMR::at(&mut backend, mmr_size);
+        let pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
         assert_eq!(pmmr.root(), sum9);
     }

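With the sums gone, the expected root is assembled purely from hashes; judging by the test, `+` on two `Hash` values stands in for the parent-hash computation. The nine-leaf tree the test builds then reduces to (hash names illustrative):

    // sum2 = h0 + h1
    // sum4 = sum2 + (h2 + h3)
    // sum8 = sum4 + ((h4 + h5) + (h6 + h7))
    // sum9 = sum8 + h8   <-- the root checked against pmmr.root()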
@@ -69,75 +60,81 @@ fn sumtree_append() {
 }

 #[test]
-fn sumtree_prune_compact() {
+fn pmmr_prune_compact() {
     let (data_dir, elems) = setup("prune_compact");

     // setup the mmr store with all elements
-    let mut backend = store::sumtree::PMMRBackend::new(data_dir.to_string()).unwrap();
+    let mut backend = store::pmmr::PMMRBackend::new(data_dir.to_string()).unwrap();
     let mmr_size = load(0, &elems[..], &mut backend);
     backend.sync().unwrap();

     // save the root
-    let root: HashSum<TestElem>;
+    let root: Hash;
     {
-        let pmmr = PMMR::at(&mut backend, mmr_size);
+        let pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
         root = pmmr.root();
     }

     // pruning some choice nodes
     {
-        let mut pmmr = PMMR::at(&mut backend, mmr_size);
+        let mut pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
         pmmr.prune(1, 1).unwrap();
         pmmr.prune(4, 1).unwrap();
         pmmr.prune(5, 1).unwrap();
     }
     backend.sync().unwrap();

-    // check the root
+    // check the root and stored data
     {
-        let pmmr = PMMR::at(&mut backend, mmr_size);
+        let pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
         assert_eq!(root, pmmr.root());
+        // check we can still retrieve same element from leaf index 2
+        assert_eq!(pmmr.get(2, true).unwrap().1.unwrap(), TestElem([0, 0, 0, 2]));
     }

     // compact
     backend.check_compact(2).unwrap();

-    // recheck the root
+    // recheck the root and stored data
     {
-        let pmmr = PMMR::at(&mut backend, mmr_size);
+        let pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
         assert_eq!(root, pmmr.root());
+        assert_eq!(pmmr.get(2, true).unwrap().1.unwrap(), TestElem([0, 0, 0, 2]));
+        assert_eq!(pmmr.get(11, true).unwrap().1.unwrap(), TestElem([0, 0, 0, 7]));
     }

     teardown(data_dir);
 }

 #[test]
-fn sumtree_reload() {
+fn pmmr_reload() {
     let (data_dir, elems) = setup("reload");

     // set everything up with a first backend
     let mmr_size: u64;
-    let root: HashSum<TestElem>;
+    let root: Hash;
     {
-        let mut backend = store::sumtree::PMMRBackend::new(data_dir.to_string()).unwrap();
+        let mut backend = store::pmmr::PMMRBackend::new(data_dir.to_string()).unwrap();
         mmr_size = load(0, &elems[..], &mut backend);
         backend.sync().unwrap();

         // save the root and prune some nodes so we have prune data
         {
-            let mut pmmr = PMMR::at(&mut backend, mmr_size);
+            let mut pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
+            pmmr.dump(false);
             root = pmmr.root();
             pmmr.prune(1, 1).unwrap();
             pmmr.prune(4, 1).unwrap();
         }
         backend.sync().unwrap();

         backend.check_compact(1).unwrap();
         backend.sync().unwrap();
         assert_eq!(backend.unpruned_size().unwrap(), mmr_size);

         // prune some more to get rm log data
         {
-            let mut pmmr = PMMR::at(&mut backend, mmr_size);
+            let mut pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
             pmmr.prune(5, 1).unwrap();
         }
         backend.sync().unwrap();
@@ -146,37 +143,38 @@ fn sumtree_reload() {

     // create a new backend and check everything is kosher
     {
-        let mut backend = store::sumtree::PMMRBackend::new(data_dir.to_string()).unwrap();
+        let mut backend: store::pmmr::PMMRBackend<TestElem> =
+            store::pmmr::PMMRBackend::new(data_dir.to_string()).unwrap();
         assert_eq!(backend.unpruned_size().unwrap(), mmr_size);
         {
-            let pmmr = PMMR::at(&mut backend, mmr_size);
+            let pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
             assert_eq!(root, pmmr.root());
         }
-        assert_eq!(backend.get(5), None);
+        assert_eq!(backend.get(5, false), None);
     }

     teardown(data_dir);
 }

 #[test]
-fn sumtree_rewind() {
+fn pmmr_rewind() {
     let (data_dir, elems) = setup("rewind");
-    let mut backend = store::sumtree::PMMRBackend::new(data_dir.clone()).unwrap();
+    let mut backend = store::pmmr::PMMRBackend::new(data_dir.clone()).unwrap();

     // adding elements and keeping the corresponding root
     let mut mmr_size = load(0, &elems[0..4], &mut backend);
     backend.sync().unwrap();
-    let root1: HashSum<TestElem>;
+    let root1: Hash;
     {
-        let pmmr = PMMR::at(&mut backend, mmr_size);
+        let pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
         root1 = pmmr.root();
     }

     mmr_size = load(mmr_size, &elems[4..6], &mut backend);
     backend.sync().unwrap();
-    let root2: HashSum<TestElem>;
+    let root2: Hash;
     {
-        let pmmr = PMMR::at(&mut backend, mmr_size);
+        let pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
         root2 = pmmr.root();
     }

@@ -185,7 +183,7 @@ fn sumtree_rewind() {

     // prune and compact the 2 first elements to spice things up
     {
-        let mut pmmr = PMMR::at(&mut backend, mmr_size);
+        let mut pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
         pmmr.prune(1, 1).unwrap();
         pmmr.prune(2, 1).unwrap();
     }
@@ -194,24 +192,24 @@ fn sumtree_rewind() {

     // rewind and check the roots still match
     {
-        let mut pmmr = PMMR::at(&mut backend, mmr_size);
+        let mut pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
         pmmr.rewind(9, 3).unwrap();
         assert_eq!(pmmr.root(), root2);
     }
     backend.sync().unwrap();
     {
-        let pmmr = PMMR::at(&mut backend, 10);
+        let pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, 10);
         assert_eq!(pmmr.root(), root2);
     }

     {
-        let mut pmmr = PMMR::at(&mut backend, 10);
+        let mut pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, 10);
         pmmr.rewind(5, 3).unwrap();
         assert_eq!(pmmr.root(), root1);
     }
     backend.sync().unwrap();
     {
-        let pmmr = PMMR::at(&mut backend, 7);
+        let pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, 7);
         assert_eq!(pmmr.root(), root1);
     }

@@ -242,7 +240,7 @@ fn teardown(data_dir: String) {
     fs::remove_dir_all(data_dir).unwrap();
 }

-fn load(pos: u64, elems: &[TestElem], backend: &mut store::sumtree::PMMRBackend<TestElem>) -> u64 {
+fn load(pos: u64, elems: &[TestElem], backend: &mut store::pmmr::PMMRBackend<TestElem>) -> u64 {
     let mut pmmr = PMMR::at(backend, pos);
     for elem in elems {
         pmmr.push(elem.clone()).unwrap();
@@ -252,16 +250,10 @@ fn load(pos: u64, elems: &[TestElem], backend: &mut store::sumtree::PMMRBackend<

 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
 struct TestElem([u32; 4]);
-impl Summable for TestElem {
-    type Sum = u64;
-    fn sum(&self) -> u64 {
-        // sums are not allowed to overflow, so we use this simple
-        // non-injective "sum" function that will still be homomorphic
-        self.0[0] as u64 * 0x1000 + self.0[1] as u64 * 0x100 + self.0[2] as u64 * 0x10
-            + self.0[3] as u64
-    }
-    fn sum_len() -> usize {
-        8
-    }
+impl PMMRable for TestElem {
+    fn len() -> usize {
+        16
+    }
 }

@@ -273,3 +265,15 @@ impl Writeable for TestElem {
         writer.write_u32(self.0[3])
     }
 }
+impl Readable for TestElem {
+    fn read(reader: &mut Reader) -> Result<TestElem, Error> {
+        Ok(TestElem([
+            reader.read_u32()?,
+            reader.read_u32()?,
+            reader.read_u32()?,
+            reader.read_u32()?,
+        ]))
+    }
+}