Header MMR Refactor (#2028)

* refactor header MMR to use regular backend
This lets us store header hashes in the MMR data file;
the hashes in the hash file are computed as "hash with index"

* rustfmt

* get_header_hash(pos) on header extension

* rustfmt

* cleanup

* cleanup

* comments

* cleanup

* fix up TestElem so tests work
This commit is contained in:
Antioch Peverell 2018-11-27 12:32:39 +00:00 committed by GitHub
parent ae49f30b2f
commit 79d540cf86
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
14 changed files with 126 additions and 330 deletions

View file

@ -27,6 +27,7 @@ use lru_cache::LruCache;
use core::core::hash::{Hash, Hashed, ZERO_HASH};
use core::core::merkle_proof::MerkleProof;
use core::core::pmmr;
use core::core::verifier_cache::VerifierCache;
use core::core::{
Block, BlockHeader, BlockSums, Output, OutputIdentifier, Transaction, TxKernelEntry,
@ -1196,9 +1197,16 @@ fn setup_head(
// If we have no header MMR then rebuild as necessary.
// Supports old nodes with no header MMR.
txhashset::header_extending(txhashset, &mut batch, |extension| {
if extension.size() == 0 {
let pos = pmmr::insertion_to_pmmr_index(head.height + 1);
let needs_rebuild = match extension.get_header_hash(pos) {
None => true,
Some(hash) => hash != head.last_block_h,
};
if needs_rebuild {
extension.rebuild(&head, &genesis.header)?;
}
Ok(())
})?;

View file

@ -28,7 +28,7 @@ use util::secp::pedersen::{Commitment, RangeProof};
use core::core::committed::Committed;
use core::core::hash::{Hash, Hashed};
use core::core::merkle_proof::MerkleProof;
use core::core::pmmr::{self, ReadonlyPMMR, RewindablePMMR, DBPMMR, PMMR};
use core::core::pmmr::{self, ReadonlyPMMR, RewindablePMMR, PMMR};
use core::core::{
Block, BlockHeader, Input, Output, OutputFeatures, OutputIdentifier, TxKernel, TxKernelEntry,
};
@ -37,7 +37,7 @@ use core::ser::{PMMRIndexHashable, PMMRable};
use error::{Error, ErrorKind};
use grin_store;
use grin_store::pmmr::{HashOnlyMMRBackend, PMMRBackend, PMMR_FILES};
use grin_store::pmmr::{PMMRBackend, PMMR_FILES};
use grin_store::types::prune_noop;
use store::{Batch, ChainStore};
use txhashset::{RewindableKernelView, UTXOView};
@ -56,21 +56,6 @@ const KERNEL_SUBDIR: &'static str = "kernel";
const TXHASHSET_ZIP: &'static str = "txhashset_snapshot";
/// Handle pairing a hash-only MMR backend with its last (unpruned) position,
/// so callers can construct a DBPMMR "at" the correct size.
struct HashOnlyMMRHandle {
    /// The hash-only backend (hash file on disk, data kept in the db).
    backend: HashOnlyMMRBackend,
    /// Last position in the MMR at the time the handle was opened.
    last_pos: u64,
}
impl HashOnlyMMRHandle {
    /// Open (creating the directory if necessary) a hash-only MMR backend
    /// stored under `<root_dir>/<sub_dir>/<file_name>` and capture its
    /// current unpruned size as `last_pos`.
    fn new(root_dir: &str, sub_dir: &str, file_name: &str) -> Result<HashOnlyMMRHandle, Error> {
        let path = Path::new(root_dir).join(sub_dir).join(file_name);
        // create_dir_all takes impl AsRef<Path>, so borrow rather than
        // cloning the PathBuf (the original cloned needlessly here).
        fs::create_dir_all(&path)?;
        // NOTE(review): to_str().unwrap() panics on a non-UTF8 path — assumed
        // acceptable here since root_dir is node-configured; confirm upstream.
        let backend = HashOnlyMMRBackend::new(path.to_str().unwrap())?;
        let last_pos = backend.unpruned_size();
        Ok(HashOnlyMMRHandle { backend, last_pos })
    }
}
struct PMMRHandle<T: PMMRable> {
backend: PMMRBackend<T>,
last_pos: u64,
@ -107,8 +92,7 @@ pub struct TxHashSet {
/// output, rangeproof and kernel MMRs during an extension or a
/// readonly_extension.
/// It can also be rewound and applied separately via a header_extension.
/// Note: the header MMR is backed by the database and maintains just the hash file.
header_pmmr_h: HashOnlyMMRHandle,
header_pmmr_h: PMMRHandle<BlockHeader>,
/// Header MMR to support exploratory sync_head.
/// The header_head and sync_head chains can diverge so we need to maintain
@ -116,8 +100,7 @@ pub struct TxHashSet {
///
/// Note: this is rewound and applied separately to the other MMRs
/// via a "sync_extension".
/// Note: the sync MMR is backed by the database and maintains just the hash file.
sync_pmmr_h: HashOnlyMMRHandle,
sync_pmmr_h: PMMRHandle<BlockHeader>,
output_pmmr_h: PMMRHandle<OutputIdentifier>,
rproof_pmmr_h: PMMRHandle<RangeProof>,
@ -135,12 +118,20 @@ impl TxHashSet {
header: Option<&BlockHeader>,
) -> Result<TxHashSet, Error> {
Ok(TxHashSet {
header_pmmr_h: HashOnlyMMRHandle::new(
header_pmmr_h: PMMRHandle::new(
&root_dir,
HEADERHASHSET_SUBDIR,
HEADER_HEAD_SUBDIR,
false,
None,
)?,
sync_pmmr_h: PMMRHandle::new(
&root_dir,
HEADERHASHSET_SUBDIR,
SYNC_HEAD_SUBDIR,
false,
None,
)?,
sync_pmmr_h: HashOnlyMMRHandle::new(&root_dir, HEADERHASHSET_SUBDIR, SYNC_HEAD_SUBDIR)?,
output_pmmr_h: PMMRHandle::new(
&root_dir,
TXHASHSET_SUBDIR,
@ -243,8 +234,8 @@ impl TxHashSet {
/// Get MMR roots.
pub fn roots(&mut self) -> TxHashSetRoots {
let header_pmmr: DBPMMR<BlockHeader, _> =
DBPMMR::at(&mut self.header_pmmr_h.backend, self.header_pmmr_h.last_pos);
let header_pmmr: PMMR<BlockHeader, _> =
PMMR::at(&mut self.header_pmmr_h.backend, self.header_pmmr_h.last_pos);
let output_pmmr: PMMR<OutputIdentifier, _> =
PMMR::at(&mut self.output_pmmr_h.backend, self.output_pmmr_h.last_pos);
let rproof_pmmr: PMMR<RangeProof, _> =
@ -491,7 +482,7 @@ where
let child_batch = batch.child()?;
{
trace!("Starting new txhashset sync_head extension.");
let pmmr = DBPMMR::at(&mut trees.sync_pmmr_h.backend, trees.sync_pmmr_h.last_pos);
let pmmr = PMMR::at(&mut trees.sync_pmmr_h.backend, trees.sync_pmmr_h.last_pos);
let mut extension = HeaderExtension::new(pmmr, &child_batch, header);
res = inner(&mut extension);
@ -550,7 +541,7 @@ where
let child_batch = batch.child()?;
{
trace!("Starting new txhashset header extension.");
let pmmr = DBPMMR::at(
let pmmr = PMMR::at(
&mut trees.header_pmmr_h.backend,
trees.header_pmmr_h.last_pos,
);
@ -591,7 +582,7 @@ where
pub struct HeaderExtension<'a> {
header: BlockHeader,
pmmr: DBPMMR<'a, BlockHeader, HashOnlyMMRBackend>,
pmmr: PMMR<'a, BlockHeader, PMMRBackend<BlockHeader>>,
/// Rollback flag.
rollback: bool,
@ -604,7 +595,7 @@ pub struct HeaderExtension<'a> {
impl<'a> HeaderExtension<'a> {
fn new(
pmmr: DBPMMR<'a, BlockHeader, HashOnlyMMRBackend>,
pmmr: PMMR<'a, BlockHeader, PMMRBackend<BlockHeader>>,
batch: &'a Batch,
header: BlockHeader,
) -> HeaderExtension<'a> {
@ -616,6 +607,11 @@ impl<'a> HeaderExtension<'a> {
}
}
/// Get the header hash for the specified pos from the underlying MMR backend.
/// The backend stores the header hash as the MMR data element, so this
/// returns Some only for (unpruned) leaf positions within the MMR.
pub fn get_header_hash(&self, pos: u64) -> Option<Hash> {
    self.pmmr.get_data(pos)
}
/// Force the rollback of this extension, no matter the result.
pub fn force_rollback(&mut self) {
self.rollback = true;
@ -625,7 +621,9 @@ impl<'a> HeaderExtension<'a> {
/// This may be either the header MMR or the sync MMR depending on the
/// extension.
pub fn apply_header(&mut self, header: &BlockHeader) -> Result<Hash, Error> {
self.pmmr.push(header).map_err(&ErrorKind::TxHashSetErr)?;
self.pmmr
.push(header.clone())
.map_err(&ErrorKind::TxHashSetErr)?;
self.header = header.clone();
Ok(self.root())
}
@ -641,7 +639,7 @@ impl<'a> HeaderExtension<'a> {
let header_pos = pmmr::insertion_to_pmmr_index(header.height + 1);
self.pmmr
.rewind(header_pos)
.rewind(header_pos, &Bitmap::create())
.map_err(&ErrorKind::TxHashSetErr)?;
// Update our header to reflect the one we rewound to.
@ -655,7 +653,9 @@ impl<'a> HeaderExtension<'a> {
/// including the genesis block header.
pub fn truncate(&mut self) -> Result<(), Error> {
debug!("Truncating header extension.");
self.pmmr.rewind(0).map_err(&ErrorKind::TxHashSetErr)?;
self.pmmr
.rewind(0, &Bitmap::create())
.map_err(&ErrorKind::TxHashSetErr)?;
Ok(())
}
@ -732,7 +732,7 @@ impl<'a> HeaderExtension<'a> {
pub struct Extension<'a> {
header: BlockHeader,
header_pmmr: DBPMMR<'a, BlockHeader, HashOnlyMMRBackend>,
header_pmmr: PMMR<'a, BlockHeader, PMMRBackend<BlockHeader>>,
output_pmmr: PMMR<'a, OutputIdentifier, PMMRBackend<OutputIdentifier>>,
rproof_pmmr: PMMR<'a, RangeProof, PMMRBackend<RangeProof>>,
kernel_pmmr: PMMR<'a, TxKernelEntry, PMMRBackend<TxKernelEntry>>,
@ -780,7 +780,7 @@ impl<'a> Extension<'a> {
fn new(trees: &'a mut TxHashSet, batch: &'a Batch, header: BlockHeader) -> Extension<'a> {
Extension {
header,
header_pmmr: DBPMMR::at(
header_pmmr: PMMR::at(
&mut trees.header_pmmr_h.backend,
trees.header_pmmr_h.last_pos,
),
@ -960,7 +960,7 @@ impl<'a> Extension<'a> {
fn apply_header(&mut self, header: &BlockHeader) -> Result<(), Error> {
self.header_pmmr
.push(header)
.push(header.clone())
.map_err(&ErrorKind::TxHashSetErr)?;
Ok(())
}
@ -1041,7 +1041,7 @@ impl<'a> Extension<'a> {
);
self.header_pmmr
.rewind(header_pos)
.rewind(header_pos, &Bitmap::create())
.map_err(&ErrorKind::TxHashSetErr)?;
self.output_pmmr
.rewind(output_pos, rewind_rm_pos)

View file

@ -34,7 +34,7 @@ use core::{
use global;
use keychain::{self, BlindingFactor};
use pow::{Difficulty, Proof, ProofOfWork};
use ser::{self, HashOnlyPMMRable, Readable, Reader, Writeable, Writer};
use ser::{self, PMMRable, Readable, Reader, Writeable, Writer};
use util::{secp, static_secp_instance};
/// Errors thrown by Block validation
@ -159,7 +159,13 @@ impl Default for BlockHeader {
}
}
impl HashOnlyPMMRable for BlockHeader {}
/// Block headers are variable size, so the header MMR stores the
/// fixed-size header hash as its data element rather than the header itself
/// (the full header lives in the db).
impl PMMRable for BlockHeader {
    type E = Hash;

    // The stored element is simply the hash of the header.
    fn as_elmt(self) -> Self::E {
        self.hash()
    }
}
/// Serialization of a block header
impl Writeable for BlockHeader {

View file

@ -18,18 +18,6 @@ use core::hash::Hash;
use core::BlockHeader;
use ser::PMMRable;
/// Simple "hash only" backend (used for header MMR, headers stored in the db).
/// Simple "hash only" backend (used for the header MMR, with full headers
/// stored in the db). Positions are 1-based MMR insertion positions.
pub trait HashOnlyBackend {
    /// Append a vec of hashes to the backend.
    /// Returns an error (as a String) if the underlying storage fails.
    fn append(&mut self, data: Vec<Hash>) -> Result<(), String>;

    /// Rewind the backend to the specified position, discarding anything after it.
    fn rewind(&mut self, position: u64) -> Result<(), String>;

    /// Get the hash at the specified position, or None if not present.
    fn get_hash(&self, position: u64) -> Option<Hash>;
}
/// Storage backend for the MMR, just needs to be indexed by order of insertion.
/// The PMMR itself does not need the Backend to be accurate on the existence
/// of an element (i.e. remove could be a no-op) but layers above can
@ -52,7 +40,7 @@ pub trait Backend<T: PMMRable> {
fn get_hash(&self, position: u64) -> Option<Hash>;
/// Get underlying data by insertion position.
fn get_data(&self, position: u64) -> Option<T>;
fn get_data(&self, position: u64) -> Option<T::E>;
/// Get a Hash by original insertion position
/// (ignoring the remove log).
@ -60,7 +48,7 @@ pub trait Backend<T: PMMRable> {
/// Get a Data Element by original insertion position
/// (ignoring the remove log).
fn get_data_from_file(&self, position: u64) -> Option<T>;
fn get_data_from_file(&self, position: u64) -> Option<T::E>;
/// Remove Hash by insertion position. An index is also provided so the
/// underlying backend can implement some rollback of positions up to a

View file

@ -1,185 +0,0 @@
// Copyright 2018 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Database backed MMR.
use std::marker;
use core::hash::{Hash, ZERO_HASH};
use core::pmmr::{bintree_postorder_height, is_leaf, peak_map_height, peaks, HashOnlyBackend};
use ser::{HashOnlyPMMRable, PMMRIndexHashable};
/// Database backed MMR: only hashes live in the backend, the underlying
/// data elements are maintained in the db.
pub struct DBPMMR<'a, T, B>
where
    T: HashOnlyPMMRable,
    B: 'a + HashOnlyBackend,
{
    /// The last position in the PMMR
    last_pos: u64,
    /// The (mutable) backend this MMR reads and appends hashes through.
    backend: &'a mut B,
    // only needed to parameterise Backend
    _marker: marker::PhantomData<T>,
}
impl<'a, T, B> DBPMMR<'a, T, B>
where
    T: HashOnlyPMMRable,
    B: 'a + HashOnlyBackend,
{
    /// Build a new (empty) db backed MMR over the provided backend.
    pub fn new(backend: &'a mut B) -> DBPMMR<T, B> {
        DBPMMR {
            backend,
            last_pos: 0,
            _marker: marker::PhantomData,
        }
    }

    /// Build a new db backed MMR initialized to
    /// last_pos with the provided db backend.
    pub fn at(backend: &'a mut B, last_pos: u64) -> DBPMMR<T, B> {
        DBPMMR {
            backend,
            last_pos,
            _marker: marker::PhantomData,
        }
    }

    /// Is the MMR empty?
    pub fn is_empty(&self) -> bool {
        self.last_pos == 0
    }

    /// Total size of the tree, including intermediary nodes and ignoring any
    /// pruning.
    pub fn unpruned_size(&self) -> u64 {
        self.last_pos
    }

    /// Rewind the MMR to the specified position.
    pub fn rewind(&mut self, position: u64) -> Result<(), String> {
        // Identify which actual position we should rewind to as the provided
        // position is a leaf. We traverse the MMR to include any parent(s) that
        // need to be included for the MMR to be valid.
        let mut pos = position;
        while bintree_postorder_height(pos + 1) > 0 {
            pos += 1;
        }
        self.backend.rewind(pos)?;
        self.last_pos = pos;
        Ok(())
    }

    /// Get the hash element at provided position in the MMR.
    /// Note: the backend also stores internal (parent) hashes, but this public
    /// accessor deliberately exposes leaf positions only.
    pub fn get_hash(&self, pos: u64) -> Option<Hash> {
        if pos > self.last_pos {
            // If we are beyond the rhs of the MMR return None.
            None
        } else if is_leaf(pos) {
            // If we are a leaf then get the hash from the backend.
            self.backend.get_hash(pos)
        } else {
            // Non-leaf positions are not exposed via this accessor.
            None
        }
    }

    /// Push a new element into the MMR. Computes new related peaks at
    /// the same time if applicable. Returns the insertion position of the
    /// element.
    pub fn push(&mut self, elmt: &T) -> Result<u64, String> {
        let elmt_pos = self.last_pos + 1;
        let mut current_hash = elmt.hash_with_index(elmt_pos - 1);

        let mut to_append = vec![current_hash];
        let mut pos = elmt_pos;

        let (peak_map, height) = peak_map_height(pos - 1);
        if height != 0 {
            return Err(format!("bad mmr size {}", pos - 1));
        }
        // hash with all immediately preceding peaks, as indicated by peak map
        let mut peak = 1;
        while (peak_map & peak) != 0 {
            let left_sibling = pos + 1 - 2 * peak;
            let left_hash = self
                .backend
                .get_hash(left_sibling)
                .ok_or("missing left sibling in tree, should not have been pruned")?;
            peak *= 2;
            pos += 1;
            current_hash = (left_hash, current_hash).hash_with_index(pos - 1);
            to_append.push(current_hash);
        }

        // append all the new nodes and update the MMR index
        self.backend.append(to_append)?;
        self.last_pos = pos;
        Ok(elmt_pos)
    }

    /// Return the vec of peak hashes for this MMR.
    pub fn peaks(&self) -> Vec<Hash> {
        let peaks_pos = peaks(self.last_pos);
        peaks_pos
            .into_iter()
            .filter_map(|pi| self.backend.get_hash(pi))
            .collect()
    }

    /// Return the overall root hash for this MMR ("bagging" the peaks from
    /// right to left).
    pub fn root(&self) -> Hash {
        if self.is_empty() {
            return ZERO_HASH;
        }
        let mut res = None;
        for peak in self.peaks().iter().rev() {
            res = match res {
                None => Some(*peak),
                Some(rhash) => Some((*peak, rhash).hash_with_index(self.unpruned_size())),
            }
        }
        res.expect("no root, invalid tree")
    }

    /// Validate all the hashes in the MMR.
    /// For every parent node we check hashes of the children produce the parent hash
    /// by hashing them together.
    pub fn validate(&self) -> Result<(), String> {
        // iterate on all parent nodes
        for n in 1..(self.last_pos + 1) {
            let height = bintree_postorder_height(n);
            if height > 0 {
                // Read hashes directly from the backend here: the public
                // get_hash() returns None for non-leaf positions, which would
                // make this parent check vacuous (parents and their internal
                // children would never be looked up).
                if let Some(hash) = self.backend.get_hash(n) {
                    let left_pos = n - (1 << height);
                    let right_pos = n - 1;
                    if let Some(left_child_hs) = self.backend.get_hash(left_pos) {
                        if let Some(right_child_hs) = self.backend.get_hash(right_pos) {
                            // hash the two child nodes together with parent_pos and compare
                            if (left_child_hs, right_child_hs).hash_with_index(n - 1) != hash {
                                return Err(format!(
                                    "Invalid MMR, hash of parent at {} does \
                                     not match children.",
                                    n
                                ));
                            }
                        }
                    }
                }
            }
        }
        Ok(())
    }
}

View file

@ -37,13 +37,11 @@
//! either be a simple Vec or a database.
mod backend;
mod db_pmmr;
mod pmmr;
mod readonly_pmmr;
mod rewindable_pmmr;
pub use self::backend::*;
pub use self::db_pmmr::*;
pub use self::pmmr::*;
pub use self::readonly_pmmr::*;
pub use self::rewindable_pmmr::*;

View file

@ -175,7 +175,7 @@ where
let elmt_pos = self.last_pos + 1;
let mut current_hash = elmt.hash_with_index(elmt_pos - 1);
let mut to_append = vec![current_hash];
let mut hashes = vec![current_hash];
let mut pos = elmt_pos;
let (peak_map, height) = peak_map_height(pos - 1);
@ -193,11 +193,11 @@ where
peak *= 2;
pos += 1;
current_hash = (left_hash, current_hash).hash_with_index(pos - 1);
to_append.push(current_hash);
hashes.push(current_hash);
}
// append all the new nodes and update the MMR index
self.backend.append(elmt, to_append)?;
self.backend.append(elmt, hashes)?;
self.last_pos = pos;
Ok(elmt_pos)
}
@ -259,7 +259,7 @@ where
}
/// Get the data element at provided position in the MMR.
pub fn get_data(&self, pos: u64) -> Option<T> {
pub fn get_data(&self, pos: u64) -> Option<T::E> {
if pos > self.last_pos {
// If we are beyond the rhs of the MMR return None.
None
@ -285,7 +285,7 @@ where
/// Helper function to get the last N nodes inserted, i.e. the last
/// n nodes along the bottom of the tree.
/// May return less than n items if the MMR has been pruned/compacted.
pub fn get_last_n_insertions(&self, n: u64) -> Vec<(Hash, T)> {
pub fn get_last_n_insertions(&self, n: u64) -> Vec<(Hash, T::E)> {
let mut return_vec = vec![];
let mut last_leaf = self.last_pos;
for _ in 0..n as u64 {
@ -307,7 +307,11 @@ where
/// Helper function which returns un-pruned nodes from the insertion index
/// forward
/// returns last insertion index returned along with data
pub fn elements_from_insertion_index(&self, mut index: u64, max_count: u64) -> (u64, Vec<T>) {
pub fn elements_from_insertion_index(
&self,
mut index: u64,
max_count: u64,
) -> (u64, Vec<T::E>) {
let mut return_vec = vec![];
if index == 0 {
index = 1;

View file

@ -58,7 +58,7 @@ where
}
/// Get the data element at provided position in the MMR.
pub fn get_data(&self, pos: u64) -> Option<T> {
pub fn get_data(&self, pos: u64) -> Option<T::E> {
if pos > self.last_pos {
// If we are beyond the rhs of the MMR return None.
None

View file

@ -75,7 +75,7 @@ where
}
/// Get the data element at provided position in the MMR.
pub fn get_data(&self, pos: u64) -> Option<T> {
pub fn get_data(&self, pos: u64) -> Option<T::E> {
if pos > self.last_pos {
// If we are beyond the rhs of the MMR return None.
None

View file

@ -310,7 +310,13 @@ impl FixedLength for TxKernelEntry {
+ secp::constants::AGG_SIGNATURE_SIZE;
}
impl PMMRable for TxKernelEntry {}
/// Kernel entries are fixed size, so they are stored directly in the MMR
/// data file (E = Self).
impl PMMRable for TxKernelEntry {
    type E = Self;

    fn as_elmt(self) -> Self::E {
        self
    }
}
/// TransactionBody is a common abstraction for transaction and block
#[derive(Serialize, Deserialize, Debug, Clone)]
@ -1237,7 +1243,13 @@ impl FixedLength for OutputIdentifier {
const LEN: usize = 1 + secp::constants::PEDERSEN_COMMITMENT_SIZE;
}
impl PMMRable for OutputIdentifier {}
/// Output identifiers are fixed size, so they are stored directly in the MMR
/// data file (E = Self).
impl PMMRable for OutputIdentifier {
    type E = Self;

    fn as_elmt(self) -> Self::E {
        self
    }
}
impl Writeable for OutputIdentifier {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {

View file

@ -512,7 +512,13 @@ impl FixedLength for RangeProof {
+ MAX_PROOF_SIZE;
}
impl PMMRable for RangeProof {}
/// Range proofs are fixed (maximum) size, so they are stored directly in the
/// MMR data file (E = Self).
impl PMMRable for RangeProof {
    type E = Self;

    fn as_elmt(self) -> Self::E {
        self
    }
}
impl Readable for Signature {
fn read(reader: &mut Reader) -> Result<Signature, Error> {
@ -682,11 +688,15 @@ pub trait FixedLength {
const LEN: usize;
}
/// Trait for types that can be added to a "hash only" PMMR (block headers for example).
pub trait HashOnlyPMMRable: Writeable + Clone + Debug {}
/// Trait for types that can be added to a PMMR.
pub trait PMMRable: FixedLength + Readable + Writeable + Clone + Debug {}
/// Trait for types that can be added to a PMMR.
/// The type itself only needs to be serializable/clonable; the element
/// actually persisted in the MMR data file is the associated type E.
pub trait PMMRable: Writeable + Clone + Debug {
    /// The type of element actually stored in the MMR data file.
    /// This allows us to store Hash elements in the header MMR for variable size BlockHeaders.
    type E: FixedLength + Readable + Writeable;

    /// Convert the pmmrable into the element to be stored in the MMR data file.
    fn as_elmt(self) -> Self::E;
}
/// Generic trait to ensure PMMR elements can be hashed with an index
pub trait PMMRIndexHashable {

View file

@ -29,7 +29,13 @@ impl FixedLength for TestElem {
const LEN: usize = 16;
}
impl PMMRable for TestElem {}
/// TestElem is fixed size, so it is stored directly in the MMR data file
/// (E = Self).
impl PMMRable for TestElem {
    type E = Self;

    fn as_elmt(self) -> Self::E {
        self
    }
}
impl Writeable for TestElem {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
@ -77,7 +83,7 @@ impl<T: PMMRable> Backend<T> for VecBackend<T> {
}
}
fn get_data(&self, position: u64) -> Option<T> {
fn get_data(&self, position: u64) -> Option<T::E> {
if self.remove_list.contains(&position) {
None
} else {
@ -90,10 +96,10 @@ impl<T: PMMRable> Backend<T> for VecBackend<T> {
Some(hash.clone())
}
fn get_data_from_file(&self, position: u64) -> Option<T> {
fn get_data_from_file(&self, position: u64) -> Option<T::E> {
let idx = pmmr::n_leaves(position);
let data = &self.data[(idx - 1) as usize];
Some(data.clone())
let data = self.data[(idx - 1) as usize].clone();
Some(data.as_elmt())
}
fn remove(&mut self, position: u64) -> Result<(), String> {

View file

@ -18,9 +18,9 @@ use std::{fs, io, marker};
use croaring::Bitmap;
use core::core::hash::{Hash, Hashed};
use core::core::pmmr::{self, family, Backend, HashOnlyBackend};
use core::core::pmmr::{self, family, Backend};
use core::core::BlockHeader;
use core::ser::{self, PMMRable};
use core::ser::{self, FixedLength, PMMRable};
use leaf_set::LeafSet;
use prune_list::PruneList;
use types::{prune_noop, AppendOnlyFile, HashFile};
@ -69,7 +69,8 @@ impl<T: PMMRable> Backend<T> for PMMRBackend<T> {
let position = self.hash_file.size_unsync() + shift + 1;
self.leaf_set.add(position);
}
self.data_file.append(&mut ser::ser_vec(&data).unwrap());
self.data_file
.append(&mut ser::ser_vec(&data.as_elmt()).unwrap());
for h in &hashes {
self.hash_file
.append(h)
@ -86,7 +87,7 @@ impl<T: PMMRable> Backend<T> for PMMRBackend<T> {
self.hash_file.read(position - shift)
}
fn get_data_from_file(&self, position: u64) -> Option<T> {
fn get_data_from_file(&self, position: u64) -> Option<T::E> {
if self.is_compacted(position) {
return None;
}
@ -94,8 +95,8 @@ impl<T: PMMRable> Backend<T> for PMMRBackend<T> {
let pos = pmmr::n_leaves(position) - 1;
// Must be on disk, doing a read at the correct position
let file_offset = ((pos - shift) as usize) * T::LEN;
let data = self.data_file.read(file_offset, T::LEN);
let file_offset = ((pos - shift) as usize) * T::E::LEN;
let data = self.data_file.read(file_offset, T::E::LEN);
match ser::deserialize(&mut &data[..]) {
Ok(h) => Some(h),
Err(e) => {
@ -120,7 +121,7 @@ impl<T: PMMRable> Backend<T> for PMMRBackend<T> {
/// Get the data at pos.
/// Return None if it has been removed or if pos is not a leaf node.
fn get_data(&self, pos: u64) -> Option<(T)> {
fn get_data(&self, pos: u64) -> Option<(T::E)> {
if !pmmr::is_leaf(pos) {
return None;
}
@ -146,7 +147,7 @@ impl<T: PMMRable> Backend<T> for PMMRBackend<T> {
// Rewind the data file accounting for pruned/compacted pos
let leaf_shift = self.prune_list.get_leaf_shift(position);
let flatfile_pos = pmmr::n_leaves(position);
let file_pos = (flatfile_pos - leaf_shift) * T::LEN as u64;
let file_pos = (flatfile_pos - leaf_shift) * T::E::LEN as u64;
self.data_file.rewind(file_pos);
Ok(())
@ -240,7 +241,7 @@ impl<T: PMMRable> PMMRBackend<T> {
/// Number of elements in the underlying stored data. Extremely dependent on
/// pruning and compaction.
pub fn data_size(&self) -> u64 {
self.data_file.size() / T::LEN as u64
self.data_file.size() / T::E::LEN as u64
}
/// Size of the underlying hashed data. Extremely dependent on pruning
@ -332,13 +333,13 @@ impl<T: PMMRable> PMMRBackend<T> {
let off_to_rm = map_vec!(leaf_pos_to_rm, |&pos| {
let flat_pos = pmmr::n_leaves(pos);
let shift = self.prune_list.get_leaf_shift(pos);
(flat_pos - 1 - shift) * T::LEN as u64
(flat_pos - 1 - shift) * T::E::LEN as u64
});
self.data_file.save_prune(
tmp_prune_file_data.clone(),
&off_to_rm,
T::LEN as u64,
T::E::LEN as u64,
prune_cb,
)?;
}
@ -405,64 +406,6 @@ impl<T: PMMRable> PMMRBackend<T> {
}
}
/// Simple MMR Backend for hashes only (data maintained in the db).
pub struct HashOnlyMMRBackend {
    /// The hash file underlying this MMR backend.
    hash_file: HashFile,
}
impl HashOnlyBackend for HashOnlyMMRBackend {
    /// Append each hash to the underlying hash file, in order, stopping at
    /// the first failure.
    fn append(&mut self, hashes: Vec<Hash>) -> Result<(), String> {
        for hash in hashes.iter() {
            if let Err(e) = self.hash_file.append(hash) {
                return Err(format!("Failed to append to backend, {:?}", e));
            }
        }
        Ok(())
    }

    /// Truncate the hash file back to the specified position.
    fn rewind(&mut self, position: u64) -> Result<(), String> {
        match self.hash_file.rewind(position) {
            Ok(_) => Ok(()),
            Err(e) => Err(format!("Failed to rewind backend, {:?}", e)),
        }
    }

    /// Read the hash at the specified position from the hash file.
    fn get_hash(&self, position: u64) -> Option<Hash> {
        self.hash_file.read(position)
    }
}
impl HashOnlyMMRBackend {
    /// Instantiates a new hash-only PMMR backend.
    /// Use the provided dir to store its files.
    pub fn new(data_dir: &str) -> io::Result<HashOnlyMMRBackend> {
        let path = format!("{}/{}", data_dir, PMMR_HASH_FILE);
        let hash_file = HashFile::open(&path)?;
        Ok(HashOnlyMMRBackend { hash_file })
    }

    /// The unpruned size of this MMR backend.
    pub fn unpruned_size(&self) -> u64 {
        self.hash_file.size()
    }

    /// Discard any pending changes to this MMR backend.
    pub fn discard(&mut self) {
        self.hash_file.discard();
    }

    /// Sync pending changes to the backend file on disk.
    /// Any flush failure is surfaced as an Interrupted io::Error.
    pub fn sync(&mut self) -> io::Result<()> {
        self.hash_file.flush().map_err(|e| {
            io::Error::new(
                io::ErrorKind::Interrupted,
                format!("Could not write to hash storage, disk full? {:?}", e),
            )
        })
    }
}
/// Filter remove list to exclude roots.
/// We want to keep roots around so we have hashes for Merkle proofs.
fn removed_excl_roots(removed: &Bitmap) -> Bitmap {

View file

@ -758,7 +758,13 @@ impl FixedLength for TestElem {
const LEN: usize = 4;
}
impl PMMRable for TestElem {}
/// TestElem is fixed size, so it is stored directly in the MMR data file
/// (E = Self).
impl PMMRable for TestElem {
    type E = Self;

    fn as_elmt(self) -> Self::E {
        self
    }
}
impl Writeable for TestElem {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), Error> {