PMMRable cleanup (#1910)

* cleanup pmmrable and len()
introduce FixedLength trait with a const LEN
make Hash impl FixedLength for consistency

* rustfmt

* store tests cleanup

* rustfmt

* what's going on with those comments and rustfmt?
Antioch Peverell 2018-11-01 20:14:46 +00:00 committed by GitHub
parent 9cebbf24b8
commit d23dec73d0
14 changed files with 81 additions and 112 deletions
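
Before the per-file diffs, a minimal standalone sketch of the trait split this commit introduces may help: the serialized size moves from a `len()` method on `PMMRable` to an associated `const LEN` on a new `FixedLength` trait, `PMMRable` becomes a marker trait for fixed-length elements stored in the PMMR data files, and a separate `HashOnlyPMMRable` covers hash-only MMRs such as the header MMR. The elided `Readable`/`Writeable` supertraits and the `Kernelish` type below are simplifications for illustration, not Grin code; the real definitions are in the `core/src/ser.rs` hunk further down.

```rust
// Sketch only: supertraits for (de)serialization are elided so this compiles
// on its own; see the ser.rs diff below for the real trait definitions.
pub trait FixedLength {
    /// Serialized size in bytes, known at compile time.
    const LEN: usize;
}

/// Marker for elements kept in a regular PMMR backend; the flat data file can
/// be indexed as `position * T::LEN` because every record has the same size.
pub trait PMMRable: FixedLength + Clone + std::fmt::Debug {}

/// Marker for elements whose hashes (but not data) live in an MMR,
/// e.g. block headers whose data stays in the db.
pub trait HashOnlyPMMRable: Clone + std::fmt::Debug {}

// Hypothetical element type, roughly kernel-shaped, purely for illustration.
#[derive(Clone, Debug)]
struct Kernelish {
    features: u8,
    fee: u64,
}

impl FixedLength for Kernelish {
    const LEN: usize = 1 + 8; // features byte + fee
}
impl PMMRable for Kernelish {}

fn main() {
    // Backends can now derive record offsets from the associated const
    // instead of calling a `len()` method on the element type.
    let record_len = Kernelish::LEN as u64;
    println!("third record starts at byte {}", 2 * record_len);
}
```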

@@ -69,18 +69,12 @@ impl HashOnlyMMRHandle {
 	}
 }
-struct PMMRHandle<T>
-where
-	T: PMMRable,
-{
+struct PMMRHandle<T: PMMRable> {
 	backend: PMMRBackend<T>,
 	last_pos: u64,
 }
-impl<T> PMMRHandle<T>
-where
-	T: PMMRable + ::std::fmt::Debug,
-{
+impl<T: PMMRable> PMMRHandle<T> {
 	fn new(
 		root_dir: &str,
 		sub_dir: &str,

@@ -35,7 +35,7 @@ use core::{
 use global;
 use keychain::{self, BlindingFactor};
 use pow::{Difficulty, Proof, ProofOfWork};
-use ser::{self, PMMRable, Readable, Reader, Writeable, Writer};
+use ser::{self, HashOnlyPMMRable, Readable, Reader, Writeable, Writer};
 use util::{secp, static_secp_instance};
 /// Errors thrown by Block validation
@@ -147,11 +147,9 @@ fn fixed_size_of_serialized_header(_version: u16) -> usize {
 	size += mem::size_of::<u16>(); // version
 	size += mem::size_of::<u64>(); // height
 	size += mem::size_of::<i64>(); // timestamp
-	// prev_hash, prev_root, output_root, range_proof_root, kernel_root
-	size += 5 * mem::size_of::<Hash>();
+	size += 5 * mem::size_of::<Hash>(); // prev_hash, prev_root, output_root, range_proof_root, kernel_root
 	size += mem::size_of::<BlindingFactor>(); // total_kernel_offset
-	// output_mmr_size, kernel_mmr_size
-	size += 2 * mem::size_of::<u64>();
+	size += 2 * mem::size_of::<u64>(); // output_mmr_size, kernel_mmr_size
 	size += mem::size_of::<Difficulty>(); // total_difficulty
 	size += mem::size_of::<u32>(); // secondary_scaling
 	size += mem::size_of::<u64>(); // nonce
@@ -190,13 +188,7 @@ impl Default for BlockHeader {
 	}
 }
-/// Block header hashes are maintained in the header MMR
-/// but we store the data itself in the db.
-impl PMMRable for BlockHeader {
-	fn len() -> usize {
-		0
-	}
-}
+impl HashOnlyPMMRable for BlockHeader {}
 /// Serialization of a block header
 impl Writeable for BlockHeader {

@@ -26,7 +26,7 @@ use std::{fmt, ops};
 use blake2::blake2b::Blake2b;
 use consensus;
-use ser::{self, AsFixedBytes, Error, Readable, Reader, Writeable, Writer};
+use ser::{self, AsFixedBytes, Error, FixedLength, Readable, Reader, Writeable, Writer};
 use util;
 /// A hash consisting of all zeroes, used as a sentinel. No known preimage.
@@ -52,15 +52,17 @@ impl fmt::Display for Hash {
 	}
 }
-impl Hash {
+impl FixedLength for Hash {
 	/// Size of a hash in bytes.
-	pub const SIZE: usize = 32;
+	const LEN: usize = 32;
+}
+impl Hash {
 	/// Builds a Hash from a byte vector. If the vector is too short, it will be
 	/// completed by zeroes. If it's too long, it will be truncated.
 	pub fn from_vec(v: &[u8]) -> Hash {
-		let mut h = [0; Hash::SIZE];
-		let copy_size = min(v.len(), Hash::SIZE);
+		let mut h = [0; Hash::LEN];
+		let copy_size = min(v.len(), Hash::LEN);
 		h[..copy_size].copy_from_slice(&v[..copy_size]);
 		Hash(h)
 	}
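
As an aside, the zero-pad/truncate behaviour described in the `from_vec` doc comment above can be sketched in isolation (using a plain `[u8; 32]` in place of the `Hash` newtype, with `LEN = 32` matching `Hash::LEN`):

```rust
use std::cmp::min;

const LEN: usize = 32; // stands in for Hash::LEN

// Stand-in for Hash::from_vec: short input is zero-padded, long input truncated.
fn from_vec(v: &[u8]) -> [u8; LEN] {
    let mut h = [0u8; LEN];
    let copy_size = min(v.len(), LEN);
    h[..copy_size].copy_from_slice(&v[..copy_size]);
    h
}

fn main() {
    // Short input: copied to the front, the rest stays zero.
    assert_eq!(&from_vec(&[1, 2, 3])[..4], &[1, 2, 3, 0]);
    // Long input: everything past 32 bytes is dropped.
    assert_eq!(from_vec(&[9u8; 40]), [9u8; 32]);
}
```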

@@ -34,10 +34,7 @@ pub trait HashOnlyBackend {
 /// The PMMR itself does not need the Backend to be accurate on the existence
 /// of an element (i.e. remove could be a no-op) but layers above can
 /// depend on an accurate Backend to check existence.
-pub trait Backend<T>
-where
-	T: PMMRable,
-{
+pub trait Backend<T: PMMRable> {
 	/// Append the provided Hashes to the backend storage, and optionally an
 	/// associated data element to flatfile storage (for leaf nodes only). The
 	/// position of the first element of the Vec in the MMR is provided to

@@ -18,12 +18,12 @@ use std::marker;
 use core::hash::{Hash, ZERO_HASH};
 use core::pmmr::{bintree_postorder_height, is_leaf, peak_map_height, peaks, HashOnlyBackend};
-use ser::{PMMRIndexHashable, PMMRable};
+use ser::{HashOnlyPMMRable, PMMRIndexHashable};
 /// Database backed MMR.
 pub struct DBPMMR<'a, T, B>
 where
-	T: PMMRable,
+	T: HashOnlyPMMRable,
 	B: 'a + HashOnlyBackend,
 {
 	/// The last position in the PMMR
@@ -36,7 +36,7 @@ where
 impl<'a, T, B> DBPMMR<'a, T, B>
 where
-	T: PMMRable + ::std::fmt::Debug,
+	T: HashOnlyPMMRable,
 	B: 'a + HashOnlyBackend,
 {
 	/// Build a new db backed MMR.

@@ -47,7 +47,7 @@ where
 impl<'a, T, B> PMMR<'a, T, B>
 where
-	T: PMMRable + ::std::fmt::Debug,
+	T: PMMRable,
 	B: 'a + Backend<T>,
 {
 	/// Build a new prunable Merkle Mountain Range using the provided backend.

@@ -35,7 +35,7 @@ where
 impl<'a, T, B> ReadonlyPMMR<'a, T, B>
 where
-	T: PMMRable + ::std::fmt::Debug,
+	T: PMMRable,
 	B: 'a + Backend<T>,
 {
 	/// Build a new readonly PMMR.

@@ -37,7 +37,7 @@ where
 impl<'a, T, B> RewindablePMMR<'a, T, B>
 where
-	T: PMMRable + ::std::fmt::Debug,
+	T: PMMRable,
 	B: 'a + Backend<T>,
 {
 	/// Build a new readonly PMMR.

@@ -26,7 +26,7 @@ use core::hash::Hashed;
 use core::verifier_cache::VerifierCache;
 use core::{committed, Committed};
 use keychain::{self, BlindingFactor};
-use ser::{self, read_multi, PMMRable, Readable, Reader, Writeable, Writer};
+use ser::{self, read_multi, FixedLength, PMMRable, Readable, Reader, Writeable, Writer};
 use util;
 use util::secp::pedersen::{Commitment, RangeProof};
 use util::secp::{self, Message, Signature};
@@ -240,13 +240,14 @@ impl TxKernel {
 	}
 }
-impl PMMRable for TxKernel {
-	fn len() -> usize {
-		17 + // features plus fee and lock_height
-			secp::constants::PEDERSEN_COMMITMENT_SIZE + secp::constants::AGG_SIGNATURE_SIZE
-	}
+impl FixedLength for TxKernel {
+	const LEN: usize = 17 // features plus fee and lock_height
+		+ secp::constants::PEDERSEN_COMMITMENT_SIZE
+		+ secp::constants::AGG_SIGNATURE_SIZE;
 }
+impl PMMRable for TxKernel {}
 /// TransactionBody is a common abstraction for transaction and block
 #[derive(Serialize, Deserialize, Debug, Clone)]
 pub struct TransactionBody {
@@ -1172,13 +1173,12 @@ impl OutputIdentifier {
 	}
 }
-/// Ensure this is implemented to centralize hashing with indexes
-impl PMMRable for OutputIdentifier {
-	fn len() -> usize {
-		1 + secp::constants::PEDERSEN_COMMITMENT_SIZE
-	}
+impl FixedLength for OutputIdentifier {
+	const LEN: usize = 1 + secp::constants::PEDERSEN_COMMITMENT_SIZE;
 }
+impl PMMRable for OutputIdentifier {}
 impl Writeable for OutputIdentifier {
 	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
 		writer.write_u8(self.features.bits())?;

@@ -23,6 +23,7 @@ use byteorder::{BigEndian, ByteOrder, ReadBytesExt};
 use consensus;
 use core::hash::{Hash, Hashed};
 use keychain::{BlindingFactor, Identifier, IDENTIFIER_SIZE};
+use std::fmt::Debug;
 use std::io::{self, Read, Write};
 use std::{cmp, error, fmt, mem};
 use util::secp::constants::{
@@ -371,12 +372,12 @@ impl Readable for RangeProof {
 	}
 }
-impl PMMRable for RangeProof {
-	fn len() -> usize {
-		MAX_PROOF_SIZE + 8
-	}
+impl FixedLength for RangeProof {
+	const LEN: usize = MAX_PROOF_SIZE + 8;
 }
+impl PMMRable for RangeProof {}
 impl Readable for Signature {
 	fn read(reader: &mut Reader) -> Result<Signature, Error> {
 		let a = reader.read_fixed_bytes(AGG_SIGNATURE_SIZE)?;
@@ -535,31 +536,30 @@ impl Writeable for [u8; 4] {
 	}
 }
-/// Trait for types that can serialize and report their size
-pub trait PMMRable: Readable + Writeable + Clone {
-	/// Length in bytes
-	fn len() -> usize;
+/// Trait for types that serialize to a known fixed length.
+pub trait FixedLength {
+	/// The length in bytes
+	const LEN: usize;
 }
+/// Trait for types that can be added to a "hash only" PMMR (block headers for example).
+pub trait HashOnlyPMMRable: Writeable + Clone + Debug {}
+/// Trait for types that can be added to a PMMR.
+pub trait PMMRable: FixedLength + Readable + Writeable + Clone + Debug {}
 /// Generic trait to ensure PMMR elements can be hashed with an index
 pub trait PMMRIndexHashable {
 	/// Hash with a given index
 	fn hash_with_index(&self, index: u64) -> Hash;
 }
-impl<T: PMMRable> PMMRIndexHashable for T {
+impl<T: Writeable> PMMRIndexHashable for T {
 	fn hash_with_index(&self, index: u64) -> Hash {
 		(index, self).hash()
 	}
 }
-// Convenient way to hash two existing hashes together with an index.
-impl PMMRIndexHashable for (Hash, Hash) {
-	fn hash_with_index(&self, index: u64) -> Hash {
-		(index, &self.0, &self.1).hash()
-	}
-}
 /// Useful marker trait on types that can be sized byte slices
 pub trait AsFixedBytes: Sized + AsRef<[u8]> {
 	/// The length in bytes
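
One consequence worth spelling out: with the blanket impl widened from `T: PMMRable` to `T: Writeable`, a tuple of two hashes already satisfies the bound (tuples of `Writeable` values are themselves `Writeable`), so the dedicated `(Hash, Hash)` impl has to go or the impls would overlap. A rough standalone sketch of the same blanket-impl pattern, using `std`'s `DefaultHasher` as a stand-in for Grin's blake2-based hashing:

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash as StdHash, Hasher};

// Stand-in for the real trait; the output type and hash function differ in Grin.
trait PmmrIndexHashable {
    fn hash_with_index(&self, index: u64) -> u64;
}

// Blanket impl: the index is mixed in before the element itself,
// mirroring `(index, self).hash()` in the real code.
impl<T: StdHash> PmmrIndexHashable for T {
    fn hash_with_index(&self, index: u64) -> u64 {
        let mut h = DefaultHasher::new();
        index.hash(&mut h);
        self.hash(&mut h);
        h.finish()
    }
}

fn main() {
    // A tuple of two "hashes" picks up the blanket impl for free,
    // so no dedicated (Hash, Hash) implementation is needed.
    let left: u64 = 0xdead_beef;
    let right: u64 = 0xcafe_babe;
    println!("{:x}", (left, right).hash_with_index(7));
}
```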

@@ -20,17 +20,17 @@ use core::core::hash::Hash;
 use core::core::pmmr::{self, Backend};
 use core::core::BlockHeader;
 use core::ser;
-use core::ser::{PMMRable, Readable, Reader, Writeable, Writer};
+use core::ser::{FixedLength, PMMRable, Readable, Reader, Writeable, Writer};
 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
 pub struct TestElem(pub [u32; 4]);
-impl PMMRable for TestElem {
-	fn len() -> usize {
-		16
-	}
+impl FixedLength for TestElem {
+	const LEN: usize = 16;
 }
+impl PMMRable for TestElem {}
 impl Writeable for TestElem {
 	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
 		try!(writer.write_u32(self.0[0]));
@@ -54,10 +54,7 @@ impl Readable for TestElem {
 /// Simple MMR backend implementation based on a Vector. Pruning does not
 /// compact the Vec itself.
 #[derive(Clone, Debug)]
-pub struct VecBackend<T>
-where
-	T: PMMRable,
-{
+pub struct VecBackend<T: PMMRable> {
 	/// Backend elements
 	pub data: Vec<T>,
 	pub hashes: Vec<Hash>,
@@ -65,10 +62,7 @@ where
 	pub remove_list: Vec<u64>,
 }
-impl<T> Backend<T> for VecBackend<T>
-where
-	T: PMMRable,
-{
+impl<T: PMMRable> Backend<T> for VecBackend<T> {
 	fn append(&mut self, data: T, hashes: Vec<Hash>) -> Result<(), String> {
 		self.data.push(data);
 		self.hashes.append(&mut hashes.clone());
@@ -125,10 +119,7 @@ where
 	fn dump_stats(&self) {}
 }
-impl<T> VecBackend<T>
-where
-	T: PMMRable,
-{
+impl<T: PMMRable> VecBackend<T> {
 	/// Instantiates a new VecBackend<T>
 	pub fn new() -> VecBackend<T> {
 		VecBackend {

@@ -20,7 +20,7 @@ use croaring::Bitmap;
 use core::core::hash::{Hash, Hashed};
 use core::core::pmmr::{self, family, Backend, HashOnlyBackend};
 use core::core::BlockHeader;
-use core::ser::{self, PMMRable};
+use core::ser::{self, FixedLength, PMMRable};
 use leaf_set::LeafSet;
 use prune_list::PruneList;
 use types::{prune_noop, AppendOnlyFile, HashFile};
@@ -49,10 +49,7 @@ pub const PMMR_FILES: [&str; 4] = [
 /// * A leaf_set tracks unpruned (unremoved) leaf positions in the MMR..
 /// * A prune_list tracks the positions of pruned (and compacted) roots in the
 /// MMR.
-pub struct PMMRBackend<T>
-where
-	T: PMMRable,
-{
+pub struct PMMRBackend<T: PMMRable> {
 	data_dir: String,
 	prunable: bool,
 	hash_file: AppendOnlyFile,
@@ -62,16 +59,13 @@ where
 	_marker: marker::PhantomData<T>,
 }
-impl<T> Backend<T> for PMMRBackend<T>
-where
-	T: PMMRable + ::std::fmt::Debug,
-{
+impl<T: PMMRable> Backend<T> for PMMRBackend<T> {
 	/// Append the provided data and hashes to the backend storage.
 	/// Add the new leaf pos to our leaf_set if this is a prunable MMR.
 	#[allow(unused_variables)]
 	fn append(&mut self, data: T, hashes: Vec<Hash>) -> Result<(), String> {
 		if self.prunable {
-			let record_len = Hash::SIZE as u64;
+			let record_len = Hash::LEN as u64;
 			let shift = self.prune_list.get_total_shift();
 			let position = (self.hash_file.size_unsync() / record_len) + shift + 1;
 			self.leaf_set.add(position);
@@ -95,7 +89,7 @@ where
 		let pos = position - 1;
 		// Must be on disk, doing a read at the correct position
-		let hash_record_len = Hash::SIZE;
+		let hash_record_len = Hash::LEN;
 		let file_offset = ((pos - shift) as usize) * hash_record_len;
 		let data = self.hash_file.read(file_offset, hash_record_len);
 		match ser::deserialize(&mut &data[..]) {
@@ -118,7 +112,7 @@ where
 		let pos = pmmr::n_leaves(position) - 1;
 		// Must be on disk, doing a read at the correct position
-		let record_len = T::len();
+		let record_len = T::LEN;
 		let file_offset = ((pos - shift) as usize) * record_len;
 		let data = self.data_file.read(file_offset, record_len);
 		match ser::deserialize(&mut &data[..]) {
@@ -164,14 +158,14 @@ where
 		// Rewind the hash file accounting for pruned/compacted pos
 		let shift = self.prune_list.get_shift(position);
-		let record_len = Hash::SIZE as u64;
+		let record_len = Hash::LEN as u64;
 		let file_pos = (position - shift) * record_len;
 		self.hash_file.rewind(file_pos);
 		// Rewind the data file accounting for pruned/compacted pos
 		let leaf_shift = self.prune_list.get_leaf_shift(position);
 		let flatfile_pos = pmmr::n_leaves(position);
-		let record_len = T::len() as u64;
+		let record_len = T::LEN as u64;
 		let file_pos = (flatfile_pos - leaf_shift) * record_len;
 		self.data_file.rewind(file_pos);
@@ -209,10 +203,7 @@ where
 	}
 }
-impl<T> PMMRBackend<T>
-where
-	T: PMMRable + ::std::fmt::Debug,
-{
+impl<T: PMMRable> PMMRBackend<T> {
 	/// Instantiates a new PMMR backend.
 	/// Use the provided dir to store its files.
 	pub fn new(
@@ -263,7 +254,7 @@ where
 	pub fn unpruned_size(&self) -> io::Result<u64> {
 		let total_shift = self.prune_list.get_total_shift();
-		let record_len = Hash::SIZE as u64;
+		let record_len = Hash::LEN as u64;
 		let sz = self.hash_file.size()?;
 		Ok(sz / record_len + total_shift)
 	}
@@ -271,14 +262,14 @@ where
 	/// Number of elements in the underlying stored data. Extremely dependent on
 	/// pruning and compaction.
 	pub fn data_size(&self) -> io::Result<u64> {
-		let record_len = T::len() as u64;
+		let record_len = T::LEN as u64;
 		self.data_file.size().map(|sz| sz / record_len)
 	}
 	/// Size of the underlying hashed data. Extremely dependent on pruning
 	/// and compaction.
 	pub fn hash_size(&self) -> io::Result<u64> {
-		self.hash_file.size().map(|sz| sz / Hash::SIZE as u64)
+		self.hash_file.size().map(|sz| sz / Hash::LEN as u64)
 	}
 	/// Syncs all files to disk. A call to sync is required to ensure all the
@@ -348,7 +339,7 @@ where
 		// 1. Save compact copy of the hash file, skipping removed data.
 		{
-			let record_len = Hash::SIZE as u64;
+			let record_len = Hash::LEN as u64;
 			let off_to_rm = map_vec!(pos_to_rm, |pos| {
 				let shift = self.prune_list.get_shift(pos.into());
@@ -365,7 +356,7 @@ where
 		// 2. Save compact copy of the data file, skipping removed leaves.
 		{
-			let record_len = T::len() as u64;
+			let record_len = T::LEN as u64;
 			let leaf_pos_to_rm = pos_to_rm
 				.iter()
@@ -488,7 +479,7 @@ impl HashOnlyMMRBackend {
 	/// The unpruned size of this MMR backend.
 	pub fn unpruned_size(&self) -> io::Result<u64> {
 		let sz = self.hash_file.size()?;
-		Ok(sz / Hash::SIZE as u64)
+		Ok(sz / Hash::LEN as u64)
 	}
 	/// Discard any pending changes to this MMR backend.
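
The repeated `Hash::SIZE` to `Hash::LEN` and `T::len()` to `T::LEN` substitutions above are all instances of the same record-offset arithmetic: both the hash file and the data file are treated as arrays of fixed-size records, so a shift-adjusted position maps to a byte offset by a single multiplication. A small worked sketch with made-up sizes (32 for `Hash::LEN`, 34 for a hypothetical element serializing to a feature byte plus a 33-byte commitment):

```rust
// Sketch of the record-offset arithmetic used above, with made-up numbers.
// Positions are 1-based in the MMR; `shift` accounts for pruned entries.
const HASH_LEN: u64 = 32; // Hash::LEN
const DATA_LEN: u64 = 34; // hypothetical T::LEN = 1 + 33-byte commitment

fn hash_file_offset(position: u64, shift: u64) -> u64 {
    // mirrors: ((pos - shift) as usize) * hash_record_len, with pos = position - 1
    ((position - 1) - shift) * HASH_LEN
}

fn data_file_offset(leaf_index: u64, leaf_shift: u64) -> u64 {
    // mirrors: ((pos - shift) as usize) * record_len, with pos = n_leaves(position) - 1
    (leaf_index - leaf_shift) * DATA_LEN
}

fn main() {
    // With nothing pruned, the 3rd hash starts at byte 64 of the hash file
    // and the 3rd data record at byte 68 of the data file.
    assert_eq!(hash_file_offset(3, 0), 64);
    assert_eq!(data_file_offset(2, 0), 68);
    println!("offsets check out");
}
```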

@@ -26,7 +26,7 @@ use libc::{ftruncate as ftruncate64, off_t as off64_t};
 use libc::{ftruncate64, off64_t};
 use core::core::hash::Hash;
-use core::ser;
+use core::ser::{self, FixedLength};
 /// A no-op function for doing nothing with some pruned data.
 pub fn prune_noop(_pruned_data: &[u8]) {}
@@ -58,8 +58,8 @@ impl HashFile {
 		let pos = position - 1;
 		// Must be on disk, doing a read at the correct position
-		let file_offset = (pos as usize) * Hash::SIZE;
-		let data = self.file.read(file_offset, Hash::SIZE);
+		let file_offset = (pos as usize) * Hash::LEN;
+		let data = self.file.read(file_offset, Hash::LEN);
 		match ser::deserialize(&mut &data[..]) {
 			Ok(h) => Some(h),
 			Err(e) => {
@@ -74,7 +74,7 @@ impl HashFile {
 	/// Rewind the backend file to the specified position.
 	pub fn rewind(&mut self, position: u64) -> io::Result<()> {
-		self.file.rewind(position * Hash::SIZE as u64);
+		self.file.rewind(position * Hash::LEN as u64);
 		Ok(())
 	}

@@ -24,7 +24,9 @@ use chrono::prelude::Utc;
 use croaring::Bitmap;
 use core::core::pmmr::{Backend, PMMR};
-use core::ser::{Error, PMMRIndexHashable, PMMRable, Readable, Reader, Writeable, Writer};
+use core::ser::{
+	Error, FixedLength, PMMRIndexHashable, PMMRable, Readable, Reader, Writeable, Writer,
+};
 use store::types::prune_noop;
 #[test]
@@ -752,12 +754,12 @@ fn load(pos: u64, elems: &[TestElem], backend: &mut store::pmmr::PMMRBackend<Tes
 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
 struct TestElem(u32);
-impl PMMRable for TestElem {
-	fn len() -> usize {
-		4
-	}
+impl FixedLength for TestElem {
+	const LEN: usize = 4;
 }
+impl PMMRable for TestElem {}
 impl Writeable for TestElem {
 	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), Error> {
 		writer.write_u32(self.0)