Enable faster sync (#3108)

* add bitmap accumulator
refactor vec backend so we can use it outside of tests
introduce a "hash only" vec backend for the accumulator

* get core tests passing

* initial test coverage for bitmap_accumulator

* better test coverage for bitmap accumulator and cleanup code

* refactor txhashset roots, call validate() on roots during block validation

* fix store tests

* log the "merged" root when validating roots

* cleanup, revise based on feedback

* cleanup

* rework it to pass explicit size into bitmap accumulator when applying
Antioch Peverell 2019-11-26 20:21:49 +00:00 committed by GitHub
parent 41896f0ec2
commit 11ac7d827a
22 changed files with 902 additions and 176 deletions

Cargo.lock generated
View file

@ -130,6 +130,11 @@ dependencies = [
"which 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)", "which 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)",
] ]
[[package]]
name = "bit-vec"
version = "0.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
[[package]] [[package]]
name = "bitflags" name = "bitflags"
version = "0.9.1" version = "0.9.1"
@ -719,6 +724,7 @@ dependencies = [
name = "grin_chain" name = "grin_chain"
version = "3.0.0-alpha.1" version = "3.0.0-alpha.1"
dependencies = [ dependencies = [
"bit-vec 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)",
"bitflags 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)", "bitflags 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
"byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
"chrono 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", "chrono 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)",
@ -894,6 +900,7 @@ dependencies = [
name = "grin_store" name = "grin_store"
version = "3.0.0-alpha.1" version = "3.0.0-alpha.1"
dependencies = [ dependencies = [
"bit-vec 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)",
"byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)", "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
"chrono 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)", "chrono 0.4.7 (registry+https://github.com/rust-lang/crates.io-index)",
"croaring 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)", "croaring 0.3.9 (registry+https://github.com/rust-lang/crates.io-index)",
@ -2744,6 +2751,7 @@ dependencies = [
"checksum backtrace-sys 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)" = "12cb9f1eef1d1fc869ad5a26c9fa48516339a15e54a227a25460fc304815fdb3" "checksum backtrace-sys 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)" = "12cb9f1eef1d1fc869ad5a26c9fa48516339a15e54a227a25460fc304815fdb3"
"checksum base64 0.9.3 (registry+https://github.com/rust-lang/crates.io-index)" = "489d6c0ed21b11d038c31b6ceccca973e65d73ba3bd8ecb9a2babf5546164643" "checksum base64 0.9.3 (registry+https://github.com/rust-lang/crates.io-index)" = "489d6c0ed21b11d038c31b6ceccca973e65d73ba3bd8ecb9a2babf5546164643"
"checksum bindgen 0.37.4 (registry+https://github.com/rust-lang/crates.io-index)" = "1b25ab82877ea8fe6ce1ce1f8ac54361f0218bad900af9eb11803994bf67c221" "checksum bindgen 0.37.4 (registry+https://github.com/rust-lang/crates.io-index)" = "1b25ab82877ea8fe6ce1ce1f8ac54361f0218bad900af9eb11803994bf67c221"
"checksum bit-vec 0.6.1 (registry+https://github.com/rust-lang/crates.io-index)" = "a4523a10839ffae575fb08aa3423026c8cb4687eef43952afb956229d4f246f7"
"checksum bitflags 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4efd02e230a02e18f92fc2735f44597385ed02ad8f831e7c1c1156ee5e1ab3a5" "checksum bitflags 0.9.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4efd02e230a02e18f92fc2735f44597385ed02ad8f831e7c1c1156ee5e1ab3a5"
"checksum bitflags 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3d155346769a6855b86399e9bc3814ab343cd3d62c7e985113d46a0ec3c281fd" "checksum bitflags 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3d155346769a6855b86399e9bc3814ab343cd3d62c7e985113d46a0ec3c281fd"
"checksum blake2-rfc 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)" = "5d6d530bdd2d52966a6d03b7a964add7ae1a288d25214066fd4b600f0f796400" "checksum blake2-rfc 0.2.18 (registry+https://github.com/rust-lang/crates.io-index)" = "5d6d530bdd2d52966a6d03b7a964add7ae1a288d25214066fd4b600f0f796400"

View file

@ -119,7 +119,7 @@ impl TxHashSet {
pub fn from_head(head: Arc<chain::Chain>) -> TxHashSet { pub fn from_head(head: Arc<chain::Chain>) -> TxHashSet {
let roots = head.get_txhashset_roots(); let roots = head.get_txhashset_roots();
TxHashSet { TxHashSet {
output_root_hash: roots.output_root.to_hex(), output_root_hash: roots.output_root().to_hex(),
range_proof_root_hash: roots.rproof_root.to_hex(), range_proof_root_hash: roots.rproof_root.to_hex(),
kernel_root_hash: roots.kernel_root.to_hex(), kernel_root_hash: roots.kernel_root.to_hex(),
} }

View file

@ -10,6 +10,7 @@ workspace = ".."
edition = "2018" edition = "2018"
[dependencies] [dependencies]
bit-vec = "0.6"
bitflags = "1" bitflags = "1"
byteorder = "1" byteorder = "1"
failure = "0.1" failure = "0.1"

View file

@ -594,7 +594,7 @@ impl Chain {
b.header.prev_root = prev_root; b.header.prev_root = prev_root;
// Set the output, rangeproof and kernel MMR roots. // Set the output, rangeproof and kernel MMR roots.
b.header.output_root = roots.output_root; b.header.output_root = roots.output_root();
b.header.range_proof_root = roots.rproof_root; b.header.range_proof_root = roots.rproof_root;
b.header.kernel_root = roots.kernel_root; b.header.kernel_root = roots.kernel_root;

View file

@ -15,10 +15,12 @@
//! Utility structs to handle the 3 hashtrees (output, range proof, //! Utility structs to handle the 3 hashtrees (output, range proof,
//! kernel) more conveniently and transactionally. //! kernel) more conveniently and transactionally.
mod bitmap_accumulator;
mod rewindable_kernel_view; mod rewindable_kernel_view;
mod txhashset; mod txhashset;
mod utxo_view; mod utxo_view;
pub use self::bitmap_accumulator::*;
pub use self::rewindable_kernel_view::*; pub use self::rewindable_kernel_view::*;
pub use self::txhashset::*; pub use self::txhashset::*;
pub use self::utxo_view::*; pub use self::utxo_view::*;

View file

@ -0,0 +1,239 @@
// Copyright 2019 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::convert::TryFrom;
use std::time::Instant;
use bit_vec::BitVec;
use croaring::Bitmap;
use crate::core::core::hash::{DefaultHashable, Hash};
use crate::core::core::pmmr::{self, ReadonlyPMMR, VecBackend, PMMR};
use crate::core::ser::{self, FixedLength, PMMRable, Readable, Reader, Writeable, Writer};
use crate::error::{Error, ErrorKind};
/// The "bitmap accumulator" allows us to commit to a specific bitmap by splitting it into
/// fragments and inserting these fragments into an MMR to produce an overall root hash.
/// Leaves in the MMR are fragments of the bitmap consisting of 1024 contiguous bits
/// from the overall bitmap. The first (leftmost) leaf in the MMR represents the first 1024 bits
/// of the bitmap, the next leaf is the next 1024 bits of the bitmap etc.
///
/// Flipping a single bit does not require the full bitmap to be rehashed, only the path from the
/// relevant leaf up to its associated peak.
///
/// Flipping multiple bits *within* a single chunk is no more expensive than flipping a single bit
/// as a leaf node in the MMR represents a sequence of 1024 bits. Flipping multiple bits located
/// close together is a relatively cheap operation with minimal rehashing required to update the
/// relevant peaks and the overall MMR root.
///
/// It is also possible to generate Merkle proofs for these 1024 bit fragments, proving
/// both inclusion and location in the overall "accumulator" MMR. We plan to take advantage of
/// this during fast sync, allowing for validation of partial data.
///
#[derive(Clone)]
pub struct BitmapAccumulator {
backend: VecBackend<BitmapChunk>,
}
impl BitmapAccumulator {
/// Create a new empty bitmap accumulator.
pub fn new() -> BitmapAccumulator {
BitmapAccumulator {
backend: VecBackend::new_hash_only(),
}
}
/// Initialize a bitmap accumulator given the provided idx iterator.
pub fn init<T: IntoIterator<Item = u64>>(&mut self, idx: T, size: u64) -> Result<(), Error> {
self.apply_from(idx, 0, size)
}
/// Find the start of the first "chunk" of 1024 bits from the provided idx.
/// Zero the last 10 bits to round down to multiple of 1024.
pub fn chunk_start_idx(idx: u64) -> u64 {
idx & !0x3ff
}
/// The first 1024 bits belong to chunk 0, the next 1024 bits to chunk 1, etc.
fn chunk_idx(idx: u64) -> u64 {
idx / 1024
}
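// Example: idx 2050 lies in chunk 2 (2050 / 1024 == 2), and that chunk starts at bit 2048
// (2050 & !0x3ff == 2048). idx 1023 still belongs to chunk 0, which starts at bit 0.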
/// Apply the provided idx iterator to our bitmap accumulator.
/// We start at the chunk containing from_idx and rebuild chunks as necessary
/// for the bitmap, limiting it to size (in bits).
/// If from_idx is 1023 and size is 1024 then we rebuild a single chunk.
fn apply_from<T>(&mut self, idx: T, from_idx: u64, size: u64) -> Result<(), Error>
where
T: IntoIterator<Item = u64>,
{
let now = Instant::now();
// Find the (1024 bit chunk) chunk_idx for the (individual bit) from_idx.
let from_chunk_idx = BitmapAccumulator::chunk_idx(from_idx);
let mut chunk_idx = from_chunk_idx;
let mut chunk = BitmapChunk::new();
let mut idx_iter = idx.into_iter().filter(|&x| x < size).peekable();
while let Some(x) = idx_iter.peek() {
if *x < chunk_idx * 1024 {
// skip until we reach our first chunk
idx_iter.next();
} else if *x < (chunk_idx + 1) * 1024 {
let idx = idx_iter.next().expect("next after peek");
chunk.set(idx % 1024, true);
} else {
self.append_chunk(chunk)?;
chunk_idx += 1;
chunk = BitmapChunk::new();
}
}
if chunk.any() {
self.append_chunk(chunk)?;
}
debug!(
"applied {} chunks from idx {} to idx {} ({}ms)",
1 + chunk_idx - from_chunk_idx,
from_chunk_idx,
chunk_idx,
now.elapsed().as_millis(),
);
Ok(())
}
/// Apply updates to the bitmap accumulator given an iterator of invalidated idx and
/// an iterator of idx to be set to true.
/// We determine the existing chunks to be rebuilt given the invalidated idx.
/// We then rebuild given idx, extending the accumulator with new chunk(s) as necessary.
/// Resulting bitmap accumulator will contain sufficient bitmap chunks to cover size.
/// If size is 1 then we will have a single chunk.
/// If size is 1023 then we will have a single chunk (bits 0 to 1022 inclusive).
/// If size is 1024 then we will have a single chunk (bits 0 to 1023 inclusive).
/// If size is 1025 then we will have two chunks.
pub fn apply<T, U>(&mut self, invalidated_idx: T, idx: U, size: u64) -> Result<(), Error>
where
T: IntoIterator<Item = u64>,
U: IntoIterator<Item = u64>,
{
// Determine the earliest chunk by looking at the min invalidated idx (assume sorted).
// Rewind prior to this and reapply new_idx.
// Note: We rebuild everything after rewind point but much of the bitmap may be
// unchanged. This can be further optimized by only rebuilding necessary chunks and
// rehashing.
if let Some(from_idx) = invalidated_idx.into_iter().next() {
self.rewind_prior(from_idx)?;
self.pad_left(from_idx)?;
self.apply_from(idx, from_idx, size)?;
}
Ok(())
}
/// Given the provided (bit) idx rewind the bitmap accumulator to the end of the
/// previous chunk ready for the updated chunk to be appended.
fn rewind_prior(&mut self, from_idx: u64) -> Result<(), Error> {
let chunk_idx = BitmapAccumulator::chunk_idx(from_idx);
let last_pos = self.backend.size();
let mut pmmr = PMMR::at(&mut self.backend, last_pos);
let chunk_pos = pmmr::insertion_to_pmmr_index(chunk_idx + 1);
let rewind_pos = chunk_pos.saturating_sub(1);
pmmr.rewind(rewind_pos, &Bitmap::create())
.map_err(|e| ErrorKind::Other(e))?;
Ok(())
}
/// Make sure we append empty chunks to fill in any gap before we append the chunk
/// we actually care about. This effectively pads the bitmap with 1024-bit chunks of 0s
/// as necessary to put the new chunk at the correct place.
fn pad_left(&mut self, from_idx: u64) -> Result<(), Error> {
let chunk_idx = BitmapAccumulator::chunk_idx(from_idx);
let current_chunk_idx = pmmr::n_leaves(self.backend.size());
for _ in current_chunk_idx..chunk_idx {
self.append_chunk(BitmapChunk::new())?;
}
Ok(())
}
/// Append a new chunk to the BitmapAccumulator.
/// Append parent hashes (if any) as necessary to build associated peak.
pub fn append_chunk(&mut self, chunk: BitmapChunk) -> Result<u64, Error> {
let last_pos = self.backend.size();
PMMR::at(&mut self.backend, last_pos)
.push(&chunk)
.map_err(|e| ErrorKind::Other(e).into())
}
/// The root hash of the bitmap accumulator MMR.
pub fn root(&self) -> Hash {
ReadonlyPMMR::at(&self.backend, self.backend.size()).root()
}
}
/// A bitmap "chunk" representing 1024 contiguous bits of the overall bitmap.
/// The first 1024 bits belong in one chunk. The next 1024 bits in the next chunk, etc.
#[derive(Clone, Debug)]
pub struct BitmapChunk(BitVec);
impl BitmapChunk {
const LEN_BITS: usize = 1024;
const LEN_BYTES: usize = Self::LEN_BITS / 8;
/// Create a new bitmap chunk, defaulting all bits in the chunk to false.
pub fn new() -> BitmapChunk {
BitmapChunk(BitVec::from_elem(Self::LEN_BITS, false))
}
/// Set a single bit in this chunk.
/// 0-indexed from start of chunk.
/// Panics if idx is outside the valid range of bits in a chunk.
pub fn set(&mut self, idx: u64, value: bool) {
let idx = usize::try_from(idx).expect("usize from u64");
assert!(idx < Self::LEN_BITS);
self.0.set(idx, value)
}
/// Does this bitmap chunk have any bits set to 1?
pub fn any(&self) -> bool {
self.0.any()
}
}
impl PMMRable for BitmapChunk {
type E = Self;
fn as_elmt(&self) -> Self::E {
self.clone()
}
}
impl FixedLength for BitmapChunk {
const LEN: usize = Self::LEN_BYTES;
}
impl DefaultHashable for BitmapChunk {}
impl Writeable for BitmapChunk {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
self.0.to_bytes().write(writer)
}
}
impl Readable for BitmapChunk {
/// Reading is not currently supported, just return an empty one for now.
/// We store the underlying roaring bitmap externally for the bitmap accumulator
/// and the "hash only" backend means we never actually read these chunks.
fn read(_reader: &mut dyn Reader) -> Result<BitmapChunk, ser::Error> {
Ok(BitmapChunk::new())
}
}
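A minimal usage sketch of the accumulator API above (editorial example, not part of this commit). It assumes only what this diff shows: the grin_chain::txhashset::BitmapAccumulator export and the new/init/apply/root methods. The three-output scenario itself is hypothetical.

use grin_chain::txhashset::BitmapAccumulator;

fn main() {
    let mut accumulator = BitmapAccumulator::new();

    // Build the accumulator over 3 outputs, with outputs 0 and 2 currently unspent.
    accumulator.init(vec![0, 2], 3).unwrap();
    let root_before = accumulator.root();

    // Output 2 is spent: idx 2 is invalidated and only idx 0 remains set.
    accumulator.apply(vec![2], vec![0], 3).unwrap();

    // The root commits to the bitmap, so it changes when a bit flips.
    assert_ne!(root_before, accumulator.root());
}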

View file

@ -23,8 +23,9 @@ use crate::core::core::{Block, BlockHeader, Input, Output, OutputIdentifier, TxK
use crate::core::ser::{PMMRIndexHashable, PMMRable, ProtocolVersion}; use crate::core::ser::{PMMRIndexHashable, PMMRable, ProtocolVersion};
use crate::error::{Error, ErrorKind}; use crate::error::{Error, ErrorKind};
use crate::store::{Batch, ChainStore}; use crate::store::{Batch, ChainStore};
use crate::txhashset::bitmap_accumulator::BitmapAccumulator;
use crate::txhashset::{RewindableKernelView, UTXOView}; use crate::txhashset::{RewindableKernelView, UTXOView};
use crate::types::{OutputMMRPosition, Tip, TxHashSetRoots, TxHashsetWriteStatus}; use crate::types::{OutputMMRPosition, OutputRoots, Tip, TxHashSetRoots, TxHashsetWriteStatus};
use crate::util::secp::pedersen::{Commitment, RangeProof}; use crate::util::secp::pedersen::{Commitment, RangeProof};
use crate::util::{file, secp_static, zip}; use crate::util::{file, secp_static, zip};
use croaring::Bitmap; use croaring::Bitmap;
@ -117,6 +118,8 @@ pub struct TxHashSet {
rproof_pmmr_h: PMMRHandle<RangeProof>, rproof_pmmr_h: PMMRHandle<RangeProof>,
kernel_pmmr_h: PMMRHandle<TxKernel>, kernel_pmmr_h: PMMRHandle<TxKernel>,
bitmap_accumulator: BitmapAccumulator,
// chain store used as index of commitments to MMR positions // chain store used as index of commitments to MMR positions
commit_index: Arc<ChainStore>, commit_index: Arc<ChainStore>,
} }
@ -148,6 +151,9 @@ impl TxHashSet {
header, header,
)?; )?;
// Initialize the bitmap accumulator from the current output PMMR.
let bitmap_accumulator = TxHashSet::bitmap_accumulator(&output_pmmr_h)?;
let mut maybe_kernel_handle: Option<PMMRHandle<TxKernel>> = None; let mut maybe_kernel_handle: Option<PMMRHandle<TxKernel>> = None;
let versions = vec![ProtocolVersion(2), ProtocolVersion(1)]; let versions = vec![ProtocolVersion(2), ProtocolVersion(1)];
for version in versions { for version in versions {
@ -195,6 +201,7 @@ impl TxHashSet {
output_pmmr_h, output_pmmr_h,
rproof_pmmr_h, rproof_pmmr_h,
kernel_pmmr_h, kernel_pmmr_h,
bitmap_accumulator,
commit_index, commit_index,
}) })
} else { } else {
@ -202,6 +209,15 @@ impl TxHashSet {
} }
} }
// Build a new bitmap accumulator for the provided output PMMR.
fn bitmap_accumulator(pmmr_h: &PMMRHandle<Output>) -> Result<BitmapAccumulator, Error> {
let pmmr = ReadonlyPMMR::at(&pmmr_h.backend, pmmr_h.last_pos);
let size = pmmr::n_leaves(pmmr_h.last_pos);
let mut bitmap_accumulator = BitmapAccumulator::new();
bitmap_accumulator.init(&mut pmmr.leaf_idx_iter(0), size)?;
Ok(bitmap_accumulator)
}
/// Close all backend file handles /// Close all backend file handles
pub fn release_backend_files(&mut self) { pub fn release_backend_files(&mut self) {
self.output_pmmr_h.backend.release_files(); self.output_pmmr_h.backend.release_files();
@ -329,7 +345,10 @@ impl TxHashSet {
ReadonlyPMMR::at(&self.kernel_pmmr_h.backend, self.kernel_pmmr_h.last_pos); ReadonlyPMMR::at(&self.kernel_pmmr_h.backend, self.kernel_pmmr_h.last_pos);
TxHashSetRoots { TxHashSetRoots {
output_root: output_pmmr.root(), output_roots: OutputRoots {
pmmr_root: output_pmmr.root(),
bitmap_root: self.bitmap_accumulator.root(),
},
rproof_root: rproof_pmmr.root(), rproof_root: rproof_pmmr.root(),
kernel_root: kernel_pmmr.root(), kernel_root: kernel_pmmr.root(),
} }
@ -554,6 +573,7 @@ where
let sizes: (u64, u64, u64); let sizes: (u64, u64, u64);
let res: Result<T, Error>; let res: Result<T, Error>;
let rollback: bool; let rollback: bool;
let bitmap_accumulator: BitmapAccumulator;
let head = batch.head()?; let head = batch.head()?;
@ -581,6 +601,7 @@ where
rollback = extension_pair.extension.rollback; rollback = extension_pair.extension.rollback;
sizes = extension_pair.extension.sizes(); sizes = extension_pair.extension.sizes();
bitmap_accumulator = extension_pair.extension.bitmap_accumulator.clone();
} }
// During an extension we do not want to modify the header_extension (and only read from it). // During an extension we do not want to modify the header_extension (and only read from it).
@ -610,6 +631,9 @@ where
trees.output_pmmr_h.last_pos = sizes.0; trees.output_pmmr_h.last_pos = sizes.0;
trees.rproof_pmmr_h.last_pos = sizes.1; trees.rproof_pmmr_h.last_pos = sizes.1;
trees.kernel_pmmr_h.last_pos = sizes.2; trees.kernel_pmmr_h.last_pos = sizes.2;
// Update our bitmap_accumulator based on our extension
trees.bitmap_accumulator = bitmap_accumulator;
} }
trace!("TxHashSet extension done."); trace!("TxHashSet extension done.");
@ -826,6 +850,8 @@ pub struct Extension<'a> {
rproof_pmmr: PMMR<'a, RangeProof, PMMRBackend<RangeProof>>, rproof_pmmr: PMMR<'a, RangeProof, PMMRBackend<RangeProof>>,
kernel_pmmr: PMMR<'a, TxKernel, PMMRBackend<TxKernel>>, kernel_pmmr: PMMR<'a, TxKernel, PMMRBackend<TxKernel>>,
bitmap_accumulator: BitmapAccumulator,
/// Rollback flag. /// Rollback flag.
rollback: bool, rollback: bool,
@ -879,6 +905,7 @@ impl<'a> Extension<'a> {
&mut trees.kernel_pmmr_h.backend, &mut trees.kernel_pmmr_h.backend,
trees.kernel_pmmr_h.last_pos, trees.kernel_pmmr_h.last_pos,
), ),
bitmap_accumulator: trees.bitmap_accumulator.clone(),
rollback: false, rollback: false,
batch, batch,
} }
@ -901,28 +928,53 @@ impl<'a> Extension<'a> {
/// Apply a new block to the current txhashet extension (output, rangeproof, kernel MMRs). /// Apply a new block to the current txhashet extension (output, rangeproof, kernel MMRs).
pub fn apply_block(&mut self, b: &Block) -> Result<(), Error> { pub fn apply_block(&mut self, b: &Block) -> Result<(), Error> {
let mut affected_pos = vec![];
for out in b.outputs() { for out in b.outputs() {
let pos = self.apply_output(out)?; let pos = self.apply_output(out)?;
// Update the (output_pos,height) index for the new output. affected_pos.push(pos);
self.batch self.batch
.save_output_pos_height(&out.commitment(), pos, b.header.height)?; .save_output_pos_height(&out.commitment(), pos, b.header.height)?;
} }
for input in b.inputs() { for input in b.inputs() {
self.apply_input(input)?; let pos = self.apply_input(input)?;
affected_pos.push(pos);
} }
for kernel in b.kernels() { for kernel in b.kernels() {
self.apply_kernel(kernel)?; self.apply_kernel(kernel)?;
} }
// Update our BitmapAccumulator based on affected outputs (both spent and created).
self.apply_to_bitmap_accumulator(&affected_pos)?;
// Update the head of the extension to reflect the block we just applied. // Update the head of the extension to reflect the block we just applied.
self.head = Tip::from_header(&b.header); self.head = Tip::from_header(&b.header);
Ok(()) Ok(())
} }
fn apply_input(&mut self, input: &Input) -> Result<(), Error> { fn apply_to_bitmap_accumulator(&mut self, output_pos: &[u64]) -> Result<(), Error> {
// if self.output_pmmr.is_empty() || output_pos.is_empty() {
// return Ok(());
// }
let mut output_idx: Vec<_> = output_pos
.iter()
.map(|x| pmmr::n_leaves(*x).saturating_sub(1))
.collect();
output_idx.sort_unstable();
let min_idx = output_idx.first().cloned().unwrap_or(0);
let size = pmmr::n_leaves(self.output_pmmr.last_pos);
self.bitmap_accumulator.apply(
output_idx,
self.output_pmmr
.leaf_idx_iter(BitmapAccumulator::chunk_start_idx(min_idx)),
size,
)
}
fn apply_input(&mut self, input: &Input) -> Result<u64, Error> {
let commit = input.commitment(); let commit = input.commitment();
let pos_res = self.batch.get_output_pos(&commit); let pos_res = self.batch.get_output_pos(&commit);
if let Ok(pos) = pos_res { if let Ok(pos) = pos_res {
@ -943,14 +995,14 @@ impl<'a> Extension<'a> {
self.rproof_pmmr self.rproof_pmmr
.prune(pos) .prune(pos)
.map_err(|e| ErrorKind::TxHashSetErr(e))?; .map_err(|e| ErrorKind::TxHashSetErr(e))?;
Ok(pos)
} }
Ok(false) => return Err(ErrorKind::AlreadySpent(commit).into()), Ok(false) => Err(ErrorKind::AlreadySpent(commit).into()),
Err(e) => return Err(ErrorKind::TxHashSetErr(e).into()), Err(e) => Err(ErrorKind::TxHashSetErr(e).into()),
} }
} else { } else {
return Err(ErrorKind::AlreadySpent(commit).into()); Err(ErrorKind::AlreadySpent(commit).into())
} }
Ok(())
} }
fn apply_output(&mut self, out: &Output) -> Result<(u64), Error> { fn apply_output(&mut self, out: &Output) -> Result<(u64), Error> {
@ -1083,6 +1135,13 @@ impl<'a> Extension<'a> {
self.kernel_pmmr self.kernel_pmmr
.rewind(kernel_pos, &Bitmap::create()) .rewind(kernel_pos, &Bitmap::create())
.map_err(&ErrorKind::TxHashSetErr)?; .map_err(&ErrorKind::TxHashSetErr)?;
// Update our BitmapAccumulator based on affected outputs.
// We want to "unspend" every rewound spent output.
// Treat output_pos as an affected output to ensure we rebuild far enough back.
let mut affected_pos: Vec<_> = rewind_rm_pos.iter().map(|x| x as u64).collect();
affected_pos.push(output_pos);
self.apply_to_bitmap_accumulator(&affected_pos)?;
Ok(()) Ok(())
} }
@ -1090,10 +1149,13 @@ impl<'a> Extension<'a> {
/// and kernel sum trees. /// and kernel sum trees.
pub fn roots(&self) -> Result<TxHashSetRoots, Error> { pub fn roots(&self) -> Result<TxHashSetRoots, Error> {
Ok(TxHashSetRoots { Ok(TxHashSetRoots {
output_root: self output_roots: OutputRoots {
pmmr_root: self
.output_pmmr .output_pmmr
.root() .root()
.map_err(|_| ErrorKind::InvalidRoot)?, .map_err(|_| ErrorKind::InvalidRoot)?,
bitmap_root: self.bitmap_accumulator.root(),
},
rproof_root: self rproof_root: self
.rproof_pmmr .rproof_pmmr
.root() .root()
@ -1111,16 +1173,7 @@ impl<'a> Extension<'a> {
return Ok(()); return Ok(());
} }
let head_header = self.batch.get_block_header(&self.head.hash())?; let head_header = self.batch.get_block_header(&self.head.hash())?;
let header_roots = TxHashSetRoots { self.roots()?.validate(&head_header)
output_root: head_header.output_root,
rproof_root: head_header.range_proof_root,
kernel_root: head_header.kernel_root,
};
if header_roots != self.roots()? {
Err(ErrorKind::InvalidRoot.into())
} else {
Ok(())
}
} }
/// Validate the header, output and kernel MMR sizes against the block header. /// Validate the header, output and kernel MMR sizes against the block header.
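The mapping from output MMR positions to bitmap indices used by apply_to_bitmap_accumulator above can be illustrated with a short sketch (editorial example, not part of this commit; it only uses pmmr::n_leaves from grin_core, as the diff itself does):

use grin_core::core::pmmr;

fn main() {
    // The first five leaves sit at MMR positions 1, 2, 4, 5 and 8.
    let affected_pos = vec![1u64, 2, 4, 5, 8];

    // Convert each position to its 0-based leaf insertion index before handing
    // the indices to the bitmap accumulator, as the extension does.
    let idx: Vec<u64> = affected_pos
        .iter()
        .map(|&pos| pmmr::n_leaves(pos).saturating_sub(1))
        .collect();
    assert_eq!(idx, vec![0, 1, 2, 3, 4]);
}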

View file

@ -20,8 +20,8 @@ use std::sync::Arc;
use crate::core::core::hash::{Hash, Hashed, ZERO_HASH}; use crate::core::core::hash::{Hash, Hashed, ZERO_HASH};
use crate::core::core::{Block, BlockHeader}; use crate::core::core::{Block, BlockHeader};
use crate::core::pow::Difficulty; use crate::core::pow::Difficulty;
use crate::core::ser; use crate::core::ser::{self, PMMRIndexHashable};
use crate::error::Error; use crate::error::{Error, ErrorKind};
use crate::util::RwLock; use crate::util::RwLock;
bitflags! { bitflags! {
@ -181,18 +181,70 @@ impl TxHashsetWriteStatus for SyncState {
} }
} }
/// A helper to hold the roots of the txhashset in order to keep them /// A helper for the various txhashset MMR roots.
/// readable. #[derive(Debug)]
#[derive(Debug, PartialEq)]
pub struct TxHashSetRoots { pub struct TxHashSetRoots {
/// Output root /// Output roots
pub output_root: Hash, pub output_roots: OutputRoots,
/// Range Proof root /// Range Proof root
pub rproof_root: Hash, pub rproof_root: Hash,
/// Kernel root /// Kernel root
pub kernel_root: Hash, pub kernel_root: Hash,
} }
impl TxHashSetRoots {
/// Accessor for the underlying output PMMR root
pub fn output_root(&self) -> Hash {
self.output_roots.output_root()
}
/// Validate roots against the provided block header.
pub fn validate(&self, header: &BlockHeader) -> Result<(), Error> {
debug!(
"validate roots: {} at {}, output_root: {}, output pmmr: {} (bitmap: {}, merged: {})",
header.hash(),
header.height,
header.output_root,
self.output_roots.output_root(),
self.output_roots.bitmap_root,
self.output_roots.merged_root(header),
);
if header.output_root != self.output_roots.pmmr_root {
Err(ErrorKind::InvalidRoot.into())
} else if header.range_proof_root != self.rproof_root {
Err(ErrorKind::InvalidRoot.into())
} else if header.kernel_root != self.kernel_root {
Err(ErrorKind::InvalidRoot.into())
} else {
Ok(())
}
}
}
/// A helper for the various output roots.
#[derive(Debug)]
pub struct OutputRoots {
/// The output PMMR root
pub pmmr_root: Hash,
/// The bitmap accumulator root
pub bitmap_root: Hash,
}
impl OutputRoots {
/// The root of the underlying output PMMR.
pub fn output_root(&self) -> Hash {
self.pmmr_root
}
/// Hash the root of the output PMMR and the root of the bitmap accumulator
/// together with the size of the output PMMR (for consistency with existing PMMR impl).
/// H(pmmr_size | pmmr_root | bitmap_root)
pub fn merged_root(&self, header: &BlockHeader) -> Hash {
(self.pmmr_root, self.bitmap_root).hash_with_index(header.output_mmr_size)
}
}
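A minimal sketch of the merged root described above (editorial example, not part of this commit): the output PMMR root and the bitmap accumulator root are hashed as a pair with the output MMR size as the index, the same scheme used for internal MMR nodes.

use grin_core::core::hash::Hash;
use grin_core::ser::PMMRIndexHashable;

// H(output_mmr_size | pmmr_root | bitmap_root)
fn merged_root(pmmr_root: Hash, bitmap_root: Hash, output_mmr_size: u64) -> Hash {
    (pmmr_root, bitmap_root).hash_with_index(output_mmr_size)
}

fn main() {
    // Hypothetical inputs, just to exercise the function.
    let root = merged_root(Hash::default(), Hash::default(), 0);
    println!("merged root: {}", root);
}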
/// A helper to hold the output pmmr position of the txhashset in order to keep them /// A helper to hold the output pmmr position of the txhashset in order to keep them
/// readable. /// readable.
#[derive(Debug)] #[derive(Debug)]

View file

@ -0,0 +1,188 @@
// Copyright 2019 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use self::chain::txhashset::BitmapAccumulator;
use self::core::core::hash::Hash;
use self::core::ser::PMMRIndexHashable;
use bit_vec::BitVec;
use grin_chain as chain;
use grin_core as core;
use grin_util as util;
#[test]
fn test_bitmap_accumulator() {
util::init_test_logger();
let mut accumulator = BitmapAccumulator::new();
assert_eq!(accumulator.root(), Hash::default());
// 1000... (rebuild from 0, setting [0] true)
accumulator.apply(vec![0], vec![0], 1).unwrap();
let expected_hash = {
let mut bit_vec = BitVec::from_elem(1024, false);
bit_vec.set(0, true);
bit_vec.to_bytes().hash_with_index(0)
};
assert_eq!(accumulator.root(), expected_hash);
// 1100... (rebuild from 0, setting [0, 1] true)
accumulator.apply(vec![0], vec![0, 1], 2).unwrap();
let expected_hash = {
let mut bit_vec = BitVec::from_elem(1024, false);
bit_vec.set(0, true);
bit_vec.set(1, true);
bit_vec.to_bytes().hash_with_index(0)
};
assert_eq!(accumulator.root(), expected_hash);
// 0100... (rebuild from 0, setting [1] true, which will reset [0] false)
accumulator.apply(vec![0], vec![1], 2).unwrap();
let expected_hash = {
let mut bit_vec = BitVec::from_elem(1024, false);
bit_vec.set(1, true);
let expected_bytes = bit_vec.to_bytes();
expected_bytes.hash_with_index(0)
};
assert_eq!(accumulator.root(), expected_hash);
// 0100... (rebuild from 1, setting [1] true)
accumulator.apply(vec![1], vec![1], 2).unwrap();
let expected_hash = {
let mut bit_vec = BitVec::from_elem(1024, false);
bit_vec.set(1, true);
let expected_bytes = bit_vec.to_bytes();
expected_bytes.hash_with_index(0)
};
assert_eq!(accumulator.root(), expected_hash);
// 0100...0001 (rebuild from 0, setting [1, 1023] true)
accumulator.apply(vec![0], vec![1, 1023], 1024).unwrap();
let expected_hash = {
let mut bit_vec = BitVec::from_elem(1024, false);
bit_vec.set(1, true);
bit_vec.set(1023, true);
let expected_bytes = bit_vec.to_bytes();
expected_bytes.hash_with_index(0)
};
assert_eq!(accumulator.root(), expected_hash);
// Now set bits such that we extend the bitmap accumulator across multiple 1024 bit chunks.
// We need a second bit_vec here to reflect the additional chunk.
// 0100...0001, 1000...0000 (rebuild from 0, setting [1, 1023, 1024] true)
accumulator
.apply(vec![0], vec![1, 1023, 1024], 1025)
.unwrap();
let expected_hash = {
let mut bit_vec = BitVec::from_elem(1024, false);
bit_vec.set(1, true);
bit_vec.set(1023, true);
let mut bit_vec2 = BitVec::from_elem(1024, false);
bit_vec2.set(0, true);
let expected_bytes_0 = bit_vec.to_bytes();
let expected_bytes_1 = bit_vec2.to_bytes();
let expected_hash_0 = expected_bytes_0.hash_with_index(0);
let expected_hash_1 = expected_bytes_1.hash_with_index(1);
(expected_hash_0, expected_hash_1).hash_with_index(2)
};
assert_eq!(accumulator.root(), expected_hash);
// Just rebuild the second bitmap chunk.
// 0100...0001, 0100...0000 (rebuild from 1025, setting [1025] true)
accumulator.apply(vec![1025], vec![1025], 1026).unwrap();
let expected_hash = {
let mut bit_vec = BitVec::from_elem(1024, false);
bit_vec.set(1, true);
bit_vec.set(1023, true);
let mut bit_vec2 = BitVec::from_elem(1024, false);
bit_vec2.set(1, true);
let expected_bytes_0 = bit_vec.to_bytes();
let expected_bytes_1 = bit_vec2.to_bytes();
let expected_hash_0 = expected_bytes_0.hash_with_index(0);
let expected_hash_1 = expected_bytes_1.hash_with_index(1);
(expected_hash_0, expected_hash_1).hash_with_index(2)
};
assert_eq!(accumulator.root(), expected_hash);
// Rebuild the first bitmap chunk and all chunks after it.
// 0100...0000, 0100...0000 (rebuild from 1, setting [1, 1025] true)
accumulator.apply(vec![1], vec![1, 1025], 1026).unwrap();
let expected_hash = {
let mut bit_vec = BitVec::from_elem(1024, false);
bit_vec.set(1, true);
let mut bit_vec2 = BitVec::from_elem(1024, false);
bit_vec2.set(1, true);
let expected_bytes_0 = bit_vec.to_bytes();
let expected_bytes_1 = bit_vec2.to_bytes();
let expected_hash_0 = expected_bytes_0.hash_with_index(0);
let expected_hash_1 = expected_bytes_1.hash_with_index(1);
(expected_hash_0, expected_hash_1).hash_with_index(2)
};
assert_eq!(accumulator.root(), expected_hash);
// Make sure we handle the case where the first chunk is all 0s
// 0000...0000, 0100...0000 (rebuild from 1, setting [1025] true)
accumulator.apply(vec![1], vec![1025], 1026).unwrap();
let expected_hash = {
let bit_vec = BitVec::from_elem(1024, false);
let mut bit_vec2 = BitVec::from_elem(1024, false);
bit_vec2.set(1, true);
let expected_bytes_0 = bit_vec.to_bytes();
let expected_bytes_1 = bit_vec2.to_bytes();
let expected_hash_0 = expected_bytes_0.hash_with_index(0);
let expected_hash_1 = expected_bytes_1.hash_with_index(1);
(expected_hash_0, expected_hash_1).hash_with_index(2)
};
assert_eq!(accumulator.root(), expected_hash);
// Check that removing the last bit in a chunk removes the now empty chunk
// if it is the rightmost chunk.
// 0000...0001 (rebuild from 1023, setting [1023] true)
accumulator.apply(vec![1023], vec![1023], 1024).unwrap();
let expected_hash = {
let mut bit_vec = BitVec::from_elem(1024, false);
bit_vec.set(1023, true);
let expected_bytes = bit_vec.to_bytes();
expected_bytes.hash_with_index(0)
};
assert_eq!(accumulator.root(), expected_hash);
// Make sure we pad appropriately with 0s if we set a distant bit to 1.
// Start with an empty accumulator.
// 0000...0000, 0000...0000, 0000...0000, 0000...0001 (rebuild from 4095, setting [4095] true)
let mut accumulator = BitmapAccumulator::new();
accumulator.apply(vec![4095], vec![4095], 4096).unwrap();
let expected_hash = {
let bit_vec0 = BitVec::from_elem(1024, false);
let bit_vec1 = BitVec::from_elem(1024, false);
let bit_vec2 = BitVec::from_elem(1024, false);
let mut bit_vec3 = BitVec::from_elem(1024, false);
bit_vec3.set(1023, true);
let expected_bytes_0 = bit_vec0.to_bytes();
let expected_bytes_1 = bit_vec1.to_bytes();
let expected_bytes_2 = bit_vec2.to_bytes();
let expected_bytes_3 = bit_vec3.to_bytes();
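// MMR positions (0-based) for four leaf chunks: leaves at 0, 1, 3 and 4,
// their parents at 2 and 5, and the single root/peak at 6.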
let expected_hash_0 = expected_bytes_0.hash_with_index(0);
let expected_hash_1 = expected_bytes_1.hash_with_index(1);
let expected_hash_2 = (expected_hash_0, expected_hash_1).hash_with_index(2);
let expected_hash_3 = expected_bytes_2.hash_with_index(3);
let expected_hash_4 = expected_bytes_3.hash_with_index(4);
let expected_hash_5 = (expected_hash_3, expected_hash_4).hash_with_index(5);
(expected_hash_2, expected_hash_5).hash_with_index(6)
};
assert_eq!(accumulator.root(), expected_hash);
}

View file

@ -40,8 +40,10 @@ mod backend;
mod pmmr; mod pmmr;
mod readonly_pmmr; mod readonly_pmmr;
mod rewindable_pmmr; mod rewindable_pmmr;
mod vec_backend;
pub use self::backend::*; pub use self::backend::*;
pub use self::pmmr::*; pub use self::pmmr::*;
pub use self::readonly_pmmr::*; pub use self::readonly_pmmr::*;
pub use self::rewindable_pmmr::*; pub use self::rewindable_pmmr::*;
pub use self::vec_backend::*;

View file

@ -58,6 +58,10 @@ pub trait Backend<T: PMMRable> {
/// Number of leaves /// Number of leaves
fn n_unpruned_leaves(&self) -> u64; fn n_unpruned_leaves(&self) -> u64;
/// Iterator over current (unpruned, unremoved) leaf insertion index.
/// Note: This differs from underlying MMR pos - [0, 1, 2, 3, 4] vs. [1, 2, 4, 5, 8].
fn leaf_idx_iter(&self, from_idx: u64) -> Box<dyn Iterator<Item = u64> + '_>;
/// Remove Hash by insertion position. An index is also provided so the /// Remove Hash by insertion position. An index is also provided so the
/// underlying backend can implement some rollback of positions up to a /// underlying backend can implement some rollback of positions up to a
/// given index (practically the index is the height of a block that /// given index (practically the index is the height of a block that

View file

@ -84,6 +84,11 @@ where
self.backend.n_unpruned_leaves() self.backend.n_unpruned_leaves()
} }
/// Iterator over current (unpruned, unremoved) leaf insertion indices.
pub fn leaf_idx_iter(&self, from_idx: u64) -> impl Iterator<Item = u64> + '_ {
self.backend.leaf_idx_iter(from_idx)
}
/// Returns a vec of the peaks of this MMR. /// Returns a vec of the peaks of this MMR.
pub fn peaks(&self) -> Vec<Hash> { pub fn peaks(&self) -> Vec<Hash> {
let peaks_pos = peaks(self.last_pos); let peaks_pos = peaks(self.last_pos);

View file

@ -91,6 +91,11 @@ where
self.backend.leaf_pos_iter() self.backend.leaf_pos_iter()
} }
/// Iterator over current (unpruned, unremoved) leaf insertion indices.
pub fn leaf_idx_iter(&self, from_idx: u64) -> impl Iterator<Item = u64> + '_ {
self.backend.leaf_idx_iter(from_idx)
}
/// Is the MMR empty? /// Is the MMR empty?
pub fn is_empty(&self) -> bool { pub fn is_empty(&self) -> bool {
self.last_pos == 0 self.last_pos == 0

View file

@ -0,0 +1,153 @@
// Copyright 2019 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashSet;
use std::convert::TryFrom;
use std::fs::File;
use croaring::Bitmap;
use crate::core::hash::Hash;
use crate::core::pmmr::{self, Backend};
use crate::core::BlockHeader;
use crate::ser::PMMRable;
/// Simple/minimal/naive MMR backend implementation backed by Vec<T> and Vec<Hash>.
/// Removed pos are maintained in a HashSet<u64>.
#[derive(Clone, Debug)]
pub struct VecBackend<T: PMMRable> {
/// Backend elements (optional, possible to just store hashes).
pub data: Option<Vec<T>>,
/// Vec of hashes for the PMMR (both leaves and parents).
pub hashes: Vec<Hash>,
/// Positions of removed elements (is this applicable if we do not store data?)
pub removed: HashSet<u64>,
}
impl<T: PMMRable> Backend<T> for VecBackend<T> {
fn append(&mut self, elmt: &T, hashes: Vec<Hash>) -> Result<(), String> {
if let Some(data) = &mut self.data {
data.push(elmt.clone());
}
self.hashes.append(&mut hashes.clone());
Ok(())
}
fn get_hash(&self, position: u64) -> Option<Hash> {
if self.removed.contains(&position) {
None
} else {
self.get_from_file(position)
}
}
fn get_data(&self, position: u64) -> Option<T::E> {
if self.removed.contains(&position) {
None
} else {
self.get_data_from_file(position)
}
}
fn get_from_file(&self, position: u64) -> Option<Hash> {
let idx = usize::try_from(position.saturating_sub(1)).expect("usize from u64");
self.hashes.get(idx).cloned()
}
fn get_data_from_file(&self, position: u64) -> Option<T::E> {
if let Some(data) = &self.data {
let idx = usize::try_from(pmmr::n_leaves(position).saturating_sub(1))
.expect("usize from u64");
data.get(idx).map(|x| x.as_elmt())
} else {
None
}
}
fn data_as_temp_file(&self) -> Result<File, String> {
unimplemented!()
}
/// Number of leaves in the MMR
fn n_unpruned_leaves(&self) -> u64 {
unimplemented!()
}
fn leaf_pos_iter(&self) -> Box<dyn Iterator<Item = u64> + '_> {
Box::new(
self.hashes
.iter()
.enumerate()
.map(|(x, _)| (x + 1) as u64)
.filter(move |x| pmmr::is_leaf(*x) && !self.removed.contains(x)),
)
}
fn leaf_idx_iter(&self, from_idx: u64) -> Box<dyn Iterator<Item = u64> + '_> {
let from_pos = pmmr::insertion_to_pmmr_index(from_idx + 1);
Box::new(
self.leaf_pos_iter()
.skip_while(move |x| *x < from_pos)
.map(|x| pmmr::n_leaves(x).saturating_sub(1)),
)
}
fn remove(&mut self, position: u64) -> Result<(), String> {
self.removed.insert(position);
Ok(())
}
fn rewind(&mut self, position: u64, _rewind_rm_pos: &Bitmap) -> Result<(), String> {
if let Some(data) = &mut self.data {
let idx = pmmr::n_leaves(position);
data.truncate(usize::try_from(idx).expect("usize from u64"));
}
self.hashes
.truncate(usize::try_from(position).expect("usize from u64"));
Ok(())
}
fn snapshot(&self, _header: &BlockHeader) -> Result<(), String> {
Ok(())
}
fn release_files(&mut self) {}
fn dump_stats(&self) {}
}
impl<T: PMMRable> VecBackend<T> {
/// Instantiates a new empty vec backend.
pub fn new() -> VecBackend<T> {
VecBackend {
data: Some(vec![]),
hashes: vec![],
removed: HashSet::new(),
}
}
/// Instantiate a new empty "hash only" vec backend.
pub fn new_hash_only() -> VecBackend<T> {
VecBackend {
data: None,
hashes: vec![],
removed: HashSet::new(),
}
}
/// Size of this vec backend in hashes.
pub fn size(&self) -> u64 {
self.hashes.len() as u64
}
}
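A small sketch of the hash-only mode (editorial example, not part of this commit): hashes and the MMR structure are kept, element data is not, so data lookups return None. TestElem is copied from the test helpers elsewhere in this commit.

use grin_core::core::hash::DefaultHashable;
use grin_core::core::pmmr::{Backend, VecBackend, PMMR};
use grin_core::ser::{self, FixedLength, PMMRable, Readable, Reader, Writeable, Writer};

#[derive(Copy, Clone, Debug, PartialEq, Eq)]
struct TestElem([u32; 4]);

impl DefaultHashable for TestElem {}

impl FixedLength for TestElem {
    const LEN: usize = 16;
}

impl PMMRable for TestElem {
    type E = Self;
    fn as_elmt(&self) -> Self::E {
        *self
    }
}

impl Writeable for TestElem {
    fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
        writer.write_u32(self.0[0])?;
        writer.write_u32(self.0[1])?;
        writer.write_u32(self.0[2])?;
        writer.write_u32(self.0[3])
    }
}

impl Readable for TestElem {
    fn read(reader: &mut dyn Reader) -> Result<TestElem, ser::Error> {
        Ok(TestElem([
            reader.read_u32()?,
            reader.read_u32()?,
            reader.read_u32()?,
            reader.read_u32()?,
        ]))
    }
}

fn main() {
    let mut backend: VecBackend<TestElem> = VecBackend::new_hash_only();
    {
        let mut pmmr = PMMR::new(&mut backend);
        pmmr.push(&TestElem([0, 0, 0, 1])).unwrap();
        pmmr.push(&TestElem([0, 0, 0, 2])).unwrap();
    }
    // Two leaves plus their parent hash are stored, but no element data is retained.
    assert_eq!(backend.size(), 3);
    assert!(backend.get_hash(1).is_some());
    assert_eq!(backend.get_data(1), None);
}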

View file

@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
pub mod common; mod common;
use crate::common::{new_block, tx1i2o, tx2i1o, txspend1i1o}; use crate::common::{new_block, tx1i2o, tx2i1o, txspend1i1o};
use crate::core::consensus::BLOCK_OUTPUT_WEIGHT; use crate::core::consensus::BLOCK_OUTPUT_WEIGHT;
use crate::core::core::block::Error; use crate::core::core::block::Error;

View file

@ -15,15 +15,18 @@
//! Common test functions //! Common test functions
use grin_core::core::{Block, BlockHeader, KernelFeatures, Transaction}; use grin_core::core::{Block, BlockHeader, KernelFeatures, Transaction};
use grin_core::core::hash::DefaultHashable;
use grin_core::libtx::{ use grin_core::libtx::{
build::{self, input, output}, build::{self, input, output},
proof::{ProofBuild, ProofBuilder}, proof::{ProofBuild, ProofBuilder},
reward, reward,
}; };
use grin_core::pow::Difficulty; use grin_core::pow::Difficulty;
use grin_core::ser::{self, FixedLength, PMMRable, Readable, Reader, Writeable, Writer};
use keychain::{Identifier, Keychain}; use keychain::{Identifier, Keychain};
// utility producing a transaction with 2 inputs and a single outputs // utility producing a transaction with 2 inputs and a single outputs
#[allow(dead_code)]
pub fn tx2i1o() -> Transaction { pub fn tx2i1o() -> Transaction {
let keychain = keychain::ExtKeychain::from_random_seed(false).unwrap(); let keychain = keychain::ExtKeychain::from_random_seed(false).unwrap();
let builder = ProofBuilder::new(&keychain); let builder = ProofBuilder::new(&keychain);
@ -41,6 +44,7 @@ pub fn tx2i1o() -> Transaction {
} }
// utility producing a transaction with a single input and output // utility producing a transaction with a single input and output
#[allow(dead_code)]
pub fn tx1i1o() -> Transaction { pub fn tx1i1o() -> Transaction {
let keychain = keychain::ExtKeychain::from_random_seed(false).unwrap(); let keychain = keychain::ExtKeychain::from_random_seed(false).unwrap();
let builder = ProofBuilder::new(&keychain); let builder = ProofBuilder::new(&keychain);
@ -59,6 +63,7 @@ pub fn tx1i1o() -> Transaction {
// utility producing a transaction with a single input // utility producing a transaction with a single input
// and two outputs (one change output) // and two outputs (one change output)
// Note: this tx has an "offset" kernel // Note: this tx has an "offset" kernel
#[allow(dead_code)]
pub fn tx1i2o() -> Transaction { pub fn tx1i2o() -> Transaction {
let keychain = keychain::ExtKeychain::from_random_seed(false).unwrap(); let keychain = keychain::ExtKeychain::from_random_seed(false).unwrap();
let builder = ProofBuilder::new(&keychain); let builder = ProofBuilder::new(&keychain);
@ -77,6 +82,7 @@ pub fn tx1i2o() -> Transaction {
// utility to create a block without worrying about the key or previous // utility to create a block without worrying about the key or previous
// header // header
#[allow(dead_code)]
pub fn new_block<K, B>( pub fn new_block<K, B>(
txs: Vec<&Transaction>, txs: Vec<&Transaction>,
keychain: &K, keychain: &K,
@ -101,6 +107,7 @@ where
// utility producing a transaction that spends an output with the provided // utility producing a transaction that spends an output with the provided
// value and blinding key // value and blinding key
#[allow(dead_code)]
pub fn txspend1i1o<K, B>( pub fn txspend1i1o<K, B>(
v: u64, v: u64,
keychain: &K, keychain: &K,
@ -120,3 +127,40 @@ where
) )
.unwrap() .unwrap()
} }
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub struct TestElem(pub [u32; 4]);
impl DefaultHashable for TestElem {}
impl FixedLength for TestElem {
const LEN: usize = 16;
}
impl PMMRable for TestElem {
type E = Self;
fn as_elmt(&self) -> Self::E {
self.clone()
}
}
impl Writeable for TestElem {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
r#try!(writer.write_u32(self.0[0]));
r#try!(writer.write_u32(self.0[1]));
r#try!(writer.write_u32(self.0[2]));
writer.write_u32(self.0[3])
}
}
impl Readable for TestElem {
fn read(reader: &mut dyn Reader) -> Result<TestElem, ser::Error> {
Ok(TestElem([
reader.read_u32()?,
reader.read_u32()?,
reader.read_u32()?,
reader.read_u32()?,
]))
}
}

View file

@ -12,12 +12,12 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
mod vec_backend; mod common;
use self::core::core::merkle_proof::MerkleProof; use self::core::core::merkle_proof::MerkleProof;
use self::core::core::pmmr::PMMR; use self::core::core::pmmr::{VecBackend, PMMR};
use self::core::ser::{self, PMMRIndexHashable}; use self::core::ser::{self, PMMRIndexHashable};
use crate::vec_backend::{TestElem, VecBackend}; use crate::common::TestElem;
use grin_core as core; use grin_core as core;
#[test] #[test]

View file

@ -12,12 +12,12 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
mod vec_backend; mod common;
use self::core::core::hash::Hash; use self::core::core::hash::Hash;
use self::core::core::pmmr::{self, PMMR}; use self::core::core::pmmr::{self, VecBackend, PMMR};
use self::core::ser::PMMRIndexHashable; use self::core::ser::PMMRIndexHashable;
use crate::vec_backend::{TestElem, VecBackend}; use crate::common::TestElem;
use chrono::prelude::Utc; use chrono::prelude::Utc;
use grin_core as core; use grin_core as core;
use std::u64; use std::u64;
@ -433,7 +433,7 @@ fn pmmr_prune() {
// First check the initial numbers of elements. // First check the initial numbers of elements.
assert_eq!(ba.hashes.len(), 16); assert_eq!(ba.hashes.len(), 16);
assert_eq!(ba.remove_list.len(), 0); assert_eq!(ba.removed.len(), 0);
// pruning a leaf with no parent should do nothing // pruning a leaf with no parent should do nothing
{ {
@ -442,7 +442,7 @@ fn pmmr_prune() {
assert_eq!(orig_root, pmmr.root().unwrap()); assert_eq!(orig_root, pmmr.root().unwrap());
} }
assert_eq!(ba.hashes.len(), 16); assert_eq!(ba.hashes.len(), 16);
assert_eq!(ba.remove_list.len(), 1); assert_eq!(ba.removed.len(), 1);
// pruning leaves with no shared parent just removes 1 element // pruning leaves with no shared parent just removes 1 element
{ {
@ -451,7 +451,7 @@ fn pmmr_prune() {
assert_eq!(orig_root, pmmr.root().unwrap()); assert_eq!(orig_root, pmmr.root().unwrap());
} }
assert_eq!(ba.hashes.len(), 16); assert_eq!(ba.hashes.len(), 16);
assert_eq!(ba.remove_list.len(), 2); assert_eq!(ba.removed.len(), 2);
{ {
let mut pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut ba, sz); let mut pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut ba, sz);
@ -459,7 +459,7 @@ fn pmmr_prune() {
assert_eq!(orig_root, pmmr.root().unwrap()); assert_eq!(orig_root, pmmr.root().unwrap());
} }
assert_eq!(ba.hashes.len(), 16); assert_eq!(ba.hashes.len(), 16);
assert_eq!(ba.remove_list.len(), 3); assert_eq!(ba.removed.len(), 3);
// pruning a non-leaf node has no effect // pruning a non-leaf node has no effect
{ {
@ -468,7 +468,7 @@ fn pmmr_prune() {
assert_eq!(orig_root, pmmr.root().unwrap()); assert_eq!(orig_root, pmmr.root().unwrap());
} }
assert_eq!(ba.hashes.len(), 16); assert_eq!(ba.hashes.len(), 16);
assert_eq!(ba.remove_list.len(), 3); assert_eq!(ba.removed.len(), 3);
// TODO - no longer true (leaves only now) - pruning sibling removes subtree // TODO - no longer true (leaves only now) - pruning sibling removes subtree
{ {
@ -477,7 +477,7 @@ fn pmmr_prune() {
assert_eq!(orig_root, pmmr.root().unwrap()); assert_eq!(orig_root, pmmr.root().unwrap());
} }
assert_eq!(ba.hashes.len(), 16); assert_eq!(ba.hashes.len(), 16);
assert_eq!(ba.remove_list.len(), 4); assert_eq!(ba.removed.len(), 4);
// TODO - no longer true (leaves only now) - pruning all leaves under level >1 // TODO - no longer true (leaves only now) - pruning all leaves under level >1
// removes all subtree // removes all subtree
@ -487,7 +487,7 @@ fn pmmr_prune() {
assert_eq!(orig_root, pmmr.root().unwrap()); assert_eq!(orig_root, pmmr.root().unwrap());
} }
assert_eq!(ba.hashes.len(), 16); assert_eq!(ba.hashes.len(), 16);
assert_eq!(ba.remove_list.len(), 5); assert_eq!(ba.removed.len(), 5);
// pruning everything should only leave us with a single peak // pruning everything should only leave us with a single peak
{ {
@ -498,7 +498,7 @@ fn pmmr_prune() {
assert_eq!(orig_root, pmmr.root().unwrap()); assert_eq!(orig_root, pmmr.root().unwrap());
} }
assert_eq!(ba.hashes.len(), 16); assert_eq!(ba.hashes.len(), 16);
assert_eq!(ba.remove_list.len(), 9); assert_eq!(ba.removed.len(), 9);
} }
#[test] #[test]

View file

@ -12,139 +12,56 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
use std::fs::File; mod common;
use self::core::core::hash::{DefaultHashable, Hash}; use self::core::core::pmmr::{VecBackend, PMMR};
use self::core::core::pmmr::{self, Backend}; use crate::common::TestElem;
use self::core::core::BlockHeader;
use self::core::ser;
use self::core::ser::{FixedLength, PMMRable, Readable, Reader, Writeable, Writer};
use croaring;
use croaring::Bitmap;
use grin_core as core; use grin_core as core;
#[derive(Copy, Clone, Debug, PartialEq, Eq)] #[test]
pub struct TestElem(pub [u32; 4]); fn leaf_pos_and_idx_iter_test() {
let elems = [
impl DefaultHashable for TestElem {} TestElem([0, 0, 0, 1]),
TestElem([0, 0, 0, 2]),
impl FixedLength for TestElem { TestElem([0, 0, 0, 3]),
const LEN: usize = 16; TestElem([0, 0, 0, 4]),
TestElem([0, 0, 0, 5]),
];
let mut backend = VecBackend::new();
let mut pmmr = PMMR::new(&mut backend);
for x in &elems {
pmmr.push(x).unwrap();
}
assert_eq!(
vec![0, 1, 2, 3, 4],
pmmr.leaf_idx_iter(0).collect::<Vec<_>>()
);
assert_eq!(
vec![1, 2, 4, 5, 8],
pmmr.leaf_pos_iter().collect::<Vec<_>>()
);
} }
impl PMMRable for TestElem { #[test]
type E = Self; fn leaf_pos_and_idx_iter_hash_only_test() {
let elems = [
fn as_elmt(&self) -> Self::E { TestElem([0, 0, 0, 1]),
self.clone() TestElem([0, 0, 0, 2]),
} TestElem([0, 0, 0, 3]),
} TestElem([0, 0, 0, 4]),
TestElem([0, 0, 0, 5]),
impl Writeable for TestElem { ];
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> { let mut backend = VecBackend::new_hash_only();
writer.write_u32(self.0[0])?; let mut pmmr = PMMR::new(&mut backend);
writer.write_u32(self.0[1])?; for x in &elems {
writer.write_u32(self.0[2])?; pmmr.push(x).unwrap();
writer.write_u32(self.0[3])
}
}
impl Readable for TestElem {
fn read(reader: &mut dyn Reader) -> Result<TestElem, ser::Error> {
Ok(TestElem([
reader.read_u32()?,
reader.read_u32()?,
reader.read_u32()?,
reader.read_u32()?,
]))
}
}
/// Simple MMR backend implementation based on a Vector. Pruning does not
/// compact the Vec itself.
#[derive(Clone, Debug)]
pub struct VecBackend<T: PMMRable> {
/// Backend elements
pub data: Vec<T>,
pub hashes: Vec<Hash>,
/// Positions of removed elements
pub remove_list: Vec<u64>,
}
impl<T: PMMRable> Backend<T> for VecBackend<T> {
fn append(&mut self, data: &T, hashes: Vec<Hash>) -> Result<(), String> {
self.data.push(data.clone());
self.hashes.append(&mut hashes.clone());
Ok(())
}
fn get_hash(&self, position: u64) -> Option<Hash> {
if self.remove_list.contains(&position) {
None
} else {
self.get_from_file(position)
}
}
fn get_data(&self, position: u64) -> Option<T::E> {
if self.remove_list.contains(&position) {
None
} else {
self.get_data_from_file(position)
}
}
fn get_from_file(&self, position: u64) -> Option<Hash> {
let hash = &self.hashes[(position - 1) as usize];
Some(hash.clone())
}
fn get_data_from_file(&self, position: u64) -> Option<T::E> {
let idx = pmmr::n_leaves(position);
let data = self.data[(idx - 1) as usize].clone();
Some(data.as_elmt())
}
fn data_as_temp_file(&self) -> Result<File, String> {
unimplemented!()
}
fn leaf_pos_iter(&self) -> Box<dyn Iterator<Item = u64> + '_> {
unimplemented!()
}
fn n_unpruned_leaves(&self) -> u64 {
unimplemented!()
}
fn remove(&mut self, position: u64) -> Result<(), String> {
self.remove_list.push(position);
Ok(())
}
fn rewind(&mut self, position: u64, _rewind_rm_pos: &Bitmap) -> Result<(), String> {
let idx = pmmr::n_leaves(position);
self.data = self.data[0..(idx as usize) + 1].to_vec();
self.hashes = self.hashes[0..(position as usize) + 1].to_vec();
Ok(())
}
fn snapshot(&self, _header: &BlockHeader) -> Result<(), String> {
Ok(())
}
fn release_files(&mut self) {}
fn dump_stats(&self) {}
}
impl<T: PMMRable> VecBackend<T> {
/// Instantiates a new VecBackend<T>
pub fn new() -> VecBackend<T> {
VecBackend {
data: vec![],
hashes: vec![],
remove_list: vec![],
}
} }
assert_eq!(
vec![0, 1, 2, 3, 4],
pmmr.leaf_idx_iter(0).collect::<Vec<_>>()
);
assert_eq!(
vec![1, 2, 4, 5, 8],
pmmr.leaf_pos_iter().collect::<Vec<_>>()
);
} }

View file

@ -10,6 +10,7 @@ workspace = ".."
edition = "2018" edition = "2018"
[dependencies] [dependencies]
bit-vec = "0.6"
byteorder = "1" byteorder = "1"
croaring = "0.3.9" croaring = "0.3.9"
env_logger = "0.5" env_logger = "0.5"

View file

@ -148,6 +148,28 @@ impl<T: PMMRable> Backend<T> for PMMRBackend<T> {
} }
} }
/// Returns an iterator over all the leaf insertion indices (0-indexed).
/// If our pos are [1,2,4,5,8] (first 5 leaf pos) then our insertion indices are [0,1,2,3,4]
fn leaf_idx_iter(&self, from_idx: u64) -> Box<dyn Iterator<Item = u64> + '_> {
// pass from_idx in as param
// convert this to pos
// iterate, skipping everything prior to this
// pass in from_idx=0 then we want to convert to pos=1
let from_pos = pmmr::insertion_to_pmmr_index(from_idx + 1);
if self.prunable {
Box::new(
self.leaf_set
.iter()
.skip_while(move |x| *x < from_pos)
.map(|x| pmmr::n_leaves(x).saturating_sub(1)),
)
} else {
panic!("leaf_idx_iter not implemented for non-prunable PMMR")
}
}
fn data_as_temp_file(&self) -> Result<File, String> { fn data_as_temp_file(&self) -> Result<File, String> {
self.data_file self.data_file
.as_temp_file() .as_temp_file()
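A quick editorial sketch (not part of this commit) of the idx-to-pos conversion at the top of leaf_idx_iter above: insertion_to_pmmr_index maps a 1-based leaf insertion number to its MMR position, which is why from_idx + 1 is passed in.

use grin_core::core::pmmr;

fn main() {
    // Leaf insertion numbers 1..=5 map to MMR positions 1, 2, 4, 5 and 8.
    let pos: Vec<u64> = (1u64..=5).map(pmmr::insertion_to_pmmr_index).collect();
    assert_eq!(pos, vec![1, 2, 4, 5, 8]);
}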

View file

@ -28,6 +28,36 @@ use crate::core::ser::{
Writer, Writer,
}; };
#[test]
fn pmmr_leaf_idx_iter() {
let (data_dir, elems) = setup("leaf_idx_iter");
{
let mut backend = store::pmmr::PMMRBackend::new(
data_dir.to_string(),
true,
false,
ProtocolVersion(1),
None,
)
.unwrap();
// adding first set of 4 elements and sync
let mmr_size = load(0, &elems[0..5], &mut backend);
backend.sync().unwrap();
{
let pmmr: PMMR<'_, TestElem, _> = PMMR::at(&mut backend, mmr_size);
let leaf_idx = pmmr.leaf_idx_iter(0).collect::<Vec<_>>();
let leaf_pos = pmmr.leaf_pos_iter().collect::<Vec<_>>();
// The first 5 leaves [0,1,2,3,4] are at pos [1,2,4,5,8] in the MMR.
assert_eq!(leaf_idx, vec![0, 1, 2, 3, 4]);
assert_eq!(leaf_pos, vec![1, 2, 4, 5, 8]);
}
}
teardown(data_dir);
}
#[test] #[test]
fn pmmr_append() { fn pmmr_append() {
let (data_dir, elems) = setup("append"); let (data_dir, elems) = setup("append");