Clean up positional indexes for rewind, introducing block markers (#759)

* Clean up the MMRs' positional indexes. Get rid of the kernel position index.
Introduced a new block marker index that keeps, for each block, the
respective positions in the output and kernel MMRs. This is now sufficient
for rewind.
* Block marker compaction
This commit is contained in:
Ignotus Peverell 2018-03-08 21:36:51 +00:00 committed by GitHub
parent a059bbde0d
commit ab4b2a19e3
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
9 changed files with 57 additions and 134 deletions

View file

@ -124,7 +124,7 @@ impl OutputHandler {
.iter()
.filter(|output| commitments.is_empty() || commitments.contains(&output.commit))
.map(|output| {
OutputPrintable::from_output(output, w(&self.chain), &block, include_proof)
OutputPrintable::from_output(output, w(&self.chain), &header, include_proof)
})
.collect();
BlockOutputs {

View file

@ -244,7 +244,7 @@ impl OutputPrintable {
pub fn from_output(
output: &core::Output,
chain: Arc<chain::Chain>,
block: &core::Block,
block_header: &core::BlockHeader,
include_proof: bool,
) -> OutputPrintable {
let output_type = if output
@ -274,7 +274,7 @@ impl OutputPrintable {
.features
.contains(core::transaction::OutputFeatures::COINBASE_OUTPUT) && !spent
{
merkle_proof = chain.get_merkle_proof(&out_id, &block).ok()
merkle_proof = chain.get_merkle_proof(&out_id, &block_header).ok()
};
OutputPrintable {
@ -563,7 +563,7 @@ impl BlockPrintable {
.outputs
.iter()
.map(|output| {
OutputPrintable::from_output(output, chain.clone(), &block, include_proof)
OutputPrintable::from_output(output, chain.clone(), &block.header, include_proof)
})
.collect();
let kernels = block
@ -602,7 +602,7 @@ impl CompactBlockPrintable {
let block = chain.get_block(&cb.hash()).unwrap();
let out_full = cb.out_full
.iter()
.map(|x| OutputPrintable::from_output(x, chain.clone(), &block, false))
.map(|x| OutputPrintable::from_output(x, chain.clone(), &block.header, false))
.collect();
let kern_full = cb.kern_full
.iter()

View file

@ -450,13 +450,13 @@ impl Chain {
pub fn get_merkle_proof(
&self,
output: &OutputIdentifier,
block: &Block,
block_header: &BlockHeader,
) -> Result<MerkleProof, Error> {
let mut txhashset = self.txhashset.write().unwrap();
let merkle_proof = txhashset::extending(&mut txhashset, |extension| {
extension.force_rollback();
extension.merkle_proof_via_rewind(output, block)
extension.merkle_proof_via_rewind(output, block_header)
})?;
Ok(merkle_proof)
@ -472,14 +472,12 @@ impl Chain {
/// the required indexes for a consumer to rewind to a consistent state
/// at the provided block hash.
pub fn txhashset_read(&self, h: Hash) -> Result<(u64, u64, File), Error> {
let b = self.get_block(&h)?;
// get the indexes for the block
let out_index: u64;
let kernel_index: u64;
{
let txhashset = self.txhashset.read().unwrap();
let (oi, ki) = txhashset.indexes_at(&b)?;
let (oi, ki) = txhashset.indexes_at(&h)?;
out_index = oi;
kernel_index = ki;
}
@ -561,6 +559,7 @@ impl Chain {
Ok(b) => {
self.store.delete_block(&b.hash())?;
self.store.delete_block_pmmr_file_metadata(&b.hash())?;
self.store.delete_block_marker(&b.hash())?;
}
Err(NotFoundErr) => {
break;

View file

@ -512,13 +512,13 @@ pub fn rewind_and_apply_fork(
}
}
let forked_block = store.get_block(&current)?;
let forked_block = store.get_block_header(&current)?;
debug!(
LOGGER,
"rewind_and_apply_fork @ {} [{}]",
forked_block.header.height,
forked_block.header.hash(),
forked_block.height,
forked_block.hash(),
);
// rewind the sum trees up to the forking block

View file

@ -34,7 +34,7 @@ const HEADER_HEAD_PREFIX: u8 = 'I' as u8;
const SYNC_HEAD_PREFIX: u8 = 's' as u8;
const HEADER_HEIGHT_PREFIX: u8 = '8' as u8;
const COMMIT_POS_PREFIX: u8 = 'c' as u8;
const KERNEL_POS_PREFIX: u8 = 'k' as u8;
const BLOCK_MARKER_PREFIX: u8 = 'm' as u8;
const BLOCK_PMMR_FILE_METADATA_PREFIX: u8 = 'p' as u8;
/// An implementation of the ChainStore trait backed by a simple key-value
@ -176,18 +176,21 @@ impl ChainStore for ChainKVStore {
.delete(&to_key(COMMIT_POS_PREFIX, &mut commit.to_vec()))
}
fn save_kernel_pos(&self, excess: &Commitment, pos: u64) -> Result<(), Error> {
self.db.put_ser(
&to_key(KERNEL_POS_PREFIX, &mut excess.as_ref().to_vec())[..],
&pos,
fn save_block_marker(&self, bh: &Hash, marker: &(u64, u64)) -> Result<(), Error> {
self.db
.put_ser(&to_key(BLOCK_MARKER_PREFIX, &mut bh.to_vec())[..], &marker)
}
fn get_block_marker(&self, bh: &Hash) -> Result<(u64, u64), Error> {
option_to_not_found(
self.db
.get_ser(&to_key(BLOCK_MARKER_PREFIX, &mut bh.to_vec())),
)
}
fn get_kernel_pos(&self, excess: &Commitment) -> Result<u64, Error> {
option_to_not_found(
self.db
.get_ser(&to_key(KERNEL_POS_PREFIX, &mut excess.as_ref().to_vec())),
)
fn delete_block_marker(&self, bh: &Hash) -> Result<(), Error> {
self.db
.delete(&to_key(BLOCK_MARKER_PREFIX, &mut bh.to_vec()))
}
fn save_block_pmmr_file_metadata(

View file

@ -18,7 +18,6 @@
use std::fs;
use std::collections::HashMap;
use std::fs::File;
use std::ops::Deref;
use std::path::{Path, PathBuf};
use std::sync::Arc;
@ -184,8 +183,8 @@ impl TxHashSet {
}
/// Output and kernel MMR indexes at the end of the provided block
pub fn indexes_at(&self, block: &Block) -> Result<(u64, u64), Error> {
indexes_at(block, self.commit_index.deref())
pub fn indexes_at(&self, bh: &Hash) -> Result<(u64, u64), Error> {
self.commit_index.get_block_marker(bh).map_err(&From::from)
}
/// Last file positions of Output set.. hash file,data file
@ -250,7 +249,7 @@ where
rollback = extension.rollback;
if res.is_ok() && !rollback {
extension.save_pos_index()?;
extension.save_indexes()?;
}
sizes = extension.sizes();
}
@ -294,7 +293,7 @@ pub struct Extension<'a> {
commit_index: Arc<ChainStore>,
new_output_commits: HashMap<Commitment, u64>,
new_kernel_excesses: HashMap<Commitment, u64>,
new_block_markers: HashMap<Hash, (u64, u64)>,
rollback: bool,
}
@ -316,7 +315,7 @@ impl<'a> Extension<'a> {
),
commit_index: commit_index,
new_output_commits: HashMap::new(),
new_kernel_excesses: HashMap::new(),
new_block_markers: HashMap::new(),
rollback: false,
}
}
@ -348,25 +347,28 @@ impl<'a> Extension<'a> {
}
}
// finally, applying all kernels
// then applying all kernels
for kernel in &b.kernels {
self.apply_kernel(kernel)?;
}
// finally, recording the PMMR positions after this block for future rewind
let last_output_pos = self.output_pmmr.unpruned_size();
let last_kernel_pos = self.kernel_pmmr.unpruned_size();
self.new_block_markers
.insert(b.hash(), (last_output_pos, last_kernel_pos));
Ok(())
}
fn save_pos_index(&self) -> Result<(), Error> {
fn save_indexes(&self) -> Result<(), Error> {
// store all new output pos in the index
for (commit, pos) in &self.new_output_commits {
self.commit_index.save_output_pos(commit, *pos)?;
}
// store all new kernel pos in the index
for (excess, pos) in &self.new_kernel_excesses {
self.commit_index.save_kernel_pos(excess, *pos)?;
for (bh, tag) in &self.new_block_markers {
self.commit_index.save_block_marker(bh, tag)?;
}
Ok(())
}
@ -445,20 +447,10 @@ impl<'a> Extension<'a> {
}
fn apply_kernel(&mut self, kernel: &TxKernel) -> Result<(), Error> {
if let Ok(pos) = self.get_kernel_pos(&kernel.excess) {
// same as outputs
if let Some((h, _)) = self.kernel_pmmr.get(pos, false) {
if h == kernel.hash() {
return Err(Error::DuplicateKernel(kernel.excess.clone()));
}
}
}
// push kernels in their MMR and file
let pos = self.kernel_pmmr
self.kernel_pmmr
.push(kernel.clone())
.map_err(&Error::TxHashSetErr)?;
self.new_kernel_excesses.insert(kernel.excess, pos);
Ok(())
}
@ -471,15 +463,16 @@ impl<'a> Extension<'a> {
pub fn merkle_proof_via_rewind(
&mut self,
output: &OutputIdentifier,
block: &Block,
block_header: &BlockHeader,
) -> Result<MerkleProof, Error> {
debug!(
LOGGER,
"txhashset: merkle_proof_via_rewind: rewinding to block {:?}",
block.hash()
block_header.hash()
);
// rewind to the specified block
self.rewind(block)?;
self.rewind(block_header)?;
// then calculate the Merkle Proof based on the known pos
let pos = self.get_output_pos(&output.commit)?;
let merkle_proof = self.output_pmmr
@ -491,17 +484,14 @@ impl<'a> Extension<'a> {
/// Rewinds the MMRs to the provided block, using the last output and
/// last kernel of the block we want to rewind to.
pub fn rewind(&mut self, block: &Block) -> Result<(), Error> {
debug!(
LOGGER,
"Rewind txhashset to header {} at {}",
block.header.hash(),
block.header.height,
);
pub fn rewind(&mut self, block_header: &BlockHeader) -> Result<(), Error> {
let hash = block_header.hash();
let height = block_header.height;
debug!(LOGGER, "Rewind to header {} at {}", hash, height);
// rewind each MMR
let (out_pos_rew, kern_pos_rew) = indexes_at(block, self.commit_index.deref())?;
self.rewind_pos(block.header.height, out_pos_rew, kern_pos_rew)?;
let (out_pos_rew, kern_pos_rew) = self.commit_index.get_block_marker(&hash)?;
self.rewind_pos(height, out_pos_rew, kern_pos_rew)?;
Ok(())
}
@ -539,14 +529,6 @@ impl<'a> Extension<'a> {
}
}
fn get_kernel_pos(&self, excess: &Commitment) -> Result<u64, grin_store::Error> {
if let Some(pos) = self.new_kernel_excesses.get(excess) {
Ok(*pos)
} else {
self.commit_index.get_kernel_pos(excess)
}
}
/// Current root hashes and sums (if applicable) for the Output, range proof
/// and kernel sum trees.
pub fn roots(&self) -> TxHashSetRoots {
@ -611,15 +593,6 @@ impl<'a> Extension<'a> {
}
}
}
for n in 1..self.kernel_pmmr.unpruned_size() + 1 {
// non-pruned leaves only
if pmmr::bintree_postorder_height(n) == 0 {
if let Some((_, kernel)) = self.kernel_pmmr.get(n, true) {
self.commit_index
.save_kernel_pos(&kernel.expect("not a leaf node").excess, n)?;
}
}
}
Ok(())
}
@ -717,54 +690,6 @@ impl<'a> Extension<'a> {
}
}
/// Output and kernel MMR indexes at the end of the provided block.
/// This requires us to know the "last" output processed in the block
/// and needs to be consistent with how we originally processed
/// the outputs in apply_block()
fn indexes_at(block: &Block, commit_index: &ChainStore) -> Result<(u64, u64), Error> {
// If we have any regular outputs then the "last" output is the last regular
// output otherwise it is the last coinbase output.
// This is because we process coinbase outputs before regular outputs in
// apply_block().
//
// TODO - consider maintaining coinbase outputs in a separate vec in a block?
//
let mut last_coinbase_output: Option<Output> = None;
let mut last_regular_output: Option<Output> = None;
for x in &block.outputs {
if x.features.contains(OutputFeatures::COINBASE_OUTPUT) {
last_coinbase_output = Some(*x);
} else {
last_regular_output = Some(*x);
}
}
// use last regular output if we have any, otherwise last coinbase output
let last_output = if last_regular_output.is_some() {
last_regular_output.unwrap()
} else if last_coinbase_output.is_some() {
last_coinbase_output.unwrap()
} else {
return Err(Error::Other("can't get index in an empty block".to_owned()));
};
let out_idx = commit_index
.get_output_pos(&last_output.commitment())
.map_err(|e| Error::StoreErr(e, format!("missing output pos for block")))?;
let kern_idx = match block.kernels.last() {
Some(kernel) => commit_index
.get_kernel_pos(&kernel.excess)
.map_err(|e| Error::StoreErr(e, format!("missing kernel pos for block")))?,
None => {
return Err(Error::Other("can't get index in an empty block".to_owned()));
}
};
Ok((out_idx, kern_idx))
}
/// Packages the txhashset data files into a zip and returns a Read to the
/// resulting file
pub fn zip_read(root_dir: String) -> Result<File, Error> {

View file

@ -79,8 +79,6 @@ pub enum Error {
AlreadySpent(Commitment),
/// An output with that commitment already exists (should be unique)
DuplicateCommitment(Commitment),
/// A kernel with that excess commitment already exists (should be unique)
DuplicateKernel(Commitment),
/// output not found
OutputNotFound,
/// output spent
@ -286,13 +284,11 @@ pub trait ChainStore: Send + Sync {
/// Deletes the MMR position of an output.
fn delete_output_pos(&self, commit: &[u8]) -> Result<(), store::Error>;
/// Saves the position of a kernel, represented by its excess, in the
/// Kernel MMR. Used as an index for spending and pruning.
fn save_kernel_pos(&self, commit: &Commitment, pos: u64) -> Result<(), store::Error>;
fn save_block_marker(&self, bh: &Hash, marker: &(u64, u64)) -> Result<(), store::Error>;
/// Gets the position of a kernel, represented by its excess, in the
/// Kernel MMR. Used as an index for spending and pruning.
fn get_kernel_pos(&self, commit: &Commitment) -> Result<u64, store::Error>;
fn get_block_marker(&self, bh: &Hash) -> Result<(u64, u64), store::Error>;
fn delete_block_marker(&self, bh: &Hash) -> Result<(), store::Error>;
/// Saves information about the last written PMMR file positions for each
/// committed block

View file

@ -259,7 +259,7 @@ fn spend_in_fork_and_compact() {
.process_block(b.clone(), chain::Options::SKIP_POW)
.unwrap();
let merkle_proof = chain.get_merkle_proof(&out_id, &b).unwrap();
let merkle_proof = chain.get_merkle_proof(&out_id, &b.header).unwrap();
println!("First block");

View file

@ -108,7 +108,7 @@ fn test_coinbase_maturity() {
.process_block(block.clone(), chain::Options::MINE)
.unwrap();
let merkle_proof = chain.get_merkle_proof(&out_id, &block).unwrap();
let merkle_proof = chain.get_merkle_proof(&out_id, &block.header).unwrap();
let prev = chain.head_header().unwrap();