Remove block markers, use header mmr pos instead (#1188)

Also fixes `spend_in_fork_and_compact` chain test.
This commit is contained in:
Ignotus Peverell 2018-06-23 00:36:10 +01:00 committed by GitHub
parent 3530b8300c
commit dc827ebe93
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
6 changed files with 21 additions and 191 deletions

View file

@ -31,7 +31,7 @@ use grin_store::Error::NotFoundErr;
use pipe;
use store;
use txhashset;
use types::{BlockMarker, ChainAdapter, Error, Options, Tip};
use types::{ChainAdapter, Error, Options, Tip};
use util::LOGGER;
use util::secp::pedersen::{Commitment, RangeProof};
@ -489,21 +489,14 @@ impl Chain {
/// the required indexes for a consumer to rewind to a consistent state
/// at the provided block hash.
pub fn txhashset_read(&self, h: Hash) -> Result<(u64, u64, File), Error> {
// get the indexes for the block
let marker = {
let txhashset = self.txhashset.read().unwrap();
txhashset.indexes_at(&h)?
};
// now we want to rewind the txhashset extension and
// sync a "rewound" copy of the leaf_set files to disk
// so we can send these across as part of the zip file.
// The fast sync client does *not* have the necessary data
// to rewind after receiving the txhashset zip.
{
let head_header = self.store.head_header()?;
let header = self.store.get_block_header(&h)?;
let head_header = self.store.head_header()?;
{
let mut txhashset = self.txhashset.write().unwrap();
txhashset::extending_readonly(&mut txhashset, |extension| {
extension.rewind(&header, &head_header, true, true, true)?;
@ -514,7 +507,11 @@ impl Chain {
// prepares the zip and return the corresponding Read
let txhashset_reader = txhashset::zip_read(self.db_root.clone())?;
Ok((marker.output_pos, marker.kernel_pos, txhashset_reader))
Ok((
header.output_mmr_size,
header.kernel_mmr_size,
txhashset_reader,
))
}
/// Writes a reading view on a txhashset state that's been provided to us.
@ -538,18 +535,6 @@ impl Chain {
let header = self.store.get_block_header(&h)?;
txhashset::zip_write(self.db_root.clone(), txhashset_data)?;
{
// write the block marker so we can safely rewind to
// the pos for that block when we validate the extension below
let batch = self.store.batch()?;
let marker = BlockMarker {
output_pos: rewind_to_output,
kernel_pos: rewind_to_kernel,
};
batch.save_block_marker(&h, &marker)?;
batch.commit()?;
}
let mut txhashset =
txhashset::TxHashSet::open(self.db_root.clone(), self.store.clone(), Some(&header))?;
@ -636,8 +621,6 @@ impl Chain {
Ok(b) => {
count += 1;
batch.delete_block(&b.hash())?;
//TODO: Validation seems to fail as block markers are being deleted?
batch.delete_block_marker(&b.hash())?;
batch.delete_block_input_bitmap(&b.hash())?;
}
Err(NotFoundErr) => {
@ -754,13 +737,6 @@ impl Chain {
.map_err(|e| Error::StoreErr(e, "chain get header".to_owned()))
}
/// Get the block marker for the specified block hash.
pub fn get_block_marker(&self, bh: &Hash) -> Result<BlockMarker, Error> {
self.store
.get_block_marker(bh)
.map_err(|e| Error::StoreErr(e, "chain get block marker".to_owned()))
}
/// Gets the block header at the provided height
pub fn get_header_by_height(&self, height: u64) -> Result<BlockHeader, Error> {
self.store

View file

@ -48,4 +48,4 @@ pub mod types;
pub use chain::{Chain, MAX_ORPHAN_SIZE};
pub use store::ChainStore;
pub use types::{BlockSums, ChainAdapter, Error, Options, Tip};
pub use types::{ChainAdapter, Error, Options, Tip};

View file

@ -28,7 +28,7 @@ use core::core::target::Difficulty;
use core::core::{Block, BlockHeader};
use grin_store as store;
use grin_store::{option_to_not_found, to_key, Error, u64_to_key};
use types::{BlockMarker, BlockSums, Tip};
use types::Tip;
const STORE_SUBPATH: &'static str = "chain";
@ -146,20 +146,6 @@ impl ChainStore {
)
}
/// Fetch the persisted block marker for the given block hash, mapping a
/// missing entry to a not-found error.
pub fn get_block_marker(&self, bh: &Hash) -> Result<BlockMarker, Error> {
    // Build the DB key once, then deserialize whatever is stored under it.
    let key = to_key(BLOCK_MARKER_PREFIX, &mut bh.to_vec());
    option_to_not_found(self.db.get_ser(&key))
}
/// Fetch the persisted `BlockSums` for the given block hash, mapping a
/// missing entry to a not-found error.
pub fn get_block_sums(&self, bh: &Hash) -> Result<BlockSums, Error> {
    let db_key = to_key(BLOCK_SUMS_PREFIX, &mut bh.to_vec());
    option_to_not_found(self.db.get_ser(&db_key))
}
pub fn build_block_input_bitmap(&self, block: &Block) -> Result<Bitmap, Error> {
let bitmap = block
.inputs
@ -327,23 +313,6 @@ impl<'a> Batch<'a> {
.delete(&to_key(COMMIT_POS_PREFIX, &mut commit.to_vec()))
}
/// Read a block marker within this batch, mapping a missing entry to a
/// not-found error.
pub fn get_block_marker(&self, bh: &Hash) -> Result<BlockMarker, Error> {
    let marker_key = to_key(BLOCK_MARKER_PREFIX, &mut bh.to_vec());
    option_to_not_found(self.db.get_ser(&marker_key))
}
/// Persist the block marker for the given block hash within this batch.
pub fn save_block_marker(&self, bh: &Hash, marker: &BlockMarker) -> Result<(), Error> {
    let key = to_key(BLOCK_MARKER_PREFIX, &mut bh.to_vec());
    self.db.put_ser(&key[..], &marker)
}
/// Remove the block marker for the given block hash within this batch.
pub fn delete_block_marker(&self, bh: &Hash) -> Result<(), Error> {
    let key = to_key(BLOCK_MARKER_PREFIX, &mut bh.to_vec());
    self.db.delete(&key)
}
pub fn get_block_header_db(&self, h: &Hash) -> Result<BlockHeader, Error> {
option_to_not_found(
self.db

View file

@ -40,7 +40,7 @@ use grin_store;
use grin_store::pmmr::PMMRBackend;
use grin_store::types::prune_noop;
use store::{Batch, ChainStore};
use types::{BlockMarker, Error, TxHashSetRoots};
use types::{Error, TxHashSetRoots};
use util::{secp_static, zip, LOGGER};
const TXHASHSET_SUBDIR: &'static str = "txhashset";
@ -201,11 +201,6 @@ impl TxHashSet {
rproof_pmmr.elements_from_insertion_index(start_index, max_count)
}
/// Output and kernel MMR indexes at the end of the provided block.
/// Delegates to the commit index; the `?` operator performs the same
/// `From`-based error conversion the previous `map_err(&From::from)` did.
pub fn indexes_at(&self, bh: &Hash) -> Result<BlockMarker, Error> {
    let marker = self.commit_index.get_block_marker(bh)?;
    Ok(marker)
}
/// Get sum tree roots
/// TODO: Return data instead of hashes
pub fn roots(&mut self) -> (Hash, Hash, Hash) {
@ -235,10 +230,8 @@ impl TxHashSet {
// horizon for compacting is based on current_height
let horizon = current_height.saturating_sub(global::cut_through_horizon().into());
let horizon_header = self.commit_index.get_header_by_height(horizon)?;
let horizon_marker = self.commit_index.get_block_marker(&horizon_header.hash())?;
let rewind_add_pos =
output_pos_to_rewind(self.commit_index.clone(), &horizon_header, &head_header)?;
let rewind_add_pos = output_pos_to_rewind(&horizon_header, &head_header)?;
let rewind_rm_pos =
input_pos_to_rewind(self.commit_index.clone(), &horizon_header, &head_header)?;
@ -253,14 +246,14 @@ impl TxHashSet {
};
self.output_pmmr_h.backend.check_compact(
horizon_marker.output_pos,
horizon_header.output_mmr_size,
&rewind_add_pos,
&rewind_rm_pos.1,
clean_output_index,
)?;
self.rproof_pmmr_h.backend.check_compact(
horizon_marker.output_pos,
horizon_header.output_mmr_size,
&rewind_add_pos,
&rewind_rm_pos.1,
&prune_noop,
@ -385,7 +378,6 @@ pub struct Extension<'a> {
commit_index: Arc<ChainStore>,
new_output_commits: HashMap<Commitment, u64>,
new_block_markers: HashMap<Hash, BlockMarker>,
rollback: bool,
/// Batch in which the extension occurs, public so it can be used within
@ -446,7 +438,6 @@ impl<'a> Extension<'a> {
),
commit_index,
new_output_commits: HashMap::new(),
new_block_markers: HashMap::new(),
rollback: false,
batch,
}
@ -622,24 +613,14 @@ impl<'a> Extension<'a> {
self.apply_kernel(kernel)?;
}
// finally, recording the PMMR positions after this block for future rewind
let marker = BlockMarker {
output_pos: self.output_pmmr.unpruned_size(),
kernel_pos: self.kernel_pmmr.unpruned_size(),
};
self.new_block_markers.insert(b.hash(), marker);
Ok(())
}
// Store all new output pos in the index.
// Also store any new block_markers.
fn save_indexes(&self) -> Result<(), Error> {
for (commit, pos) in &self.new_output_commits {
self.batch.save_output_pos(commit, *pos)?;
}
for (bh, marker) in &self.new_block_markers {
self.batch.save_block_marker(bh, marker)?;
}
Ok(())
}
@ -775,29 +756,20 @@ impl<'a> Extension<'a> {
rewind_kernel: bool,
rewind_rangeproof: bool,
) -> Result<(), Error> {
let hash = block_header.hash();
trace!(
LOGGER,
"Rewind to header {} @ {}",
block_header.height,
hash
block_header.hash(),
);
// Rewind our MMRs to the appropriate positions
// based on the block_marker.
let (output_pos, kernel_pos) = {
let marker = self.batch.get_block_marker(&hash)?;
(marker.output_pos, marker.kernel_pos)
};
// We need to build bitmaps of added and removed output positions
// so we can correctly rewind all operations applied to the output MMR
// after the position we are rewinding to (these operations will be
// undone during rewind).
// Rewound output pos will be removed from the MMR.
// Rewound input (spent) pos will be added back to the MMR.
let rewind_add_pos =
output_pos_to_rewind(self.commit_index.clone(), block_header, head_header)?;
let rewind_add_pos = output_pos_to_rewind(block_header, head_header)?;
let rewind_rm_pos =
input_pos_to_rewind(self.commit_index.clone(), block_header, head_header)?;
if !rewind_rm_pos.0 {
@ -806,8 +778,8 @@ impl<'a> Extension<'a> {
}
self.rewind_to_pos(
output_pos,
kernel_pos,
block_header.output_mmr_size,
block_header.kernel_mmr_size,
&rewind_add_pos,
&rewind_rm_pos.1,
rewind_utxo,
@ -1123,15 +1095,12 @@ pub fn zip_write(root_dir: String, txhashset_data: File) -> Result<(), Error> {
/// The MMR is append-only so we can simply look for all positions added after
/// the rewind pos.
fn output_pos_to_rewind(
commit_index: Arc<ChainStore>,
block_header: &BlockHeader,
head_header: &BlockHeader,
) -> Result<Bitmap, Error> {
let marker_to = commit_index.get_block_marker(&head_header.hash())?;
let marker_from = commit_index.get_block_marker(&block_header.hash())?;
Ok(((marker_from.output_pos + 1)..=marker_to.output_pos)
.map(|x| x as u32)
.collect())
let marker_to = head_header.output_mmr_size;
let marker_from = block_header.output_mmr_size;
Ok(((marker_from + 1)..=marker_to).map(|x| x as u32).collect())
}
/// Given a block header to rewind to and the block header at the

View file

@ -270,79 +270,3 @@ pub struct NoopAdapter {}
// No-op implementation of ChainAdapter: silently discards block-accepted
// notifications. Useful where a ChainAdapter is required but no listener
// behavior is wanted (e.g. tests).
impl ChainAdapter for NoopAdapter {
// Intentionally empty: ignores both the accepted block and the options.
fn block_accepted(&self, _: &Block, _: Options) {}
}
/// The output and kernel positions that define the size of the MMRs for a
/// particular block.
// NOTE(review): serialized via the manual Writeable/Readable impls
// (output_pos then kernel_pos); keep field semantics in sync with those.
#[derive(Debug, Clone)]
pub struct BlockMarker {
/// The output (and rangeproof) MMR position of the final output in the
/// block
pub output_pos: u64,
/// The kernel position of the final kernel in the block
pub kernel_pos: u64,
}
// Serialization: two u64s, output_pos first then kernel_pos. This order
// must match the Readable impl exactly or round-tripping breaks.
impl Writeable for BlockMarker {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
writer.write_u64(self.output_pos)?;
writer.write_u64(self.kernel_pos)?;
Ok(())
}
}
// Deserialization: reads the two u64s in the same order the Writeable
// impl writes them (output_pos first, then kernel_pos).
impl Readable for BlockMarker {
fn read(reader: &mut Reader) -> Result<BlockMarker, ser::Error> {
Ok(BlockMarker {
output_pos: reader.read_u64()?,
kernel_pos: reader.read_u64()?,
})
}
}
impl Default for BlockMarker {
    /// A marker with both MMR positions at zero, i.e. pointing at the
    /// very start of both the output and kernel MMRs.
    fn default() -> BlockMarker {
        BlockMarker {
            output_pos: 0,
            kernel_pos: 0,
        }
    }
}
/// The output_sum and kernel_sum for a given block.
/// This is used to validate the next block being processed by applying
/// the inputs, outputs, kernels and kernel_offset from the new block
/// and checking everything sums correctly.
// NOTE(review): serialized via the manual Writeable/Readable impls as two
// fixed-width commitments, output_sum first.
#[derive(Debug, Clone)]
pub struct BlockSums {
/// The total output sum so far.
pub output_sum: Commitment,
/// The total kernel sum so far.
pub kernel_sum: Commitment,
}
// Serialization: the two commitments as fixed-width byte strings,
// output_sum first then kernel_sum; order must match the Readable impl.
impl Writeable for BlockSums {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
writer.write_fixed_bytes(&self.output_sum)?;
writer.write_fixed_bytes(&self.kernel_sum)?;
Ok(())
}
}
// Deserialization: reads the two commitments in the same order the
// Writeable impl writes them (output_sum first, then kernel_sum).
impl Readable for BlockSums {
fn read(reader: &mut Reader) -> Result<BlockSums, ser::Error> {
Ok(BlockSums {
output_sum: Commitment::read(reader)?,
kernel_sum: Commitment::read(reader)?,
})
}
}
impl Default for BlockSums {
    /// Both sums start at the commitment to the zero value; compute it
    /// once and reuse it for the second field by move.
    fn default() -> BlockSums {
        let zero_commit = secp_static::commit_to_zero_value();
        BlockSums {
            output_sum: zero_commit.clone(),
            kernel_sum: zero_commit,
        }
    }
}

View file

@ -97,14 +97,6 @@ fn data_files() {
let head = Tip::from_block(&b.header);
// Check we have block markers for the last block and the previous block
let _cur_pmmr_md = chain
.get_block_marker(&head.last_block_h)
.expect("block marker does not exist");
chain
.get_block_marker(&head.prev_block_h)
.expect("prev block marker does not exist");
chain.validate(false).unwrap();
}
}