validate root of header MMR when processing headers (during sync and full blocks) (#1836)

Antioch Peverell 2018-11-01 09:51:32 +00:00 committed by GitHub
parent e8f4c47178
commit 12be191ecd
26 changed files with 466 additions and 302 deletions
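In outline: every block header now commits, via its prev_root field, to the root of the header MMR built from all headers before it, and that commitment is checked whenever headers are processed, both during header sync and when processing full blocks. The chaining invariant, sketched (not code from this commit):

// genesis.prev_root  == root(empty header MMR) == ZERO_HASH
// header_n.prev_root == root(header MMR over headers 0 .. n-1),
// checked before header_n is applied to the MMR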


@ -25,7 +25,7 @@ use hyper::{Body, Request, StatusCode};
use auth::BasicAuthMiddleware;
use chain;
use core::core::hash::{Hash, Hashed};
use core::core::{OutputFeatures, OutputIdentifier, Transaction};
use core::core::{BlockHeader, OutputFeatures, OutputIdentifier, Transaction};
use core::ser;
use p2p;
use p2p::types::{PeerInfoDisplay, ReasonForBan};
@ -569,7 +569,9 @@ impl HeaderHandler {
}
if let Ok(height) = input.parse() {
match w(&self.chain).get_header_by_height(height) {
Ok(header) => return Ok(BlockHeaderPrintable::from_header(&header)),
Ok(header) => {
return self.convert_header(&header);
}
Err(_) => return Err(ErrorKind::NotFound)?,
}
}
@ -580,13 +582,18 @@ impl HeaderHandler {
let header = w(&self.chain)
.get_block_header(&h)
.context(ErrorKind::NotFound)?;
Ok(BlockHeaderPrintable::from_header(&header))
self.convert_header(&header)
}
/// Convert a header into a "printable" version for json serialization.
fn convert_header(&self, header: &BlockHeader) -> Result<BlockHeaderPrintable, Error> {
return Ok(BlockHeaderPrintable::from_header(header));
}
fn get_header_for_output(&self, commit_id: String) -> Result<BlockHeaderPrintable, Error> {
let oid = get_output(&self.chain, &commit_id)?.1;
match w(&self.chain).get_header_for_output(&oid) {
Ok(header) => return Ok(BlockHeaderPrintable::from_header(&header)),
Ok(header) => return self.convert_header(&header),
Err(_) => return Err(ErrorKind::NotFound)?,
}
}


@ -472,11 +472,11 @@ pub struct BlockHeaderInfo {
}
impl BlockHeaderInfo {
pub fn from_header(h: &core::BlockHeader) -> BlockHeaderInfo {
pub fn from_header(header: &core::BlockHeader) -> BlockHeaderInfo {
BlockHeaderInfo {
hash: util::to_hex(h.hash().to_vec()),
height: h.height,
previous: util::to_hex(h.previous.to_vec()),
hash: util::to_hex(header.hash().to_vec()),
height: header.height,
previous: util::to_hex(header.prev_hash.to_vec()),
}
}
}
@ -516,23 +516,23 @@ pub struct BlockHeaderPrintable {
}
impl BlockHeaderPrintable {
pub fn from_header(h: &core::BlockHeader) -> BlockHeaderPrintable {
pub fn from_header(header: &core::BlockHeader) -> BlockHeaderPrintable {
BlockHeaderPrintable {
hash: util::to_hex(h.hash().to_vec()),
version: h.version,
height: h.height,
previous: util::to_hex(h.previous.to_vec()),
prev_root: util::to_hex(h.prev_root.to_vec()),
timestamp: h.timestamp.to_rfc3339(),
output_root: util::to_hex(h.output_root.to_vec()),
range_proof_root: util::to_hex(h.range_proof_root.to_vec()),
kernel_root: util::to_hex(h.kernel_root.to_vec()),
nonce: h.pow.nonce,
edge_bits: h.pow.edge_bits(),
cuckoo_solution: h.pow.proof.nonces.clone(),
total_difficulty: h.pow.total_difficulty.to_num(),
secondary_scaling: h.pow.secondary_scaling,
total_kernel_offset: h.total_kernel_offset.to_hex(),
hash: util::to_hex(header.hash().to_vec()),
version: header.version,
height: header.height,
previous: util::to_hex(header.prev_hash.to_vec()),
prev_root: util::to_hex(header.prev_root.to_vec()),
timestamp: header.timestamp.to_rfc3339(),
output_root: util::to_hex(header.output_root.to_vec()),
range_proof_root: util::to_hex(header.range_proof_root.to_vec()),
kernel_root: util::to_hex(header.kernel_root.to_vec()),
nonce: header.pow.nonce,
edge_bits: header.pow.edge_bits(),
cuckoo_solution: header.pow.proof.nonces.clone(),
total_difficulty: header.pow.total_difficulty.to_num(),
secondary_scaling: header.pow.secondary_scaling,
total_kernel_offset: header.total_kernel_offset.to_hex(),
}
}
}


@ -181,13 +181,35 @@ impl Chain {
setup_head(genesis.clone(), store.clone(), &mut txhashset)?;
let head = store.head()?;
debug!(
"Chain init: {} @ {} [{}]",
head.total_difficulty.to_num(),
head.height,
head.last_block_h,
);
{
let head = store.head()?;
debug!(
"init: head: {} @ {} [{}]",
head.total_difficulty.to_num(),
head.height,
head.last_block_h,
);
}
{
let header_head = store.header_head()?;
debug!(
"init: header_head: {} @ {} [{}]",
header_head.total_difficulty.to_num(),
header_head.height,
header_head.last_block_h,
);
}
{
let sync_head = store.get_sync_head()?;
debug!(
"init: sync_head: {} @ {} [{}]",
sync_head.total_difficulty.to_num(),
sync_head.height,
sync_head.last_block_h,
);
}
Ok(Chain {
db_root: db_root,
@ -475,15 +497,23 @@ impl Chain {
})
}
/// Sets the txhashset roots on a brand new block by applying the block on
/// the current txhashset state.
pub fn set_txhashset_roots(&self, b: &mut Block, is_fork: bool) -> Result<(), Error> {
/// *** Only used in tests. ***
/// Convenience for setting roots on a block header when
/// creating a chain fork during tests.
pub fn set_txhashset_roots_forked(
&self,
b: &mut Block,
prev: &BlockHeader,
) -> Result<(), Error> {
let prev_block = self.get_block(&prev.hash())?;
let mut txhashset = self.txhashset.write();
let (prev_root, roots, sizes) =
txhashset::extending_readonly(&mut txhashset, |extension| {
if is_fork {
pipe::rewind_and_apply_fork(b, extension)?;
}
// Put the txhashset in the correct state as of the previous block.
// We cannot use the new block to do this because we have no
// explicit previous linkage (and prev_root not yet setup).
pipe::rewind_and_apply_fork(&prev_block, extension)?;
extension.apply_block(&prev_block)?;
// Retrieve the header root before we apply the new block
let prev_root = extension.header_root();
@ -513,6 +543,40 @@ impl Chain {
Ok(())
}
/// Sets the txhashset roots on a brand new block by applying the block on
/// the current txhashset state.
pub fn set_txhashset_roots(&self, b: &mut Block) -> Result<(), Error> {
let mut txhashset = self.txhashset.write();
let (prev_root, roots, sizes) =
txhashset::extending_readonly(&mut txhashset, |extension| {
// Retrieve the header root before we apply the new block
let prev_root = extension.header_root();
// Apply the latest block to the chain state via the extension.
extension.apply_block(b)?;
Ok((prev_root, extension.roots(), extension.sizes()))
})?;
// Set the prev_root on the header.
b.header.prev_root = prev_root;
// Set the output, rangeproof and kernel MMR roots.
b.header.output_root = roots.output_root;
b.header.range_proof_root = roots.rproof_root;
b.header.kernel_root = roots.kernel_root;
// Set the output and kernel MMR sizes.
{
// Carefully destructure these correctly...
let (_, output_mmr_size, _, kernel_mmr_size) = sizes;
b.header.output_mmr_size = output_mmr_size;
b.header.kernel_mmr_size = kernel_mmr_size;
}
Ok(())
}
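For context, a typical caller builds the block from the previous header and then asks the chain to fill in prev_root plus the MMR roots and sizes before mining. A sketch of the new calling pattern (it mirrors how build_block and the chain tests further down this diff use the signature; reward and txs assumed as in those tests):

let mut b = Block::new(&prev_header, txs, Difficulty::min(), reward)?;
// One call now sets prev_root, the output/rangeproof/kernel roots and the MMR sizes.
chain.set_txhashset_roots(&mut b)?;
// The forked variant is test-only and takes the fork point explicitly:
// chain.set_txhashset_roots_forked(&mut b, &prev_header)?;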
/// Return a pre-built Merkle proof for the given commitment from the store.
pub fn get_merkle_proof(
&self,
@ -580,9 +644,7 @@ impl Chain {
header: &BlockHeader,
txhashset: &txhashset::TxHashSet,
) -> Result<(), Error> {
debug!(
"chain: validate_kernel_history: rewinding and validating kernel history (readonly)"
);
debug!("validate_kernel_history: rewinding and validating kernel history (readonly)");
let mut count = 0;
let mut current = header.clone();
@ -590,14 +652,14 @@ impl Chain {
while current.height > 0 {
view.rewind(&current)?;
view.validate_root()?;
current = view.batch().get_block_header(&current.previous)?;
current = view.batch().get_previous_header(&current)?;
count += 1;
}
Ok(())
})?;
debug!(
"chain: validate_kernel_history: validated kernel root on {} headers",
"validate_kernel_history: validated kernel root on {} headers",
count,
);
@ -667,13 +729,13 @@ impl Chain {
// The txhashset.zip contains the output, rangeproof and kernel MMRs.
// We must rebuild the header MMR ourselves based on the headers in our db.
self.rebuild_header_mmr(&Tip::from_block(&header), &mut txhashset)?;
self.rebuild_header_mmr(&Tip::from_header(&header), &mut txhashset)?;
// Validate the full kernel history (kernel MMR root for every block header).
self.validate_kernel_history(&header, &txhashset)?;
// all good, prepare a new batch and update all the required records
debug!("chain: txhashset_write: rewinding a 2nd time (writeable)");
debug!("txhashset_write: rewinding a 2nd time (writeable)");
let mut batch = self.store.batch()?;
@ -697,13 +759,13 @@ impl Chain {
Ok(())
})?;
debug!("chain: txhashset_write: finished validating and rebuilding");
debug!("txhashset_write: finished validating and rebuilding");
status.on_save();
// Save the new head to the db and rebuild the header by height index.
{
let tip = Tip::from_block(&header);
let tip = Tip::from_header(&header);
batch.save_body_head(&tip)?;
batch.save_header_height(&header)?;
batch.build_by_height_index(&header, true)?;
@ -712,7 +774,7 @@ impl Chain {
// Commit all the changes to the db.
batch.commit()?;
debug!("chain: txhashset_write: finished committing the batch (head etc.)");
debug!("txhashset_write: finished committing the batch (head etc.)");
// Replace the chain txhashset with the newly built one.
{
@ -720,7 +782,7 @@ impl Chain {
*txhashset_ref = txhashset;
}
debug!("chain: txhashset_write: replaced our txhashset with the new one");
debug!("txhashset_write: replaced our txhashset with the new one");
// Check for any orphan blocks and process them based on the new chain state.
self.check_orphans(header.height + 1);
@ -761,7 +823,7 @@ impl Chain {
let cutoff = head.height.saturating_sub(horizon);
debug!(
"chain: compact_blocks_db: head height: {}, horizon: {}, cutoff: {}",
"compact_blocks_db: head height: {}, horizon: {}, cutoff: {}",
head.height, horizon, cutoff,
);
@ -791,14 +853,14 @@ impl Chain {
if current.height <= 1 {
break;
}
match batch.get_block_header(&current.previous) {
match batch.get_previous_header(&current) {
Ok(h) => current = h,
Err(NotFoundErr(_)) => break,
Err(e) => return Err(From::from(e)),
}
}
batch.commit()?;
debug!("chain: compact_blocks_db: removed {} blocks.", count);
debug!("compact_blocks_db: removed {} blocks.", count);
Ok(())
}
@ -909,6 +971,13 @@ impl Chain {
.map_err(|e| ErrorKind::StoreErr(e, "chain get header".to_owned()).into())
}
/// Get previous block header.
pub fn get_previous_header(&self, header: &BlockHeader) -> Result<BlockHeader, Error> {
self.store
.get_previous_header(header)
.map_err(|e| ErrorKind::StoreErr(e, "chain get previous header".to_owned()).into())
}
/// Get block_sums by header hash.
pub fn get_block_sums(&self, h: &Hash) -> Result<BlockSums, Error> {
self.store
@ -928,12 +997,16 @@ impl Chain {
&self,
output_ref: &OutputIdentifier,
) -> Result<BlockHeader, Error> {
let mut txhashset = self.txhashset.write();
let (_, pos) = txhashset.is_unspent(output_ref)?;
let pos = {
let mut txhashset = self.txhashset.write();
let (_, pos) = txhashset.is_unspent(output_ref)?;
pos
};
let mut min = 1;
let mut max = {
let h = self.head()?;
h.height
let head = self.head()?;
head.height
};
loop {
@ -1034,7 +1107,7 @@ fn setup_head(
if header.height > 0 && extension.batch.get_block_sums(&header.hash()).is_err()
{
debug!(
"chain: init: building (missing) block sums for {} @ {}",
"init: building (missing) block sums for {} @ {}",
header.height,
header.hash()
);
@ -1054,7 +1127,7 @@ fn setup_head(
}
debug!(
"chain: init: rewinding and validating before we start... {} at {}",
"init: rewinding and validating before we start... {} at {}",
header.hash(),
header.height,
);
@ -1070,27 +1143,33 @@ fn setup_head(
let prev_header = batch.get_block_header(&head.prev_block_h)?;
let _ = batch.delete_block(&header.hash());
let _ = batch.setup_height(&prev_header, &head)?;
head = Tip::from_block(&prev_header);
head = Tip::from_header(&prev_header);
batch.save_head(&head)?;
}
}
}
Err(NotFoundErr(_)) => {
// Save the genesis header with a "zero" header_root.
// We will update this later once we have the correct header_root.
batch.save_block_header(&genesis.header)?;
batch.save_block(&genesis)?;
let tip = Tip::from_block(&genesis.header);
let tip = Tip::from_header(&genesis.header);
batch.save_head(&tip)?;
batch.setup_height(&genesis.header, &tip)?;
// Apply the genesis block to our empty MMRs.
txhashset::extending(txhashset, &mut batch, |extension| {
extension.apply_block(&genesis)?;
// Initialize our header MM with the genesis header.
txhashset::header_extending(txhashset, &mut batch, |extension| {
extension.apply_header(&genesis.header)?;
Ok(())
})?;
batch.save_block_header(&genesis.header)?;
// Save the block_sums to the db for use later.
batch.save_block_sums(&genesis.hash(), &BlockSums::default())?;
info!("chain: init: saved genesis: {:?}", genesis.hash());
info!("init: saved genesis: {:?}", genesis.hash());
}
Err(e) => return Err(ErrorKind::StoreErr(e, "chain init load head".to_owned()))?,
};


@ -56,10 +56,35 @@ pub struct BlockContext<'a> {
pub orphans: Arc<OrphanBlockPool>,
}
// Check if this block is the next block *immediately*
// after our current chain head.
fn is_next_block(header: &BlockHeader, head: &Tip) -> bool {
header.previous == head.last_block_h
/// Process a block header as part of processing a full block.
/// We want to make sure the header is valid before we process the full block.
fn process_header_for_block(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), Error> {
// If we do not have the previous header then treat the block for this header
// as an orphan.
if ctx.batch.get_previous_header(header).is_err() {
return Err(ErrorKind::Orphan.into());
}
txhashset::header_extending(&mut ctx.txhashset, &mut ctx.batch, |extension| {
extension.force_rollback();
// Optimize this if "next" header
rewind_and_apply_header_fork(header, extension)?;
// Check the current root is correct.
extension.validate_root(header)?;
// Apply the new header to our header extension.
extension.apply_header(header)?;
Ok(())
})?;
validate_header(header, ctx)?;
add_block_header(header, &ctx.batch)?;
update_header_head(header, ctx)?;
Ok(())
}
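The crucial call above is extension.validate_root(header): after rewinding and reapplying headers to the fork point, the extension's current root must equal the prev_root the incoming header commits to. A plausible shape for that check, sketched under the assumption that HeaderExtension exposes root() and that an InvalidRoot error variant exists (neither is shown in this diff):

pub fn validate_root(&self, header: &BlockHeader) -> Result<(), Error> {
    // The root of the header MMR *before* this header is applied must
    // match the prev_root this header commits to.
    if self.root() != header.prev_root {
        return Err(ErrorKind::InvalidRoot.into());
    }
    Ok(())
}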
/// Runs the block processing pipeline, including validation and finding a
@ -70,7 +95,7 @@ pub fn process_block(b: &Block, ctx: &mut BlockContext) -> Result<Option<Tip>, E
// spend resources reading the full block when its header is invalid
debug!(
"pipe: process_block {} at {} with {} inputs, {} outputs, {} kernels",
"pipe: process_block {} at {}, in/out/kern: {}/{}/{}",
b.hash(),
b.header.height,
b.inputs().len(),
@ -96,15 +121,12 @@ pub fn process_block(b: &Block, ctx: &mut BlockContext) -> Result<Option<Tip>, E
}
// Header specific processing.
{
validate_header(&b.header, ctx)?;
add_block_header(&b.header, ctx)?;
update_header_head(&b.header, ctx)?;
}
process_header_for_block(&b.header, ctx)?;
// Check if we are processing the "next" block relative to the current chain head.
let prev_header = ctx.batch.get_previous_header(&b.header)?;
let head = ctx.batch.head()?;
if is_next_block(&b.header, &head) {
if prev_header.hash() == head.last_block_h {
// If this is the "next" block then either -
// * common case where we process blocks sequentially.
// * special case where this is the first fast sync full block
@ -123,11 +145,9 @@ pub fn process_block(b: &Block, ctx: &mut BlockContext) -> Result<Option<Tip>, E
// Start a chain extension unit of work dependent on the success of the
// internal validation and saving operations
txhashset::extending(&mut ctx.txhashset, &mut ctx.batch, |mut extension| {
// First we rewind the txhashset extension if necessary
// to put it into a consistent state for validating the block.
// We can skip this step if the previous header is the latest header we saw.
if is_next_block(&b.header, &head) {
// No need to rewind if we are processing the next block.
let prev = extension.batch.get_previous_header(&b.header)?;
if prev.hash() == head.last_block_h {
// Not a fork so we do not need to rewind or reapply any blocks.
} else {
// Rewind and re-apply blocks on the forked chain to
// put the txhashset in the correct forked state
@ -165,14 +185,10 @@ pub fn process_block(b: &Block, ctx: &mut BlockContext) -> Result<Option<Tip>, E
Ok(())
})?;
trace!(
"pipe: process_block: {} at {} is valid, save and append.",
b.hash(),
b.header.height,
);
// Add the newly accepted block and header to our index.
add_block(b, ctx)?;
// Add the validated block to the db.
// We do this even if we have not increased the total cumulative work
// so we can maintain multiple (in progress) forks.
add_block(b, &ctx.batch)?;
// Update the chain head if total work is increased.
let res = update_head(b, ctx)?;
@ -203,24 +219,30 @@ pub fn sync_block_headers(
};
if !all_known {
for header in headers {
validate_header(header, ctx)?;
add_block_header(header, ctx)?;
}
let first_header = headers.first().unwrap();
let prev_header = ctx.batch.get_block_header(&first_header.previous)?;
let prev_header = ctx.batch.get_previous_header(&first_header)?;
txhashset::sync_extending(&mut ctx.txhashset, &mut ctx.batch, |extension| {
// Optimize this if "next" header
extension.rewind(&prev_header)?;
for header in headers {
// Check the current root is correct.
extension.validate_root(header)?;
// Apply the header to the header MMR.
extension.apply_header(header)?;
// Save the header to the db.
add_block_header(header, &extension.batch)?;
}
Ok(())
})?;
// Validate all our headers now that we have added each "previous"
// header to the db in this batch above.
for header in headers {
validate_header(header, ctx)?;
}
}
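Note the reordering here: the root check and apply_header happen per header inside the single sync extension, while full contextual validation runs afterwards, once every "previous" header from the batch is in the db. The per-header invariant enforced by the loop, sketched:

// before extension.apply_header(headers[i]):
//     extension.root() == headers[i].prev_root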
// Update header_head (if most work) and sync_head (regardless) in all cases,
@ -329,7 +351,8 @@ fn check_known_store(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(),
// We cannot assume we can use the chain head for this
// as we may be dealing with a fork (with less work currently).
fn check_prev_store(header: &BlockHeader, batch: &mut store::Batch) -> Result<(), Error> {
match batch.block_exists(&header.previous) {
let prev = batch.get_previous_header(&header)?;
match batch.block_exists(&prev.hash()) {
Ok(true) => {
// We have the previous block in the store, so we can proceed.
Ok(())
@ -381,13 +404,14 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), E
}
// first I/O cost, better as late as possible
let prev = match ctx.batch.get_block_header(&header.previous) {
let prev = match ctx.batch.get_previous_header(&header) {
Ok(prev) => prev,
Err(grin_store::Error::NotFoundErr(_)) => return Err(ErrorKind::Orphan.into()),
Err(e) => {
return Err(
ErrorKind::StoreErr(e, format!("previous header {}", header.previous)).into(),
)
return Err(ErrorKind::StoreErr(
e,
format!("Failed to find previous header to {}", header.hash()),
).into())
}
};
@ -424,8 +448,9 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), E
// explicit check to ensure total_difficulty has increased by exactly
// the _network_ difficulty of the previous block
// (during testnet1 we use _block_ difficulty here)
let prev = ctx.batch.get_previous_header(&header)?;
let child_batch = ctx.batch.child()?;
let diff_iter = store::DifficultyIter::from_batch(header.previous, child_batch);
let diff_iter = store::DifficultyIter::from_batch(prev.hash(), child_batch);
let next_header_info = consensus::next_difficulty(header.height, diff_iter);
if target_difficulty != next_header_info.difficulty {
info!(
@ -439,8 +464,7 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), E
if header.pow.secondary_scaling != next_header_info.secondary_scaling {
info!(
"validate_header: header secondary scaling {} != {}",
header.pow.secondary_scaling,
next_header_info.secondary_scaling
header.pow.secondary_scaling, next_header_info.secondary_scaling
);
return Err(ErrorKind::InvalidScaling.into());
}
@ -450,7 +474,7 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), E
}
fn validate_block(block: &Block, ctx: &mut BlockContext) -> Result<(), Error> {
let prev = ctx.batch.get_block_header(&block.header.previous)?;
let prev = ctx.batch.get_previous_header(&block.header)?;
block
.validate(&prev.total_kernel_offset, ctx.verifier_cache.clone())
.map_err(|e| ErrorKind::InvalidBlockProof(e))?;
@ -472,8 +496,10 @@ fn verify_coinbase_maturity(block: &Block, ext: &mut txhashset::Extension) -> Re
/// based on block_sums of previous block, accounting for the inputs|outputs|kernels
/// of the new block.
fn verify_block_sums(b: &Block, ext: &mut txhashset::Extension) -> Result<(), Error> {
// TODO - this is 2 db calls, can we optimize this?
// Retrieve the block_sums for the previous block.
let block_sums = ext.batch.get_block_sums(&b.header.previous)?;
let prev = ext.batch.get_previous_header(&b.header)?;
let block_sums = ext.batch.get_block_sums(&prev.hash())?;
// Overage is based purely on the new block.
// Previous block_sums have taken all previous overage into account.
@ -509,22 +535,20 @@ fn apply_block_to_txhashset(block: &Block, ext: &mut txhashset::Extension) -> Re
}
/// Officially adds the block to our chain.
fn add_block(b: &Block, ctx: &mut BlockContext) -> Result<(), Error> {
// Save the block itself to the db (via the batch).
ctx.batch
/// Header must be added separately (assume this has been done previously).
fn add_block(b: &Block, batch: &store::Batch) -> Result<(), Error> {
batch
.save_block(b)
.map_err(|e| ErrorKind::StoreErr(e, "pipe save block".to_owned()))?;
// Build the block_input_bitmap, save to the db (via the batch) and cache locally.
ctx.batch.build_and_cache_block_input_bitmap(&b)?;
Ok(())
}
/// Officially adds the block header to our header chain.
fn add_block_header(bh: &BlockHeader, ctx: &mut BlockContext) -> Result<(), Error> {
ctx.batch
fn add_block_header(bh: &BlockHeader, batch: &store::Batch) -> Result<(), Error> {
batch
.save_block_header(bh)
.map_err(|e| ErrorKind::StoreErr(e, "pipe save header".to_owned()).into())
.map_err(|e| ErrorKind::StoreErr(e, "pipe save header".to_owned()))?;
Ok(())
}
/// Directly updates the head if we've just appended a new block to it or handle
@ -540,7 +564,7 @@ fn update_head(b: &Block, ctx: &BlockContext) -> Result<Option<Tip>, Error> {
.setup_height(&b.header, &head)
.map_err(|e| ErrorKind::StoreErr(e, "pipe setup height".to_owned()))?;
let tip = Tip::from_block(&b.header);
let tip = Tip::from_header(&b.header);
ctx.batch
.save_body_head(&tip)
@ -564,7 +588,7 @@ fn has_more_work(header: &BlockHeader, head: &Tip) -> bool {
/// Update the sync head so we can keep syncing from where we left off.
fn update_sync_head(bh: &BlockHeader, batch: &mut store::Batch) -> Result<(), Error> {
let tip = Tip::from_block(bh);
let tip = Tip::from_header(bh);
batch
.save_sync_head(&tip)
.map_err(|e| ErrorKind::StoreErr(e, "pipe save sync head".to_owned()))?;
@ -576,7 +600,7 @@ fn update_sync_head(bh: &BlockHeader, batch: &mut store::Batch) -> Result<(), Er
fn update_header_head(bh: &BlockHeader, ctx: &mut BlockContext) -> Result<Option<Tip>, Error> {
let header_head = ctx.batch.header_head()?;
if has_more_work(&bh, &header_head) {
let tip = Tip::from_block(bh);
let tip = Tip::from_header(bh);
ctx.batch
.save_header_head(&tip)
.map_err(|e| ErrorKind::StoreErr(e, "pipe save header head".to_owned()))?;
@ -592,6 +616,35 @@ fn update_header_head(bh: &BlockHeader, ctx: &mut BlockContext) -> Result<Option
}
}
/// Rewind the header chain and reapply headers on a fork.
pub fn rewind_and_apply_header_fork(
header: &BlockHeader,
ext: &mut txhashset::HeaderExtension,
) -> Result<(), Error> {
let mut fork_hashes = vec![];
let mut current = ext.batch.get_previous_header(header)?;
while current.height > 0 && !ext.batch.is_on_current_chain(&current).is_ok() {
fork_hashes.push(current.hash());
current = ext.batch.get_previous_header(&current)?;
}
fork_hashes.reverse();
let forked_header = current;
// Rewind the txhashset state back to the block where we forked from the most work chain.
ext.rewind(&forked_header)?;
// Re-apply all headers on this fork.
for h in fork_hashes {
let header = ext
.batch
.get_block_header(&h)
.map_err(|e| ErrorKind::StoreErr(e, format!("getting forked headers")))?;
ext.apply_header(&header)?;
}
Ok(())
}
/// Utility function to handle forks. From the forked block, jump backward
/// to find the fork root. Rewind the txhashset to the root and apply all the
/// forked blocks prior to the one being processed to set the txhashset in
@ -599,36 +652,21 @@ fn update_header_head(bh: &BlockHeader, ctx: &mut BlockContext) -> Result<Option
pub fn rewind_and_apply_fork(b: &Block, ext: &mut txhashset::Extension) -> Result<(), Error> {
// extending a fork, first identify the block where forking occurred
// keeping the hashes of blocks along the fork
let mut current = b.header.previous;
let mut fork_hashes = vec![];
loop {
let curr_header = ext.batch.get_block_header(&current)?;
if let Ok(_) = ext.batch.is_on_current_chain(&curr_header) {
break;
} else {
fork_hashes.insert(0, (curr_header.height, curr_header.hash()));
current = curr_header.previous;
}
let mut current = ext.batch.get_previous_header(&b.header)?;
while current.height > 0 && !ext.batch.is_on_current_chain(&current).is_ok() {
fork_hashes.push(current.hash());
current = ext.batch.get_previous_header(&current)?;
}
fork_hashes.reverse();
let forked_header = ext.batch.get_block_header(&current)?;
trace!(
"rewind_and_apply_fork @ {} [{}], was @ {} [{}]",
forked_header.height,
forked_header.hash(),
b.header.height,
b.header.hash()
);
let forked_header = current;
// Rewind the txhashset state back to the block where we forked from the most work chain.
ext.rewind(&forked_header)?;
trace!("rewind_and_apply_fork: blocks on fork: {:?}", fork_hashes,);
// Now re-apply all blocks on this fork.
for (_, h) in fork_hashes {
for h in fork_hashes {
let fb = ext
.batch
.get_block(&h)


@ -122,12 +122,16 @@ impl ChainStore {
}
}
pub fn get_previous_header(&self, header: &BlockHeader) -> Result<BlockHeader, Error> {
self.get_block_header(&header.prev_hash)
}
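The new accessor replaces the get_block_header(&header.previous) pattern used throughout. A small usage sketch, walking back from the chain head much as the sync code later in this diff does:

let head = chain_store.head()?;
let mut current = chain_store.get_block_header(&head.last_block_h)?;
for _ in 0..10 {
    if current.height == 0 {
        break; // reached the genesis header
    }
    current = chain_store.get_previous_header(&current)?;
}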
pub fn get_block_header(&self, h: &Hash) -> Result<BlockHeader, Error> {
{
let mut header_cache = self.header_cache.write();
let mut cache = self.header_cache.write();
// cache hit - return the value from the cache
if let Some(header) = header_cache.get_mut(h) {
if let Some(header) = cache.get_mut(h) {
return Ok(header.clone());
}
}
@ -141,8 +145,8 @@ impl ChainStore {
// cache miss - so adding to the cache for next time
if let Ok(header) = header {
{
let mut header_cache = self.header_cache.write();
header_cache.insert(*h, header.clone());
let mut cache = self.header_cache.write();
cache.insert(*h, header.clone());
}
Ok(header)
} else {
@ -283,11 +287,16 @@ impl<'a> Batch<'a> {
self.db.exists(&to_key(BLOCK_PREFIX, &mut h.to_vec()))
}
/// Save the block and its header, caching the header.
/// Save the block and the associated input bitmap.
/// Note: the block header is not saved to the db here, assumes this has already been done.
pub fn save_block(&self, b: &Block) -> Result<(), Error> {
self.save_block_header(&b.header)?;
// Build the "input bitmap" for this new block and cache it locally.
self.build_and_cache_block_input_bitmap(&b)?;
// Save the block itself to the db.
self.db
.put_ser(&to_key(BLOCK_PREFIX, &mut b.hash().to_vec())[..], b)?;
Ok(())
}
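Because save_block no longer persists the header, callers are expected to save the header first. A sketch of the new calling contract (it mirrors the setup_chain helper in the chain tests later in this diff):

let batch = chain_store.batch()?;
batch.save_block_header(&b.header)?; // header saved explicitly first
batch.save_block(&b)?;               // body + input bitmap; header assumed present
batch.commit()?;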
@ -310,13 +319,16 @@ impl<'a> Batch<'a> {
pub fn save_block_header(&self, header: &BlockHeader) -> Result<(), Error> {
let hash = header.hash();
// Cache the header.
{
let mut header_cache = self.header_cache.write();
header_cache.insert(hash, header.clone());
}
// Store the header itself indexed by hash.
self.db
.put_ser(&to_key(BLOCK_HEADER_PREFIX, &mut hash.to_vec())[..], header)?;
Ok(())
}
@ -349,12 +361,16 @@ impl<'a> Batch<'a> {
.delete(&to_key(COMMIT_POS_PREFIX, &mut commit.to_vec()))
}
pub fn get_previous_header(&self, header: &BlockHeader) -> Result<BlockHeader, Error> {
self.get_block_header(&header.prev_hash)
}
pub fn get_block_header(&self, h: &Hash) -> Result<BlockHeader, Error> {
{
let mut header_cache = self.header_cache.write();
let mut cache = self.header_cache.write();
// cache hit - return the value from the cache
if let Some(header) = header_cache.get_mut(h) {
if let Some(header) = cache.get_mut(h) {
return Ok(header.clone());
}
}
@ -368,8 +384,8 @@ impl<'a> Batch<'a> {
// cache miss - so adding to the cache for next time
if let Ok(header) = header {
{
let mut header_cache = self.header_cache.write();
header_cache.insert(*h, header.clone());
let mut cache = self.header_cache.write();
cache.insert(*h, header.clone());
}
Ok(header)
} else {
@ -479,7 +495,7 @@ impl<'a> Batch<'a> {
self.save_header_height(&header)?;
if header.height > 0 {
let mut prev_header = self.get_block_header(&header.previous)?;
let mut prev_header = self.get_previous_header(&header)?;
while prev_header.height > 0 {
if !force {
if let Ok(_) = self.is_on_current_chain(&prev_header) {
@ -488,7 +504,7 @@ impl<'a> Batch<'a> {
}
self.save_header_height(&prev_header)?;
prev_header = self.get_block_header(&prev_header.previous)?;
prev_header = self.get_previous_header(&prev_header)?;
}
}
Ok(())
@ -504,7 +520,7 @@ impl<'a> Batch<'a> {
Ok(bitmap)
}
pub fn build_and_cache_block_input_bitmap(&self, block: &Block) -> Result<Bitmap, Error> {
fn build_and_cache_block_input_bitmap(&self, block: &Block) -> Result<Bitmap, Error> {
// Build the bitmap.
let bitmap = self.build_block_input_bitmap(block)?;
@ -637,10 +653,10 @@ impl<'a> Iterator for DifficultyIter<'a> {
// Otherwise we are done.
if let Some(header) = self.header.clone() {
if let Some(ref batch) = self.batch {
self.prev_header = batch.get_block_header(&header.previous).ok();
self.prev_header = batch.get_previous_header(&header).ok();
} else {
if let Some(ref store) = self.store {
self.prev_header = store.get_block_header(&header.previous).ok();
self.prev_header = store.get_previous_header(&header).ok();
} else {
self.prev_header = None;
}


@ -544,9 +544,9 @@ where
let res: Result<T, Error>;
let rollback: bool;
// We want to use the current head of the header chain unless
// We want to use the current head of the most work chain unless
// we explicitly rewind the extension.
let head = batch.header_head()?;
let head = batch.head()?;
let header = batch.get_block_header(&head.last_block_h)?;
// create a child transaction so if the state is rolled back by itself, all
@ -620,13 +620,18 @@ impl<'a> HeaderExtension<'a> {
}
}
/// Force the rollback of this extension, no matter the result.
pub fn force_rollback(&mut self) {
self.rollback = true;
}
/// Apply a new header to the header MMR extension.
/// This may be either the header MMR or the sync MMR depending on the
/// extension.
pub fn apply_header(&mut self, header: &BlockHeader) -> Result<(), Error> {
self.pmmr.push(&header).map_err(&ErrorKind::TxHashSetErr)?;
pub fn apply_header(&mut self, header: &BlockHeader) -> Result<Hash, Error> {
self.pmmr.push(header).map_err(&ErrorKind::TxHashSetErr)?;
self.header = header.clone();
Ok(())
Ok(self.root())
}
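Having apply_header return the updated root hands callers exactly the value the next header must commit to. Sketch of a caller:

// After applying header n, the returned root is what header n+1
// must carry in its prev_root.
let new_root = extension.apply_header(&header)?;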
/// Rewind the header extension to the specified header.
@ -676,7 +681,7 @@ impl<'a> HeaderExtension<'a> {
let mut current = self.batch.get_block_header(&head.last_block_h)?;
while current.height > 0 {
header_hashes.push(current.hash());
current = self.batch.get_block_header(&current.previous)?;
current = self.batch.get_previous_header(&current)?;
}
header_hashes.reverse();


@ -14,7 +14,7 @@
//! Base types that the block chain pipeline requires.
use core::core::hash::{Hash, Hashed};
use core::core::hash::{Hash, Hashed, ZERO_HASH};
use core::core::{Block, BlockHeader};
use core::pow::Difficulty;
use core::ser;
@ -57,30 +57,32 @@ pub struct Tip {
pub height: u64,
/// Last block pushed to the fork
pub last_block_h: Hash,
/// Block previous to last
/// Previous block
pub prev_block_h: Hash,
/// Total difficulty accumulated on that fork
pub total_difficulty: Difficulty,
}
impl Tip {
/// Creates a new tip at height zero and the provided genesis hash.
pub fn new(gbh: Hash) -> Tip {
/// TODO - why do we have Tip when we could just use a block header?
/// Creates a new tip based on header.
pub fn from_header(header: &BlockHeader) -> Tip {
Tip {
height: 0,
last_block_h: gbh,
prev_block_h: gbh,
total_difficulty: Difficulty::min(),
height: header.height,
last_block_h: header.hash(),
prev_block_h: header.prev_hash,
total_difficulty: header.total_difficulty(),
}
}
}
/// Append a new block to this tip, returning a new updated tip.
pub fn from_block(bh: &BlockHeader) -> Tip {
impl Default for Tip {
fn default() -> Self {
Tip {
height: bh.height,
last_block_h: bh.hash(),
prev_block_h: bh.previous,
total_difficulty: bh.total_difficulty(),
height: 0,
last_block_h: ZERO_HASH,
prev_block_h: ZERO_HASH,
total_difficulty: Difficulty::min(),
}
}
}
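With Tip::new gone, a genesis tip is built the same way as any other tip. A sketch mirroring the store test changes below:

// Before: Tip::new(genesis.hash())
// After: derive the tip directly from the header.
let tip = Tip::from_header(&genesis.header);
assert_eq!(tip.height, 0);
assert_eq!(tip.last_block_h, genesis.header.hash());
assert_eq!(tip.prev_block_h, genesis.header.prev_hash);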


@ -92,7 +92,7 @@ fn data_files() {
b.header.timestamp = prev.timestamp + Duration::seconds(60);
b.header.pow.secondary_scaling = next_header_info.secondary_scaling;
chain.set_txhashset_roots(&mut b, false).unwrap();
chain.set_txhashset_roots(&mut b).unwrap();
pow::pow_size(
&mut b.header,
@ -101,7 +101,6 @@ fn data_files() {
global::min_edge_bits(),
).unwrap();
let _bhash = b.hash();
chain
.process_block(b.clone(), chain::Options::MINE)
.unwrap();
@ -118,7 +117,7 @@ fn data_files() {
fn _prepare_block(kc: &ExtKeychain, prev: &BlockHeader, chain: &Chain, diff: u64) -> Block {
let mut b = _prepare_block_nosum(kc, prev, diff, vec![]);
chain.set_txhashset_roots(&mut b, false).unwrap();
chain.set_txhashset_roots(&mut b).unwrap();
b
}
@ -130,13 +129,13 @@ fn _prepare_block_tx(
txs: Vec<&Transaction>,
) -> Block {
let mut b = _prepare_block_nosum(kc, prev, diff, txs);
chain.set_txhashset_roots(&mut b, false).unwrap();
chain.set_txhashset_roots(&mut b).unwrap();
b
}
fn _prepare_fork_block(kc: &ExtKeychain, prev: &BlockHeader, chain: &Chain, diff: u64) -> Block {
let mut b = _prepare_block_nosum(kc, prev, diff, vec![]);
chain.set_txhashset_roots(&mut b, true).unwrap();
chain.set_txhashset_roots_forked(&mut b, prev).unwrap();
b
}
@ -148,7 +147,7 @@ fn _prepare_fork_block_tx(
txs: Vec<&Transaction>,
) -> Block {
let mut b = _prepare_block_nosum(kc, prev, diff, txs);
chain.set_txhashset_roots(&mut b, true).unwrap();
chain.set_txhashset_roots_forked(&mut b, prev).unwrap();
b
}


@ -74,7 +74,7 @@ fn mine_empty_chain() {
b.header.timestamp = prev.timestamp + Duration::seconds(60);
b.header.pow.secondary_scaling = next_header_info.secondary_scaling;
chain.set_txhashset_roots(&mut b, false).unwrap();
chain.set_txhashset_roots(&mut b).unwrap();
let edge_bits = if n == 2 {
global::min_edge_bits() + 1
@ -201,45 +201,35 @@ fn longer_fork() {
// then send back on the 1st
let genesis = pow::mine_genesis_block().unwrap();
let chain = setup(".grin4", genesis.clone());
let chain_fork = setup(".grin5", genesis);
// add blocks to both chains, 20 on the main one, only the first 5
// for the forked chain
let mut prev = chain.head_header().unwrap();
for n in 0..10 {
let b = prepare_block(&kc, &prev, &chain, 2 * n + 2);
let bh = b.header.clone();
if n < 5 {
chain_fork
.process_block(b.clone(), chain::Options::SKIP_POW)
.unwrap();
}
prev = b.header.clone();
chain.process_block(b, chain::Options::SKIP_POW).unwrap();
prev = bh;
}
// check both chains are in the expected state
let forked_block = chain.get_header_by_height(5).unwrap();
let head = chain.head_header().unwrap();
assert_eq!(head.height, 10);
assert_eq!(head.hash(), prev.hash());
let head_fork = chain_fork.head_header().unwrap();
assert_eq!(head_fork.height, 5);
let mut prev_fork = head_fork.clone();
let mut prev = forked_block;
for n in 0..7 {
let b_fork = prepare_block(&kc, &prev_fork, &chain_fork, 2 * n + 11);
let bh_fork = b_fork.header.clone();
let b = b_fork.clone();
let b = prepare_fork_block(&kc, &prev, &chain, 2 * n + 11);
prev = b.header.clone();
chain.process_block(b, chain::Options::SKIP_POW).unwrap();
chain_fork
.process_block(b_fork, chain::Options::SKIP_POW)
.unwrap();
prev_fork = bh_fork;
}
let new_head = prev;
// After all this the chain should have switched to the fork.
let head = chain.head_header().unwrap();
assert_eq!(head.height, 12);
assert_eq!(head.hash(), new_head.hash());
}
#[test]
@ -398,7 +388,7 @@ fn output_header_mappings() {
b.header.timestamp = prev.timestamp + Duration::seconds(60);
b.header.pow.secondary_scaling = next_header_info.secondary_scaling;
chain.set_txhashset_roots(&mut b, false).unwrap();
chain.set_txhashset_roots(&mut b).unwrap();
let edge_bits = if n == 2 {
global::min_edge_bits() + 1
@ -432,12 +422,13 @@ fn output_header_mappings() {
assert_eq!(header_for_output.height, n as u64);
}
}
fn prepare_block<K>(kc: &K, prev: &BlockHeader, chain: &Chain, diff: u64) -> Block
where
K: Keychain,
{
let mut b = prepare_block_nosum(kc, prev, diff, vec![]);
chain.set_txhashset_roots(&mut b, false).unwrap();
chain.set_txhashset_roots(&mut b).unwrap();
b
}
@ -452,7 +443,7 @@ where
K: Keychain,
{
let mut b = prepare_block_nosum(kc, prev, diff, txs);
chain.set_txhashset_roots(&mut b, false).unwrap();
chain.set_txhashset_roots(&mut b).unwrap();
b
}
@ -461,7 +452,7 @@ where
K: Keychain,
{
let mut b = prepare_block_nosum(kc, prev, diff, vec![]);
chain.set_txhashset_roots(&mut b, true).unwrap();
chain.set_txhashset_roots_forked(&mut b, prev).unwrap();
b
}
@ -476,7 +467,7 @@ where
K: Keychain,
{
let mut b = prepare_block_nosum(kc, prev, diff, txs);
chain.set_txhashset_roots(&mut b, true).unwrap();
chain.set_txhashset_roots_forked(&mut b, prev).unwrap();
b
}


@ -23,7 +23,7 @@ extern crate rand;
use std::fs;
use std::sync::Arc;
use chain::Tip;
use chain::{Error, Tip};
use core::core::hash::Hashed;
use core::core::{Block, BlockHeader};
use core::global::{self, ChainTypes};
@ -35,6 +35,18 @@ fn clean_output_dir(dir_name: &str) {
let _ = fs::remove_dir_all(dir_name);
}
fn setup_chain(genesis: &Block, chain_store: Arc<chain::store::ChainStore>) -> Result<(), Error> {
let batch = chain_store.batch()?;
batch.save_block_header(&genesis.header)?;
batch.save_block(&genesis)?;
let head = Tip::from_header(&genesis.header);
batch.save_head(&head)?;
batch.setup_height(&genesis.header, &head)?;
batch.save_block_header(&genesis.header)?;
batch.commit()?;
Ok(())
}
#[test]
fn test_various_store_indices() {
match env_logger::try_init() {
@ -48,29 +60,24 @@ fn test_various_store_indices() {
let key_id = ExtKeychainPath::new(1, 1, 0, 0, 0).to_identifier();
let db_env = Arc::new(store::new_env(chain_dir.to_string()));
let chain_store = chain::store::ChainStore::new(db_env).unwrap();
let chain_store = Arc::new(chain::store::ChainStore::new(db_env).unwrap());
global::set_mining_mode(ChainTypes::AutomatedTesting);
let genesis = pow::mine_genesis_block().unwrap();
let reward = libtx::reward::output(&keychain, &key_id, 0, 1).unwrap();
setup_chain(&genesis, chain_store.clone()).unwrap();
let reward = libtx::reward::output(&keychain, &key_id, 0, 1).unwrap();
let block = Block::new(&genesis.header, vec![], Difficulty::min(), reward).unwrap();
let block_hash = block.hash();
{
let batch = chain_store.batch().unwrap();
batch.save_block(&genesis).unwrap();
batch
.setup_height(&genesis.header, &Tip::new(genesis.hash()))
.unwrap();
batch.commit().unwrap();
}
{
let batch = chain_store.batch().unwrap();
batch.save_block_header(&block.header).unwrap();
batch.save_block(&block).unwrap();
batch
.setup_height(&block.header, &Tip::from_block(&block.header))
.setup_height(&block.header, &Tip::from_header(&block.header))
.unwrap();
batch.commit().unwrap();
}
@ -113,7 +120,12 @@ fn test_store_header_height() {
clean_output_dir(chain_dir);
let db_env = Arc::new(store::new_env(chain_dir.to_string()));
let chain_store = chain::store::ChainStore::new(db_env).unwrap();
let chain_store = Arc::new(chain::store::ChainStore::new(db_env).unwrap());
global::set_mining_mode(ChainTypes::AutomatedTesting);
let genesis = pow::mine_genesis_block().unwrap();
setup_chain(&genesis, chain_store.clone()).unwrap();
let mut block_header = BlockHeader::default();
block_header.height = 1;


@ -76,7 +76,7 @@ fn test_coinbase_maturity() {
block.header.timestamp = prev.timestamp + Duration::seconds(60);
block.header.pow.secondary_scaling = next_header_info.secondary_scaling;
chain.set_txhashset_roots(&mut block, false).unwrap();
chain.set_txhashset_roots(&mut block).unwrap();
pow::pow_size(
&mut block.header,
@ -123,7 +123,7 @@ fn test_coinbase_maturity() {
block.header.timestamp = prev.timestamp + Duration::seconds(60);
block.header.pow.secondary_scaling = next_header_info.secondary_scaling;
chain.set_txhashset_roots(&mut block, false).unwrap();
chain.set_txhashset_roots(&mut block).unwrap();
// Confirm the tx attempting to spend the coinbase output
// is not valid at the current block height given the current chain state.
@ -156,7 +156,7 @@ fn test_coinbase_maturity() {
block.header.timestamp = prev.timestamp + Duration::seconds(60);
block.header.pow.secondary_scaling = next_header_info.secondary_scaling;
chain.set_txhashset_roots(&mut block, false).unwrap();
chain.set_txhashset_roots(&mut block).unwrap();
pow::pow_size(
&mut block.header,
@ -183,7 +183,7 @@ fn test_coinbase_maturity() {
block.header.timestamp = prev.timestamp + Duration::seconds(60);
block.header.pow.secondary_scaling = next_header_info.secondary_scaling;
chain.set_txhashset_roots(&mut block, false).unwrap();
chain.set_txhashset_roots(&mut block).unwrap();
pow::pow_size(
&mut block.header,


@ -26,10 +26,7 @@ use std::sync::Arc;
use chain::store::ChainStore;
use chain::txhashset;
use chain::types::Tip;
use core::core::{Block, BlockHeader};
use core::pow::Difficulty;
use keychain::{ExtKeychain, ExtKeychainPath, Keychain};
use core::core::BlockHeader;
use util::file;
fn clean_output_dir(dir_name: &str) {


@ -118,7 +118,7 @@ pub struct BlockHeader {
/// Height of this block since the genesis block (height 0)
pub height: u64,
/// Hash of the block previous to this in the chain.
pub previous: Hash,
pub prev_hash: Hash,
/// Root hash of the header MMR at the previous header.
pub prev_root: Hash,
/// Timestamp at which the block was built.
@ -147,10 +147,10 @@ fn fixed_size_of_serialized_header(_version: u16) -> usize {
size += mem::size_of::<u16>(); // version
size += mem::size_of::<u64>(); // height
size += mem::size_of::<i64>(); // timestamp
// previous, prev_root, output_root, range_proof_root, kernel_root
// prev_hash, prev_root, output_root, range_proof_root, kernel_root
size += 5 * mem::size_of::<Hash>();
size += mem::size_of::<BlindingFactor>(); // total_kernel_offset
// output_mmr_size, kernel_mmr_size
// output_mmr_size, kernel_mmr_size
size += 2 * mem::size_of::<u64>();
size += mem::size_of::<Difficulty>(); // total_difficulty
size += mem::size_of::<u32>(); // secondary_scaling
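As a back-of-envelope check of the fixed-size portion (assuming Hash and BlindingFactor are 32 bytes each and Difficulty wraps a u64, per the core types):

//   2 (version) + 8 (height) + 8 (timestamp)
// + 5 * 32 (prev_hash, prev_root, output_root, range_proof_root, kernel_root)
// + 32 (total_kernel_offset)
// + 2 * 8 (output_mmr_size, kernel_mmr_size)
// + 8 (total_difficulty) + 4 (secondary_scaling)
// = 238 bytes, before the proof-of-work fields that follow.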
@ -177,7 +177,7 @@ impl Default for BlockHeader {
version: 1,
height: 0,
timestamp: DateTime::<Utc>::from_utc(NaiveDateTime::from_timestamp(0, 0), Utc),
previous: ZERO_HASH,
prev_hash: ZERO_HASH,
prev_root: ZERO_HASH,
output_root: ZERO_HASH,
range_proof_root: ZERO_HASH,
@ -213,7 +213,7 @@ impl Writeable for BlockHeader {
impl Readable for BlockHeader {
fn read(reader: &mut Reader) -> Result<BlockHeader, ser::Error> {
let (version, height, timestamp) = ser_multiread!(reader, read_u16, read_u64, read_i64);
let previous = Hash::read(reader)?;
let prev_hash = Hash::read(reader)?;
let prev_root = Hash::read(reader)?;
let output_root = Hash::read(reader)?;
let range_proof_root = Hash::read(reader)?;
@ -232,7 +232,7 @@ impl Readable for BlockHeader {
version,
height,
timestamp: DateTime::<Utc>::from_utc(NaiveDateTime::from_timestamp(timestamp, 0), Utc),
previous,
prev_hash,
prev_root,
output_root,
range_proof_root,
@ -253,7 +253,7 @@ impl BlockHeader {
[write_u16, self.version],
[write_u64, self.height],
[write_i64, self.timestamp.timestamp()],
[write_fixed_bytes, &self.previous],
[write_fixed_bytes, &self.prev_hash],
[write_fixed_bytes, &self.prev_root],
[write_fixed_bytes, &self.output_root],
[write_fixed_bytes, &self.range_proof_root],
@ -494,7 +494,7 @@ impl Block {
header: BlockHeader {
height: prev.height + 1,
timestamp,
previous: prev.hash(),
prev_hash: prev.hash(),
total_kernel_offset,
pow: ProofOfWork {
total_difficulty: difficulty + prev.pow.total_difficulty,


@ -16,7 +16,7 @@
use std::marker;
use core::hash::Hash;
use core::hash::{Hash, ZERO_HASH};
use core::pmmr::{bintree_postorder_height, is_leaf, peak_map_height, peaks, HashOnlyBackend};
use ser::{PMMRIndexHashable, PMMRable};
@ -58,16 +58,17 @@ where
}
}
/// Get the unpruned size of the MMR.
pub fn unpruned_size(&self) -> u64 {
self.last_pos
}
/// Is the MMR empty?
pub fn is_empty(&self) -> bool {
self.last_pos == 0
}
/// Total size of the tree, including intermediary nodes and ignoring any
/// pruning.
pub fn unpruned_size(&self) -> u64 {
self.last_pos
}
/// Rewind the MMR to the specified position.
pub fn rewind(&mut self, position: u64) -> Result<(), String> {
// Identify which actual position we should rewind to as the provided
@ -140,6 +141,9 @@ where
/// Return the overall root hash for this MMR.
pub fn root(&self) -> Hash {
if self.is_empty() {
return ZERO_HASH;
}
let mut res = None;
for peak in self.peaks().iter().rev() {
res = match res {
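Returning ZERO_HASH for the empty MMR gives the genesis header a well-defined prev_root: no headers precede it, so the MMR it commits to is empty. This lines up with the BlockHeader::default() shown earlier in this diff, sketched:

let genesis_header = BlockHeader::default();
assert_eq!(genesis_header.prev_root, ZERO_HASH); // root of the empty header MMR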


@ -353,9 +353,9 @@ where
Ok(())
}
/// Check if this PMMR is (unpruned_size == 0).
/// Is the MMR empty?
pub fn is_empty(&self) -> bool {
self.unpruned_size() == 0
self.last_pos == 0
}
/// Total size of the tree, including intermediary nodes and ignoring any


@ -27,7 +27,7 @@ use pow::{Difficulty, Proof, ProofOfWork};
pub fn genesis_dev() -> core::Block {
core::Block::with_header(core::BlockHeader {
height: 0,
previous: core::hash::Hash([0xff; 32]),
// previous: core::hash::Hash([0xff; 32]),
timestamp: Utc.ymd(1997, 8, 4).and_hms(0, 0, 0),
pow: ProofOfWork {
nonce: global::get_genesis_nonce(),
@ -63,7 +63,7 @@ pub fn genesis_testnet1() -> core::Block {
pub fn genesis_testnet2() -> core::Block {
core::Block::with_header(core::BlockHeader {
height: 0,
previous: core::hash::Hash([0xff; 32]),
// previous: core::hash::Hash([0xff; 32]),
timestamp: Utc.ymd(2018, 3, 26).and_hms(16, 0, 0),
pow: ProofOfWork {
total_difficulty: Difficulty::from_num(global::initial_block_difficulty()),
@ -86,7 +86,7 @@ pub fn genesis_testnet2() -> core::Block {
pub fn genesis_testnet3() -> core::Block {
core::Block::with_header(core::BlockHeader {
height: 0,
previous: core::hash::Hash([0xff; 32]),
// previous: core::hash::Hash([0xff; 32]),
timestamp: Utc.ymd(2018, 7, 8).and_hms(18, 0, 0),
pow: ProofOfWork {
total_difficulty: Difficulty::from_num(global::initial_block_difficulty()),
@ -110,7 +110,7 @@ pub fn genesis_testnet3() -> core::Block {
pub fn genesis_testnet4() -> core::Block {
core::Block::with_header(core::BlockHeader {
height: 0,
previous: core::hash::Hash([0xff; 32]),
// previous: core::hash::Hash([0xff; 32]),
timestamp: Utc.ymd(2018, 10, 17).and_hms(20, 0, 0),
pow: ProofOfWork {
total_difficulty: Difficulty::from_num(global::initial_block_difficulty()),
@ -133,7 +133,7 @@ pub fn genesis_testnet4() -> core::Block {
pub fn genesis_main() -> core::Block {
core::Block::with_header(core::BlockHeader {
height: 0,
previous: core::hash::Hash([0xff; 32]),
// previous: core::hash::Hash([0xff; 32]),
timestamp: Utc.ymd(2018, 8, 14).and_hms(0, 0, 0),
pow: ProofOfWork {
total_difficulty: Difficulty::from_num(global::initial_block_difficulty()),


@ -28,6 +28,7 @@ pub mod common;
use std::sync::Arc;
use util::RwLock;
use core::core::hash::Hashed;
use core::core::verifier_cache::LruVerifierCache;
use core::core::{Block, BlockHeader, Transaction};
use core::pow::Difficulty;
@ -55,20 +56,25 @@ fn test_transaction_pool_block_building() {
let key_id = ExtKeychain::derive_key_id(1, height as u32, 0, 0, 0);
let fee = txs.iter().map(|x| x.fee()).sum();
let reward = libtx::reward::output(&keychain, &key_id, fee, height).unwrap();
let block = Block::new(&prev_header, txs, Difficulty::min(), reward).unwrap();
let mut block = Block::new(&prev_header, txs, Difficulty::min(), reward).unwrap();
// Set the prev_root to the prev hash for testing purposes (no MMR to obtain a root from).
block.header.prev_root = prev_header.hash();
chain.update_db_for_block(&block);
block.header
block
};
let header = add_block(BlockHeader::default(), vec![], &mut chain);
let block = add_block(BlockHeader::default(), vec![], &mut chain);
let header = block.header;
// Now create tx to spend that first coinbase (now matured).
// Provides us with some useful outputs to test with.
let initial_tx = test_transaction_spending_coinbase(&keychain, &header, vec![10, 20, 30, 40]);
// Mine that initial tx so we can spend it with multiple txs
let header = add_block(header, vec![initial_tx], &mut chain);
let block = add_block(header, vec![initial_tx], &mut chain);
let header = block.header;
// Initialize a new pool with our chain adapter.
let pool = RwLock::new(test_setup(Arc::new(chain.clone()), verifier_cache));
@ -112,14 +118,7 @@ fn test_transaction_pool_block_building() {
// children should have been aggregated into parents
assert_eq!(txs.len(), 3);
let block = {
let key_id = ExtKeychain::derive_key_id(1, 2, 0, 0, 0);
let fees = txs.iter().map(|tx| tx.fee()).sum();
let reward = libtx::reward::output(&keychain, &key_id, fees, 0).unwrap();
Block::new(&header, txs, Difficulty::min(), reward)
}.unwrap();
chain.update_db_for_block(&block);
let block = add_block(header, txs, &mut chain);
// Now reconcile the transaction pool with the new block
// and check the resulting contents of the pool are what we expect.


@ -31,6 +31,7 @@ use util::RwLock;
use core::core::{Block, BlockHeader};
use common::*;
use core::core::hash::Hashed;
use core::core::verifier_cache::LruVerifierCache;
use core::pow::Difficulty;
use keychain::{ExtKeychain, Keychain};
@ -53,7 +54,11 @@ fn test_transaction_pool_block_reconciliation() {
let height = 1;
let key_id = ExtKeychain::derive_key_id(1, height as u32, 0, 0, 0);
let reward = libtx::reward::output(&keychain, &key_id, 0, height).unwrap();
let block = Block::new(&BlockHeader::default(), vec![], Difficulty::min(), reward).unwrap();
let genesis = BlockHeader::default();
let mut block = Block::new(&genesis, vec![], Difficulty::min(), reward).unwrap();
// Set the prev_root to the prev hash for testing purposes (no MMR to obtain a root from).
block.header.prev_root = genesis.hash();
chain.update_db_for_block(&block);
@ -68,7 +73,10 @@ fn test_transaction_pool_block_reconciliation() {
let key_id = ExtKeychain::derive_key_id(1, 2, 0, 0, 0);
let fees = initial_tx.fee();
let reward = libtx::reward::output(&keychain, &key_id, fees, 0).unwrap();
let block = Block::new(&header, vec![initial_tx], Difficulty::min(), reward).unwrap();
let mut block = Block::new(&header, vec![initial_tx], Difficulty::min(), reward).unwrap();
// Set the prev_root to the prev hash for testing purposes (no MMR to obtain a root from).
block.header.prev_root = header.hash();
chain.update_db_for_block(&block);
@ -158,7 +166,10 @@ fn test_transaction_pool_block_reconciliation() {
let key_id = ExtKeychain::derive_key_id(1, 3, 0, 0, 0);
let fees = block_txs.iter().map(|tx| tx.fee()).sum();
let reward = libtx::reward::output(&keychain, &key_id, fees, 0).unwrap();
let block = Block::new(&header, block_txs, Difficulty::min(), reward).unwrap();
let mut block = Block::new(&header, block_txs, Difficulty::min(), reward).unwrap();
// Set the prev_root to the prev hash for testing purposes (no MMR to obtain a root from).
block.header.prev_root = header.hash();
chain.update_db_for_block(&block);
block


@ -66,13 +66,14 @@ impl ChainAdapter {
pub fn update_db_for_block(&self, block: &Block) {
let header = &block.header;
let tip = Tip::from_header(header);
let batch = self.store.batch().unwrap();
let tip = Tip::from_block(&header);
batch.save_block_header(&header).unwrap();
batch.save_block_header(header).unwrap();
batch.save_head(&tip).unwrap();
// Retrieve previous block_sums from the db.
let prev_sums = if let Ok(prev_sums) = batch.get_block_sums(&header.previous) {
let prev_sums = if let Ok(prev_sums) = batch.get_block_sums(&tip.prev_block_h) {
prev_sums
} else {
BlockSums::default()


@ -159,7 +159,7 @@ impl p2p::ChainAdapter for NetToChainAdapter {
}
};
if let Ok(prev) = self.chain().get_block_header(&cb.header.previous) {
if let Ok(prev) = self.chain().get_previous_header(&cb.header) {
if block
.validate(&prev.total_kernel_offset, self.verifier_cache.clone())
.is_ok()
@ -441,8 +441,9 @@ impl NetToChainAdapter {
}
}
let prev_hash = b.header.previous;
let bhash = b.hash();
let previous = self.chain().get_previous_header(&b.header);
match self.chain().process_block(b, self.chain_opts()) {
Ok(_) => {
self.validate_chain(bhash);
@ -465,10 +466,14 @@ impl NetToChainAdapter {
Err(e) => {
match e.kind() {
chain::ErrorKind::Orphan => {
// make sure we did not miss the parent block
if !self.chain().is_orphan(&prev_hash) && !self.sync_state.is_syncing() {
debug!("adapter: process_block: received an orphan block, checking the parent: {:}", prev_hash);
self.request_block_by_hash(prev_hash, &addr)
if let Ok(previous) = previous {
// make sure we did not miss the parent block
if !self.chain().is_orphan(&previous.hash())
&& !self.sync_state.is_syncing()
{
debug!("adapter: process_block: received an orphan block, checking the parent: {:}", previous.hash());
self.request_block_by_hash(previous.hash(), &addr)
}
}
true
}


@ -105,7 +105,7 @@ impl BodySync {
hashes.push(header.hash());
oldest_height = header.height;
current = self.chain.get_block_header(&header.previous);
current = self.chain.get_previous_header(&header);
}
}
//+ remove me after #1880 root cause found


@ -67,7 +67,7 @@ impl HeaderSync {
// Reset sync_head to the same as current header_head.
self.chain.reset_sync_head(&header_head).unwrap();
// Rebuild the sync MMR to match our updates sync_head.
// Rebuild the sync MMR to match our updated sync_head.
self.chain.rebuild_sync_mmr(&header_head).unwrap();
self.history_locator.clear();
@ -173,7 +173,7 @@ impl HeaderSync {
locator.push((header.height, header.hash()));
break;
}
header_cursor = self.chain.get_block_header(&header.previous);
header_cursor = self.chain.get_previous_header(&header);
}
}
}
@ -260,9 +260,7 @@ mod test {
// just 1 locator in history
let heights: Vec<u64> = vec![64, 62, 58, 50, 34, 2, 0];
let history_locator: Vec<(u64, Hash)> = vec![
(0, zh.clone()),
];
let history_locator: Vec<(u64, Hash)> = vec![(0, zh.clone())];
let mut locator: Vec<(u64, Hash)> = vec![];
for h in heights {
if let Some(l) = close_enough(&history_locator, h) {
@ -288,7 +286,7 @@ mod test {
// more realistic test with 11 history
let heights: Vec<u64> = vec![
2554, 2552, 2548, 2540, 2524, 2492, 2428, 2300, 2044, 1532, 508, 0
2554, 2552, 2548, 2540, 2524, 2492, 2428, 2300, 2044, 1532, 508, 0,
];
let history_locator: Vec<(u64, Hash)> = vec![
(2043, zh.clone()),
@ -310,15 +308,14 @@ mod test {
}
}
locator.dedup_by(|a, b| a.0 == b.0);
assert_eq!(locator, vec![
(2043, zh.clone()),
(1532, zh.clone()),
(0, zh.clone()),
]);
assert_eq!(
locator,
vec![(2043, zh.clone()), (1532, zh.clone()), (0, zh.clone()),]
);
// more realistic test with 12 history
let heights: Vec<u64> = vec![
4598, 4596, 4592, 4584, 4568, 4536, 4472, 4344, 4088, 3576, 2552, 504, 0
4598, 4596, 4592, 4584, 4568, 4536, 4472, 4344, 4088, 3576, 2552, 504, 0,
];
let history_locator: Vec<(u64, Hash)> = vec![
(4087, zh.clone()),
@ -341,11 +338,14 @@ mod test {
}
}
locator.dedup_by(|a, b| a.0 == b.0);
assert_eq!(locator, vec![
(4087, zh.clone()),
(3576, zh.clone()),
(3065, zh.clone()),
(0, zh.clone()),
]);
assert_eq!(
locator,
vec![
(4087, zh.clone()),
(3576, zh.clone()),
(3065, zh.clone()),
(0, zh.clone()),
]
);
}
}


@ -151,10 +151,7 @@ impl StateSync {
.get_block_header(&header_head.prev_block_h)
.unwrap();
for _ in 0..(horizon - horizon / 10) {
txhashset_head = self
.chain
.get_block_header(&txhashset_head.previous)
.unwrap();
txhashset_head = self.chain.get_previous_header(&txhashset_head).unwrap();
}
let bhash = txhashset_head.hash();
debug!(


@ -139,7 +139,7 @@ fn build_block(
);
// Now set txhashset roots and sizes on the header of the block being built.
let roots_result = chain.set_txhashset_roots(&mut b, false);
let roots_result = chain.set_txhashset_roots(&mut b);
match roots_result {
Ok(_) => Ok((b, block_fees)),


@ -157,9 +157,10 @@ impl Miner {
// we found a solution, push our block through the chain processing pipeline
if sol {
info!(
"(Server ID: {}) Found valid proof of work, adding block {}.",
"(Server ID: {}) Found valid proof of work, adding block {} (prev_root {}).",
self.debug_output_id,
b.hash()
b.hash(),
b.header.prev_root,
);
let res = self.chain.process_block(b, chain::Options::MINE);
if let Err(e) = res {


@ -99,7 +99,7 @@ pub fn add_block_with_reward(chain: &Chain, txs: Vec<&Transaction>, reward: CbDa
).unwrap();
b.header.timestamp = prev.timestamp + Duration::seconds(60);
b.header.pow.secondary_scaling = next_header_info.secondary_scaling;
chain.set_txhashset_roots(&mut b, false).unwrap();
chain.set_txhashset_roots(&mut b).unwrap();
pow::pow_size(
&mut b.header,
next_header_info.difficulty,