Refactor is_orphan and is_fork during block processing (#2015)

* refactor check_prev logic in process_block

* rustfmt
Antioch Peverell 2018-11-25 10:22:19 +00:00 committed by GitHub
parent af595fa0fc
commit 1ea82d9abe


@@ -57,35 +57,19 @@ pub struct BlockContext<'a> {
 }
 /// Process a block header as part of processing a full block.
-/// We want to make sure the header is valid before we process the full block.
-fn process_header_for_block(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), Error> {
-	let head = ctx.batch.head()?;
-	// If we do not have the previous header then treat the block for this header
-	// as an orphan.
-	if ctx.batch.get_previous_header(header).is_err() {
-		return Err(ErrorKind::Orphan.into());
-	}
+/// We want to be sure the header is valid before processing the full block.
+fn process_header_for_block(
+	header: &BlockHeader,
+	is_fork: bool,
+	ctx: &mut BlockContext,
+) -> Result<(), Error> {
 	txhashset::header_extending(&mut ctx.txhashset, &mut ctx.batch, |extension| {
 		extension.force_rollback();
-		let prev = extension.batch.get_previous_header(header)?;
-		if prev.hash() == head.last_block_h {
-			// Not a fork so we do not need to rewind or reapply any headers.
-		} else {
-			// Rewind and re-apply headers on the forked chain to
-			// put the header extension in the correct forked state
-			// (immediately prior to this new header).
+		if is_fork {
 			rewind_and_apply_header_fork(header, extension)?;
 		}
-		// Check the current root is correct.
 		extension.validate_root(header)?;
-		// Apply the new header to our header extension.
 		extension.apply_header(header)?;
 		Ok(())
 	})?;
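
The shape of the change above is that the caller now decides once whether the block sits on a fork and passes that in, so process_header_for_block no longer needs to consult the chain head itself. A minimal standalone sketch of that control flow, using simplified stand-in types rather than the real grin HeaderExtension:

// Standalone sketch only: HeaderExtension and its methods are stand-ins,
// not the real chain types.
struct HeaderExtension {
	// header heights currently applied in this extension
	applied: Vec<u64>,
}

impl HeaderExtension {
	// stand-in for rewind_and_apply_header_fork: rewind to the fork point
	// immediately prior to the new header
	fn rewind_and_apply_header_fork(&mut self, fork_point: u64) {
		self.applied.retain(|h| *h <= fork_point);
	}

	// stand-in for apply_header
	fn apply_header(&mut self, height: u64) {
		self.applied.push(height);
	}
}

// The caller passes is_fork in, so the extension only pays the rewind cost
// when the new header does not build directly on the chain head.
fn process_header_for_block(height: u64, is_fork: bool, ext: &mut HeaderExtension) {
	if is_fork {
		ext.rewind_and_apply_header_fork(height - 1);
	}
	ext.apply_header(height);
}

fn main() {
	let mut ext = HeaderExtension { applied: vec![1, 2, 3] };
	process_header_for_block(4, false, &mut ext); // the "next" header, no rewind
	process_header_for_block(4, true, &mut ext); // competing header at 4, rewind first
	assert_eq!(ext.applied, vec![1, 2, 3, 4]);
}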
@@ -96,6 +80,16 @@ fn process_header_for_block(header: &BlockHeader, ctx: &mut BlockContext) -> Res
 	Ok(())
 }
+// Check if we already know about this block for various reasons
+// from cheapest to most expensive (delay hitting the db until last).
+fn check_known(block: &Block, ctx: &mut BlockContext) -> Result<(), Error> {
+	check_known_head(&block.header, ctx)?;
+	check_known_cache(&block.header, ctx)?;
+	check_known_orphans(&block.header, ctx)?;
+	check_known_store(&block.header, ctx)?;
+	Ok(())
+}
 /// Runs the block processing pipeline, including validation and finding a
 /// place for the new block in the chain.
 /// Returns new head if chain head updated.
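
The new check_known helper simply chains the existing checks from cheapest to most expensive, relying on the ? operator to return on the first hit so the store is only touched when the in-memory checks pass. A standalone sketch of that fail-fast shape, with stand-in checks and a stand-in error type:

// Standalone sketch: the error type and the individual checks are stand-ins
// for the real check_known_* functions.
#[derive(Debug)]
enum Error {
	Unfit(String),
}

// cheap in-memory check
fn check_known_head(height: u64, head: u64) -> Result<(), Error> {
	if height == head {
		return Err(Error::Unfit("already chain head".into()));
	}
	Ok(())
}

// stand-in for the expensive db lookup, deliberately last
fn check_known_store(height: u64, store: &[u64]) -> Result<(), Error> {
	if store.contains(&height) {
		return Err(Error::Unfit("already in store".into()));
	}
	Ok(())
}

fn check_known(height: u64, head: u64, store: &[u64]) -> Result<(), Error> {
	// ? short-circuits, so the store is only consulted if the cheaper
	// check passes first
	check_known_head(height, head)?;
	check_known_store(height, store)?;
	Ok(())
}

fn main() {
	let store = vec![1, 2, 3];
	assert!(check_known(3, 3, &store).is_err()); // rejected before the store lookup
	assert!(check_known(2, 3, &store).is_err()); // rejected by the store lookup
	assert!(check_known(4, 3, &store).is_ok()); // genuinely new
}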
@@ -112,41 +106,29 @@ pub fn process_block(b: &Block, ctx: &mut BlockContext) -> Result<Option<Tip>, E
 		b.kernels().len(),
 	);
-	// Fast in-memory checks to avoid re-processing a block we recently processed.
-	{
-		// Check if we have recently processed this block (via ctx chain head).
-		check_known_head(&b.header, ctx)?;
-		// Check if we have recently processed this block (via block_hashes_cache).
-		check_known_cache(&b.header, ctx)?;
-		// Check if this block is already know due it being in the current set of orphan blocks.
-		check_known_orphans(&b.header, ctx)?;
-		// Check we have *this* block in the store.
-		// Stop if we have processed this block previously (it is in the store).
-		// This is more expensive than the earlier check_known() as we hit the store.
-		check_known_store(&b.header, ctx)?;
-	}
-	// Header specific processing.
-	process_header_for_block(&b.header, ctx)?;
-	// Check if are processing the "next" block relative to the current chain head.
-	let prev_header = ctx.batch.get_previous_header(&b.header)?;
+	check_known(b, ctx)?;
+	// Delay hitting the db for current chain head until we know
+	// this block is not already known.
 	let head = ctx.batch.head()?;
-	if prev_header.hash() == head.last_block_h {
-		// If this is the "next" block then either -
-		// * common case where we process blocks sequentially.
-		// * special case where this is the first fast sync full block
-		// Either way we can proceed (and we know the block is new and unprocessed).
-	} else {
-		// At this point it looks like this is a new block that we have not yet processed.
-		// Check we have the *previous* block in the store.
-		// If we do not then treat this block as an orphan.
-		check_prev_store(&b.header, &mut ctx.batch)?;
-	}
+	let is_next = b.header.prev_hash == head.last_block_h;
+	let prev = prev_header_store(&b.header, &mut ctx.batch)?;
+	// Block is an orphan if we do not know about the previous full block.
+	// Skip this check if we have just processed the previous block
+	// or the full txhashset state (fast sync) at the previous block height.
+	if !is_next && !ctx.batch.block_exists(&prev.hash())? {
+		return Err(ErrorKind::Orphan.into());
+	}
+	// This is a fork in the context of both header and block processing
+	// if this block does not immediately follow the chain head.
+	let is_fork = !is_next;
+	// Check the header is valid before we proceed with the full block.
+	process_header_for_block(&b.header, is_fork, ctx)?;
 	// Validate the block itself, make sure it is internally consistent.
 	// Use the verifier_cache for verifying rangeproofs and kernel signatures.
 	validate_block(b, ctx)?;
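
The reworked head of process_block boils down to two inputs: does the header build directly on the current chain head (is_next), and do we have the previous full block in the store. Orphan, next-block and fork handling all fall out of those two facts. A small truth-table sketch of that decision, with the inputs reduced to plain booleans rather than the real batch and header lookups:

// Sketch of the decision made at the top of process_block(); inputs are
// reduced to booleans, the real code derives them from the batch and header.
#[derive(Debug, PartialEq)]
enum Decision {
	Orphan,    // previous full block unknown: hold the block for later
	NextBlock, // builds directly on the chain head: no rewind needed
	Fork,      // previous block known but not the head: rewind and reapply
}

fn classify(is_next: bool, prev_block_in_store: bool) -> Decision {
	// The orphan check is skipped when this is the "next" block, which also
	// covers the first full block after a fast-sync txhashset download.
	if !is_next && !prev_block_in_store {
		return Decision::Orphan;
	}
	if is_next {
		Decision::NextBlock
	} else {
		// is_fork = !is_next in the real code
		Decision::Fork
	}
}

fn main() {
	assert_eq!(classify(true, false), Decision::NextBlock); // fast-sync case
	assert_eq!(classify(true, true), Decision::NextBlock); // common sequential case
	assert_eq!(classify(false, true), Decision::Fork);
	assert_eq!(classify(false, false), Decision::Orphan);
}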
@@ -154,13 +136,7 @@ pub fn process_block(b: &Block, ctx: &mut BlockContext) -> Result<Option<Tip>, E
 	// Start a chain extension unit of work dependent on the success of the
 	// internal validation and saving operations
 	txhashset::extending(&mut ctx.txhashset, &mut ctx.batch, |mut extension| {
-		let prev = extension.batch.get_previous_header(&b.header)?;
-		if prev.hash() == head.last_block_h {
-			// Not a fork so we do not need to rewind or reapply any blocks.
-		} else {
-			// Rewind and re-apply blocks on the forked chain to
-			// put the txhashset in the correct forked state
-			// (immediately prior to this new block).
+		if is_fork {
 			rewind_and_apply_fork(b, extension)?;
 		}
@@ -359,25 +335,14 @@ fn check_known_store(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(),
 	}
 }
-// Check we have the *previous* block in the store.
-// Note: not just the header but the full block itself.
-// We cannot assume we can use the chain head for this
-// as we may be dealing with a fork (with less work currently).
-fn check_prev_store(header: &BlockHeader, batch: &mut store::Batch) -> Result<(), Error> {
-	let prev = batch.get_previous_header(&header)?;
-	match batch.block_exists(&prev.hash()) {
-		Ok(true) => {
-			// We have the previous block in the store, so we can proceed.
-			Ok(())
-		}
-		Ok(false) => {
-			// We do not have the previous block in the store.
-			// We have not yet processed the previous block so
-			// this block is an orphan (for now).
-			Err(ErrorKind::Orphan.into())
-		}
-		Err(e) => Err(ErrorKind::StoreErr(e, "pipe get previous".to_owned()).into()),
-	}
+// Find the previous header from the store.
+// Return an Orphan error if we cannot find the previous header.
+fn prev_header_store(header: &BlockHeader, batch: &mut store::Batch) -> Result<BlockHeader, Error> {
+	let prev = batch.get_previous_header(&header).map_err(|e| match e {
+		grin_store::Error::NotFoundErr(_) => ErrorKind::Orphan,
+		_ => ErrorKind::StoreErr(e, "check prev header".into()),
+	})?;
+	Ok(prev)
 }
 /// First level of block validation that only needs to act on the block header
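
prev_header_store replaces both the old check_prev_store and the open-coded match in validate_header: a store NotFoundErr becomes Orphan, and any other store failure is wrapped as a store error. A standalone sketch of that map_err shape, using stand-in error types rather than grin_store::Error and the chain ErrorKind:

// Standalone sketch: StoreError and ChainError are stand-ins for
// grin_store::Error and the chain ErrorKind.
#[derive(Debug)]
enum StoreError {
	NotFound,
	Io(String),
}

#[derive(Debug)]
enum ChainError {
	Orphan,
	Store(String),
}

// stand-in for batch.get_previous_header(): only heights > 0 have a parent
fn get_previous_header(height: u64) -> Result<u64, StoreError> {
	if height > 0 {
		Ok(height - 1)
	} else {
		Err(StoreError::NotFound)
	}
}

// NotFound maps to Orphan; any other store failure is surfaced as a store error.
fn prev_header_store(height: u64) -> Result<u64, ChainError> {
	let prev = get_previous_header(height).map_err(|e| match e {
		StoreError::NotFound => ChainError::Orphan,
		StoreError::Io(msg) => ChainError::Store(msg),
	})?;
	Ok(prev)
}

fn main() {
	assert!(matches!(prev_header_store(5), Ok(4)));
	assert!(matches!(prev_header_store(0), Err(ChainError::Orphan)));
}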
@@ -416,17 +381,8 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), E
 		}
 	}
-	// first I/O cost, better as late as possible
-	let prev = match ctx.batch.get_previous_header(&header) {
-		Ok(prev) => prev,
-		Err(grin_store::Error::NotFoundErr(_)) => return Err(ErrorKind::Orphan.into()),
-		Err(e) => {
-			return Err(ErrorKind::StoreErr(
-				e,
-				format!("Failed to find previous header to {}", header.hash()),
-			).into())
-		}
-	};
+	// First I/O cost, delayed as late as possible.
+	let prev = prev_header_store(header, &mut ctx.batch)?;
 	// make sure this header has a height exactly one higher than the previous
 	// header
@@ -461,7 +417,6 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), E
 	// explicit check to ensure total_difficulty has increased by exactly
 	// the _network_ difficulty of the previous block
 	// (during testnet1 we use _block_ difficulty here)
-	let prev = ctx.batch.get_previous_header(&header)?;
 	let child_batch = ctx.batch.child()?;
 	let diff_iter = store::DifficultyIter::from_batch(prev.hash(), child_batch);
 	let next_header_info = consensus::next_difficulty(header.height, diff_iter);