grin/chain/src/pipe.rs

// Copyright 2018 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Implementation of the chain block acceptance (or refusal) pipeline.

use std::sync::{Arc, RwLock};
use time;

use core::consensus;
use core::core::hash::{Hash, Hashed};
use core::core::target::Difficulty;
use core::core::{Block, BlockHeader};
use core::global;
use grin_store;
use store;
use txhashset;
use types::*;
use util::LOGGER;

/// Contextual information required to process a new block and either reject or
/// accept it.
pub struct BlockContext {
	/// The options
	pub opts: Options,
	/// The store
	pub store: Arc<ChainStore>,
	/// The head
	pub head: Tip,
	/// The POW verification function
	pub pow_verifier: fn(&BlockHeader, u8) -> bool,
	/// MMR sum tree states
	pub txhashset: Arc<RwLock<txhashset::TxHashSet>>,
}
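
// Note on pow_verifier: taking the verification function as a plain fn
// pointer keeps this pipeline decoupled from the pow implementation
// (presumably also easing testing with a stubbed verifier). The u8 argument
// is the cuckoo sizeshift, supplied via global::sizeshift() in
// validate_header below.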

/// Runs the block processing pipeline, including validation and finding a
/// place for the new block in the chain. Returns the new chain head if
/// updated.
pub fn process_block(b: &Block, mut ctx: BlockContext) -> Result<Option<Tip>, Error> {
	// TODO should just take a promise for a block with a full header so we don't
	// spend resources reading the full block when its header is invalid

	debug!(
		LOGGER,
		"pipe: process_block {} at {} with {} inputs, {} outputs, {} kernels",
		b.hash(),
		b.header.height,
		b.inputs.len(),
		b.outputs.len(),
		b.kernels.len(),
	);

	check_known(b.hash(), &mut ctx)?;
	validate_header(&b.header, &mut ctx)?;

	// valid header, now check we actually have the previous block in the
	// store, not just its header. Short circuit the check first, both for
	// performance (in-mem vs db access) and for the specific case of the
	// first full block after fast sync.
	if b.header.previous != ctx.head.last_block_h {
		// we cannot assume we can use the chain head for this as we may be
		// dealing with a fork; we cannot use heights either as the fork may
		// have jumped in height
		match ctx.store.block_exists(&b.header.previous) {
			Ok(true) => {}
			Ok(false) => {
				return Err(Error::Orphan);
			}
			Err(e) => {
				return Err(Error::StoreErr(e, "pipe get previous".to_owned()));
			}
		}
	}

	// valid header and we have a previous block, time to take the lock on
	// the sum trees
	let local_txhashset = ctx.txhashset.clone();
	let mut txhashset = local_txhashset.write().unwrap();

	// update head now that we're in the lock
	ctx.head = ctx.store
		.head()
		.map_err(|e| Error::StoreErr(e, "pipe reload head".to_owned()))?;

	// start a chain extension unit of work dependent on the success of the
	// internal validation and saving operations
	let result = txhashset::extending(&mut txhashset, |mut extension| {
		validate_block(b, &mut ctx, &mut extension)?;
		trace!(
			LOGGER,
			"pipe: process_block: {} at {} is valid, save and append.",
			b.hash(),
			b.header.height,
		);

		add_block(b, &mut ctx)?;
		let h = update_head(b, &mut ctx)?;
		if h.is_none() {
			extension.force_rollback();
		}
		Ok(h)
	});
	result
}
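
// A minimal caller sketch (hypothetical, for orientation only; the real entry
// point lives in the chain crate, and `Options::empty()` / `pow::verify_size`
// are assumed names here, not confirmed by this file):
//
//     let ctx = BlockContext {
//         opts: Options::empty(),
//         store: store.clone(),
//         head: store.head()?,
//         pow_verifier: pow::verify_size,
//         txhashset: txhashset.clone(),
//     };
//     match process_block(&b, ctx) {
//         Ok(Some(tip)) => { /* head advanced, relay the block */ }
//         Ok(None) => { /* valid, but not the most-work chain */ }
//         Err(Error::Orphan) => { /* parent missing, request it */ }
//         Err(e) => { /* reject the block */ }
//     }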

/// Process the block header.
/// This is only ever used during sync and uses a context based on sync_head.
pub fn sync_block_header(
	bh: &BlockHeader,
	mut sync_ctx: BlockContext,
	mut header_ctx: BlockContext,
) -> Result<Option<Tip>, Error> {
	debug!(
		LOGGER,
		"pipe: sync_block_header: {} at {}",
		bh.hash(),
		bh.height
	);

	validate_header(&bh, &mut sync_ctx)?;
	add_block_header(bh, &mut sync_ctx)?;

	// now update the header_head (if new header with most work) and the
	// sync_head (always)
	update_header_head(bh, &mut header_ctx)?;
	update_sync_head(bh, &mut sync_ctx)
}
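
// Note: header_head and sync_head can diverge. update_header_head only moves
// to the header with the most total work, while update_sync_head follows
// every header accepted here, so body sync can resume from where it left off
// even if a better fork shows up mid-sync.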

/// Process block header as part of "header first" block propagation.
/// We validate the header but we do not store it or update header head based
/// on this. We will update these once we get the block back after requesting
/// it.
pub fn process_block_header(bh: &BlockHeader, mut ctx: BlockContext) -> Result<(), Error> {
	debug!(
		LOGGER,
		"pipe: process_block_header at {} [{}]",
		bh.height,
		bh.hash()
	); // keep this

	check_header_known(bh.hash(), &mut ctx)?;
	validate_header(&bh, &mut ctx)
}

/// Quick in-memory check to fast-reject any block header we've already handled
/// recently. Keeps duplicates from the network in check.
/// ctx here is specific to the header_head (tip of the header chain)
fn check_header_known(bh: Hash, ctx: &mut BlockContext) -> Result<(), Error> {
	// TODO ring buffer of the last few blocks that came through here
	if bh == ctx.head.last_block_h || bh == ctx.head.prev_block_h {
		return Err(Error::Unfit("already known".to_string()));
	}
	if let Ok(h) = ctx.store.get_block_header(&bh) {
		// there is a window where a block header can be saved but the chain
		// head not updated yet, we plug that window here by re-accepting the
		// block header
		if h.total_difficulty <= ctx.head.total_difficulty {
			return Err(Error::Unfit("already in store".to_string()));
		}
	}
	Ok(())
}

/// Quick in-memory check to fast-reject any block we've already handled
/// recently. Keeps duplicates from the network in check.
fn check_known(bh: Hash, ctx: &mut BlockContext) -> Result<(), Error> {
	// TODO ring buffer of the last few blocks that came through here
	if bh == ctx.head.last_block_h || bh == ctx.head.prev_block_h {
		return Err(Error::Unfit("already known".to_string()));
	}
	if let Ok(b) = ctx.store.get_block(&bh) {
		// there is a window where a block can be saved but the chain head not
		// updated yet, we plug that window here by re-accepting the block
		if b.header.total_difficulty <= ctx.head.total_difficulty {
			return Err(Error::Unfit("already in store".to_string()));
		}
	}
	Ok(())
}
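
// The store lookup in check_known (and its header twin above) plugs a race:
// a block can already be saved while the corresponding head update is still
// pending. Such a block is only treated as a duplicate once its total
// difficulty no longer exceeds the current head; otherwise it is re-accepted
// so the head still advances.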

/// First level of block validation that only needs to act on the block header
/// to make it as cheap as possible. The different validations are also
/// arranged by order of cost to have as little DoS surface as possible.
/// TODO require only the block header (with length information)
fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), Error> {
	// check version, enforces scheduled hard fork
	if !consensus::valid_header_version(header.height, header.version) {
		error!(
			LOGGER,
			"Invalid block header version received ({}), maybe update Grin?", header.version
		);
		return Err(Error::InvalidBlockVersion(header.version));
	}

	// TODO: remove CI check from here somehow
	if header.timestamp
		> time::now_utc() + time::Duration::seconds(12 * (consensus::BLOCK_TIME_SEC as i64))
		&& !global::is_automated_testing_mode()
	{
		// refuse blocks more than 12 block intervals in the future (as in bitcoin)
		// TODO add warning in p2p code if local time is too different from peers
		return Err(Error::InvalidBlockTime);
	}

	if !ctx.opts.contains(Options::SKIP_POW) {
		let n = global::sizeshift();
		if !(ctx.pow_verifier)(header, n) {
			error!(
				LOGGER,
				"pipe: validate_header failed for cuckoo shift size {}", n
			);
			return Err(Error::InvalidPow);
		}
		if header.height % 500 == 0 {
			debug!(
				LOGGER,
				"Validated block header using cuckoo shift size {}", n
			);
		}
	}

	// first I/O cost, better as late as possible
	let prev = match ctx.store.get_block_header(&header.previous) {
		Ok(prev) => Ok(prev),
		Err(grin_store::Error::NotFoundErr) => Err(Error::Orphan),
		Err(e) => Err(Error::StoreErr(
			e,
			format!("previous header {}", header.previous),
		)),
	}?;

	// make sure this header has a height exactly one higher than the previous
	// header
	if header.height != prev.height + 1 {
		return Err(Error::InvalidBlockHeight);
	}

	// TODO - get rid of the automated testing mode check here somehow
	if header.timestamp <= prev.timestamp && !global::is_automated_testing_mode() {
		// prevent time warp attacks and some timestamp manipulations by
		// forcing strict time progression (but not in CI mode)
		return Err(Error::InvalidBlockTime);
	}

	// verify the proof of work and related parameters. At this point we have
	// a previous block header, and we know the height increased by one, so
	// we can check both that the total_difficulty increase is valid and that
	// the pow hash shows a difficulty at least as large as the target
	// difficulty.
	if !ctx.opts.contains(Options::SKIP_POW) {
		if header.total_difficulty.clone() <= prev.total_difficulty.clone() {
			return Err(Error::DifficultyTooLow);
		}

		let target_difficulty = header.total_difficulty.clone() - prev.total_difficulty.clone();

		if header.pow.clone().to_difficulty() < target_difficulty {
			return Err(Error::DifficultyTooLow);
		}

		// explicit check to ensure we are not below the minimum difficulty
		// we will also check difficulty based on next_difficulty later on
		if target_difficulty < Difficulty::one() {
			return Err(Error::DifficultyTooLow);
		}

		// explicit check to ensure total_difficulty has increased by exactly
		// the _network_ difficulty of the previous block
		// (during testnet1 we use _block_ difficulty here)
		let diff_iter = store::DifficultyIter::from(header.previous, ctx.store.clone());
		let network_difficulty =
			consensus::next_difficulty(diff_iter).map_err(|e| Error::Other(e.to_string()))?;
		if target_difficulty != network_difficulty.clone() {
			error!(
				LOGGER,
				"validate_header: BANNABLE OFFENCE: header cumulative difficulty {} != {}",
				header.total_difficulty.into_num(),
				prev.total_difficulty.into_num() + network_difficulty.into_num()
			);
			return Err(Error::WrongTotalDifficulty);
		}
	}
	Ok(())
}
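
// Worked example with illustrative numbers: if prev.total_difficulty is 1000
// and the incoming header claims total_difficulty 1010, target_difficulty is
// 10. The header is accepted only if its pow hash meets a difficulty of at
// least 10 and next_difficulty over the recent window is exactly 10;
// otherwise it is rejected, the mismatch case as a bannable
// WrongTotalDifficulty offence.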

/// Fully validate the block content.
fn validate_block(
	b: &Block,
	ctx: &mut BlockContext,
	ext: &mut txhashset::Extension,
) -> Result<(), Error> {
	let prev_header = ctx.store.get_block_header(&b.header.previous)?;

	// main isolated block validation, checks all commitment sums and sigs
	b.validate(&prev_header).map_err(&Error::InvalidBlockProof)?;

	if b.header.previous != ctx.head.last_block_h {
		rewind_and_apply_fork(b, ctx.store.clone(), ext)?;
	}

	// apply the new block to the MMR trees and check the new root hashes
	ext.apply_block(&b)?;

	let roots = ext.roots();
	if roots.output_root != b.header.output_root
		|| roots.rproof_root != b.header.range_proof_root
		|| roots.kernel_root != b.header.kernel_root
	{
		ext.dump(false);

		debug!(
			LOGGER,
			"validate_block: output roots - {:?}, {:?}", roots.output_root, b.header.output_root,
		);
		debug!(
			LOGGER,
			"validate_block: rproof roots - {:?}, {:?}",
			roots.rproof_root,
			b.header.range_proof_root,
		);
		debug!(
			LOGGER,
			"validate_block: kernel roots - {:?}, {:?}", roots.kernel_root, b.header.kernel_root,
		);

		return Err(Error::InvalidRoot);
	}

	Ok(())
}
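
// The root comparison above is what ties the block body to the txhashset
// state: if the locally computed output, range proof or kernel MMR roots
// differ from those committed to in the header, the block is rejected and
// the whole extension is discarded rather than committed.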

/// Officially adds the block to our chain.
fn add_block(b: &Block, ctx: &mut BlockContext) -> Result<(), Error> {
	ctx.store
		.save_block(b)
		.map_err(|e| Error::StoreErr(e, "pipe save block".to_owned()))
}

/// Officially adds the block header to our header chain.
fn add_block_header(bh: &BlockHeader, ctx: &mut BlockContext) -> Result<(), Error> {
	ctx.store
		.save_block_header(bh)
		.map_err(|e| Error::StoreErr(e, "pipe save header".to_owned()))
}

/// Directly updates the head if we've just appended a new block to it, or
/// handles the situation where we've just added enough work to have a fork
/// with more work than the head.
fn update_head(b: &Block, ctx: &mut BlockContext) -> Result<Option<Tip>, Error> {
	// if we made a fork with more work than the head (which should also be
	// true when extending the head), update it
	let tip = Tip::from_block(&b.header);
	if tip.total_difficulty > ctx.head.total_difficulty {
		// update the block height index
		ctx.store
			.setup_height(&b.header, &ctx.head)
			.map_err(|e| Error::StoreErr(e, "pipe setup height".to_owned()))?;

		// in sync mode, only update the "body chain", otherwise update both
		// the "header chain" and "body chain"; updating the header chain in
		// sync resets all additional "future" headers we've received
		if ctx.opts.contains(Options::SYNC) {
			ctx.store
				.save_body_head(&tip)
				.map_err(|e| Error::StoreErr(e, "pipe save body".to_owned()))?;
		} else {
			ctx.store
				.save_head(&tip)
				.map_err(|e| Error::StoreErr(e, "pipe save head".to_owned()))?;
		}
		ctx.head = tip.clone();
		if b.header.height % 100 == 0 {
			info!(
				LOGGER,
				"pipe: chain head reached {} @ {} [{}]",
				b.header.height,
				b.header.total_difficulty,
				b.hash()
			);
		} else {
			debug!(
				LOGGER,
				"pipe: chain head reached {} @ {} [{}]",
				b.header.height,
				b.header.total_difficulty,
				b.hash()
			);
		}
		Ok(Some(tip))
	} else {
		Ok(None)
	}
}
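
// Note: update_head uses a strict greater-than comparison on total
// difficulty, so a fork with merely equal work never displaces the current
// head; the first block seen wins ties.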

/// Update the sync head so we can keep syncing from where we left off.
fn update_sync_head(bh: &BlockHeader, ctx: &mut BlockContext) -> Result<Option<Tip>, Error> {
	let tip = Tip::from_block(bh);
	ctx.store
		.save_sync_head(&tip)
		.map_err(|e| Error::StoreErr(e, "pipe save sync head".to_owned()))?;
	ctx.head = tip.clone();
	if bh.height % 100 == 0 {
		info!(
			LOGGER,
			"sync head {} @ {} [{}]",
			bh.total_difficulty,
			bh.height,
			bh.hash()
		);
	} else {
		debug!(
			LOGGER,
			"sync head {} @ {} [{}]",
			bh.total_difficulty,
			bh.height,
			bh.hash()
		);
	}
	Ok(Some(tip))
}
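
/// Updates the header head if this header represents more total work than
/// the current header head. Returns the new tip when updated, None otherwise.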
fn update_header_head(bh: &BlockHeader, ctx: &mut BlockContext) -> Result<Option<Tip>, Error> {
	let tip = Tip::from_block(bh);
	if tip.total_difficulty > ctx.head.total_difficulty {
		ctx.store
			.save_header_head(&tip)
			.map_err(|e| Error::StoreErr(e, "pipe save header head".to_owned()))?;
		ctx.head = tip.clone();
		if bh.height % 100 == 0 {
			info!(
				LOGGER,
				"header head {} @ {} [{}]",
				bh.total_difficulty,
				bh.height,
				bh.hash()
			);
		} else {
			debug!(
				LOGGER,
				"header head {} @ {} [{}]",
				bh.total_difficulty,
				bh.height,
				bh.hash()
			);
		}
		Ok(Some(tip))
	} else {
		Ok(None)
	}
}

/// Utility function to handle forks. From the forked block, jump backward
/// to find the fork root. Rewind the txhashset to the root and apply all the
/// forked blocks prior to the one being processed to set the txhashset in
/// the expected state.
pub fn rewind_and_apply_fork(
	b: &Block,
	store: Arc<ChainStore>,
	ext: &mut txhashset::Extension,
) -> Result<(), Error> {
	// extending a fork, first identify the block where forking occurred
	// keeping the hashes of blocks along the fork
	let mut current = b.header.previous;
	let mut hashes = vec![];
	loop {
		let curr_header = store.get_block_header(&current)?;
		if let Ok(_) = store.is_on_current_chain(&curr_header) {
			break;
		} else {
			hashes.insert(0, (curr_header.height, curr_header.hash()));
			current = curr_header.previous;
		}
	}

	let forked_block = store.get_block_header(&current)?;

	debug!(
		LOGGER,
		"rewind_and_apply_fork @ {} [{}], was @ {} [{}]",
		forked_block.height,
		forked_block.hash(),
		b.header.height,
		b.header.hash()
	);

	// rewind the sum trees up to the forking block
	ext.rewind(&forked_block)?;

	debug!(
		LOGGER,
		"rewind_and_apply_fork: blocks on fork: {:?}", hashes
	);

	// apply all the forked blocks up to and including the parent of the one
	// being processed (the new block itself is applied by the caller)
	for (_, h) in hashes {
		let fb = store
			.get_block(&h)
			.map_err(|e| Error::StoreErr(e, "getting forked blocks".to_owned()))?;
		ext.apply_block(&fb)?;
	}
	Ok(())
}
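
// Illustration: with current chain A-B-C and a new block E whose parent is D
// on fork A-B-D, the loop walks back from D and stops at B, the first header
// found on the current chain. We rewind the txhashset to B, re-apply D from
// the store, and leave E itself to be applied by the caller (see
// validate_block).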