// Copyright 2018 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Implementation of the chain block acceptance (or refusal) pipeline.

use std::sync::{Arc, RwLock};

use chrono::prelude::Utc;
use chrono::Duration;
use lru_cache::LruCache;

use chain::OrphanBlockPool;
use core::consensus;
use core::core::hash::{Hash, Hashed};
use core::core::verifier_cache::VerifierCache;
use core::core::Committed;
use core::core::{Block, BlockHeader, BlockSums};
use core::global;
use core::pow::{self, Difficulty};
use error::{Error, ErrorKind};
use failure::ResultExt;
use grin_store;
use store;
use txhashset;
use types::{Options, Tip};
use util::LOGGER;

/// Contextual information required to process a new block and either reject
/// or accept it.
pub struct BlockContext {
	/// The options for processing the block (e.g. SYNC, SKIP_POW)
	pub opts: Options,
	/// The current chain head
	pub head: Tip,
	/// The current header chain head
	pub header_head: Tip,
	/// The current sync head
	pub sync_head: Tip,
	/// The POW verification function
	pub pow_verifier: fn(&BlockHeader, u8) -> Result<(), pow::Error>,
	/// MMR sum tree states
	pub txhashset: Arc<RwLock<txhashset::TxHashSet>>,
	/// Recently processed blocks, to avoid double-processing
	pub block_hashes_cache: Arc<RwLock<LruCache<Hash, bool>>>,
	/// Recent orphan blocks, to avoid double-processing
	pub orphans: Arc<OrphanBlockPool>,
}
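
// For illustration only (a hedged sketch, not code from this crate): the
// caller, typically the Chain type, assembles the context along these lines.
// The `store_*`, `shared_txhashset`, `cache` and `orphan_pool` names are
// hypothetical placeholders:
//
//   let ctx = BlockContext {
//       opts: Options::NONE,
//       head: store_head,
//       header_head: store_header_head,
//       sync_head: store_sync_head,
//       pow_verifier: pow::verify_size,
//       txhashset: shared_txhashset,
//       block_hashes_cache: cache,
//       orphans: orphan_pool,
//   };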

// Check if this block is the next block *immediately*
// after our current chain head.
fn is_next_block(header: &BlockHeader, ctx: &mut BlockContext) -> bool {
	header.previous == ctx.head.last_block_h
}

/// Runs the block processing pipeline, including validation and finding a
/// place for the new block in the chain. Returns the new chain head if
/// updated.
pub fn process_block(
	b: &Block,
	ctx: &mut BlockContext,
	batch: &mut store::Batch,
	verifier_cache: Arc<RwLock<VerifierCache>>,
) -> Result<Option<Tip>, Error> {
	// TODO should just take a promise for a block with a full header so we don't
	// spend resources reading the full block when its header is invalid

	debug!(
		LOGGER,
		"pipe: process_block {} at {} with {} inputs, {} outputs, {} kernels",
		b.hash(),
		b.header.height,
		b.inputs().len(),
		b.outputs().len(),
		b.kernels().len(),
	);

	// First thing we do is take a write lock on the txhashset.
	// We may receive the same block from multiple peers simultaneously.
	// We want to process the first one fully to avoid redundant work
	// processing the duplicates.
	let txhashset = ctx.txhashset.clone();
	let mut txhashset = txhashset.write().unwrap();

	// Fast in-memory checks to avoid re-processing a block we recently processed.
	{
		// Check if we have recently processed this block (via the ctx chain head).
		check_known_head(&b.header, ctx)?;

		// Check if we have recently processed this block (via the block_hashes_cache).
		check_known_cache(&b.header, ctx)?;

		// Check if this block is already known due to it being in the current set
		// of orphan blocks.
		check_known_orphans(&b.header, ctx)?;
	}
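
	// (The three checks above are ordered cheapest-first: hash comparisons
	// against the chain head, then an LRU cache lookup, then the orphan set.
	// The more expensive store and MMR checks below are only reached if
	// these all pass.)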

	// Header specific processing.
	handle_block_header(&b.header, ctx, batch)?;

	// Check if we are processing the "next" block relative to the current chain head.
	if is_next_block(&b.header, ctx) {
		// If this is the "next" block then either -
		// * common case where we process blocks sequentially.
		// * special case where this is the first full block after a fast sync.
		// Either way we can proceed (and we know the block is new and unprocessed).
	} else {
		// Check we have *this* block in the store.
		// Stop if we have processed this block previously (it is in the store).
		// This is more expensive than the earlier check_known() as we hit the store.
		check_known_store(&b.header, ctx, batch)?;

		// Check the existing MMR (via rewind) to see if this block is known to us already.
		// This should catch old blocks before we check to see if they appear to be
		// orphaned due to compacting/pruning on a fast-sync node.
		// This is more expensive than check_known_store() as we rewind the txhashset.
		// But we only incur the cost of the rewind if this is an earlier block on the same chain.
		check_known_mmr(&b.header, ctx, batch, &mut txhashset)?;

		// At this point it looks like this is a new block that we have not yet processed.
		// Check we have the *previous* block in the store.
		// If we do not then treat this block as an orphan.
		check_prev_store(&b.header, batch)?;
	}

	// Validate the block itself, making sure it is internally consistent.
	// Use the verifier_cache for verifying rangeproofs and kernel signatures.
	validate_block(b, batch, verifier_cache)?;

	// Start a chain extension unit of work dependent on the success of the
	// internal validation and saving operations.
	txhashset::extending(&mut txhashset, batch, |mut extension| {
		// First we rewind the txhashset extension if necessary
		// to put it into a consistent state for validating the block.
		// We can skip this step if the previous header is the latest header we saw.
		if is_next_block(&b.header, ctx) {
			// No need to rewind if we are processing the next block.
		} else {
			// Rewind and re-apply blocks on the forked chain to
			// put the txhashset in the correct forked state
			// (immediately prior to this new block).
			rewind_and_apply_fork(b, extension)?;
		}

		// Check that any coinbase outputs being spent have matured sufficiently.
		// This needs to be done within the context of a potentially
		// rewound txhashset extension to reflect chain state prior
		// to applying the new block.
		verify_coinbase_maturity(b, &mut extension)?;

		// Validate the block against the UTXO set.
		validate_utxo(b, &mut extension)?;

		// Using block_sums (utxo_sum, kernel_sum) for the previous block from the db
		// we can verify_kernel_sums across the full UTXO sum and full kernel sum
		// accounting for inputs/outputs/kernels in this new block.
		// We know there are no double-spends etc. if this verifies successfully.
		verify_block_sums(b, &mut extension)?;

		// Apply the block to the txhashset state.
		// Validate the txhashset roots and sizes against the block header.
		// Block is invalid if there are any discrepancies.
		apply_block_to_txhashset(b, &mut extension)?;

		// If applying this block does not increase the work on the chain then
		// we know we have not yet updated the chain to produce a new chain head.
		if !block_has_more_work(&b.header, &ctx.head) {
			extension.force_rollback();
		}

		Ok(())
	})?;

	trace!(
		LOGGER,
		"pipe: process_block: {} at {} is valid, save and append.",
		b.hash(),
		b.header.height,
	);

	// Add the newly accepted block and header to our index.
	add_block(b, batch)?;

	// Update the chain head in the index (if necessary).
	let res = update_head(b, ctx, batch)?;

	// Return the new chain tip if we added work, or
	// None if this block has not added work.
	Ok(res)
}
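
// Sketch of how a caller is expected to drive this entry point (hedged: the
// real plumbing lives in the Chain type and is paraphrased here, not quoted):
//
//   let mut batch = store.batch()?;
//   match pipe::process_block(&b, &mut ctx, &mut batch, verifier_cache) {
//       Ok(head) => batch.commit()?, // head is Some(tip) only if work increased
//       Err(e) => {
//           // An ErrorKind::Orphan gets the block stashed for a later retry;
//           // other errors reject the block outright.
//       }
//   }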

/// Process a batch of block headers.
/// This is only ever used during sync and uses a context based on sync_head.
pub fn sync_block_headers(
	headers: &Vec<BlockHeader>,
	ctx: &mut BlockContext,
	batch: &mut store::Batch,
) -> Result<Tip, Error> {
	if let Some(header) = headers.first() {
		debug!(
			LOGGER,
			"pipe: sync_block_headers: {} headers from {} at {}",
			headers.len(),
			header.hash(),
			header.height,
		);
	}

	let mut sync_tip = batch.get_sync_head()?;

	for header in headers {
		handle_block_header(header, ctx, batch)?;

		// Update sync_head regardless of total work.
		// We may be syncing a long fork that will *eventually* increase the work
		// and become the "most work" chain.
		// header_head and sync_head will diverge in this situation until we switch to
		// a single "most work" chain.
		sync_tip = update_sync_head(header, ctx, batch)?;
	}

	Ok(sync_tip)
}
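
// Illustration of the sync_head / header_head divergence noted above (hedged,
// descriptive only): while syncing a fork B whose total work is still below
// our current chain A, sync_head advances along B's headers while header_head
// stays on A. Once B's cumulative work exceeds A's, update_header_head() moves
// header_head onto B as well and the two tips converge again.

/// Validate a single header and add it to our header chain,
/// updating header_head if (and only if) it increases our total known work.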
fn handle_block_header(
	header: &BlockHeader,
	ctx: &mut BlockContext,
	batch: &mut store::Batch,
) -> Result<(), Error> {
	validate_header(header, ctx, batch)?;
	add_block_header(header, batch)?;

	// Update header_head (but only if this header increases our total known work).
	// i.e. Only if this header is now the head of the current "most work" chain.
	update_header_head(header, ctx, batch)?;
	Ok(())
}

/// Process a block header as part of "header first" block propagation.
/// We validate the header but we do not store it or update the header head
/// based on this. We will update these once we get the block back after
/// requesting it.
pub fn process_block_header(
	bh: &BlockHeader,
	ctx: &mut BlockContext,
	batch: &mut store::Batch,
) -> Result<(), Error> {
	debug!(
		LOGGER,
		"pipe: process_block_header at {} [{}]",
		bh.height,
		bh.hash()
	); // keep this

	check_header_known(bh.hash(), ctx)?;
	validate_header(&bh, ctx, batch)
}
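
// "Header first" propagation in brief (hedged restatement of the doc comment
// above): a peer relays just the header; we run the cheap header checks here,
// then request the full block. The header is only persisted, and the heads
// updated, once the full block arrives and passes through process_block().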

/// Quick in-memory check to fast-reject any block header we've already handled
/// recently. Keeps duplicates from the network in check.
/// ctx here is specific to the header_head (tip of the header chain).
fn check_header_known(bh: Hash, ctx: &mut BlockContext) -> Result<(), Error> {
	if bh == ctx.header_head.last_block_h || bh == ctx.header_head.prev_block_h {
		return Err(ErrorKind::Unfit("header already known".to_string()).into());
	}
	Ok(())
}

/// Quick in-memory check to fast-reject any block handled recently.
/// Keeps duplicates from the network in check.
/// Checks against the last_block_h and prev_block_h of the chain head.
fn check_known_head(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), Error> {
	let bh = header.hash();
	if bh == ctx.head.last_block_h || bh == ctx.head.prev_block_h {
		return Err(ErrorKind::Unfit("already known in head".to_string()).into());
	}
	Ok(())
}

/// Quick in-memory check to fast-reject any block handled recently.
/// Keeps duplicates from the network in check.
/// Checks against the cache of recently processed block hashes.
fn check_known_cache(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), Error> {
	let mut cache = ctx.block_hashes_cache.write().unwrap();
	if cache.contains_key(&header.hash()) {
		return Err(ErrorKind::Unfit("already known in cache".to_string()).into());
	}
	Ok(())
}

/// Check if this block is in the set of known orphans.
fn check_known_orphans(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), Error> {
	if ctx.orphans.contains(&header.hash()) {
		Err(ErrorKind::Unfit("already known in orphans".to_string()).into())
	} else {
		Ok(())
	}
}

// Check if this block is in the store already.
fn check_known_store(
	header: &BlockHeader,
	ctx: &mut BlockContext,
	batch: &mut store::Batch,
) -> Result<(), Error> {
	match batch.block_exists(&header.hash()) {
		Ok(true) => {
			if header.height < ctx.head.height.saturating_sub(50) {
				// TODO - we flag this as an "abusive peer" but only in the case
				// where we have the full block in our store.
				// So this is not a particularly exhaustive check.
				Err(ErrorKind::OldBlock.into())
			} else {
				Err(ErrorKind::Unfit("already known in store".to_string()).into())
			}
		}
		Ok(false) => {
			// We have not yet processed this block, so we can proceed.
			Ok(())
		}
		Err(e) => Err(ErrorKind::StoreErr(e, "pipe get this block".to_owned()).into()),
	}
}

// Check we have the *previous* block in the store.
// Note: not just the header but the full block itself.
// We cannot assume we can use the chain head for this
// as we may be dealing with a fork (with less work currently).
fn check_prev_store(header: &BlockHeader, batch: &mut store::Batch) -> Result<(), Error> {
	match batch.block_exists(&header.previous) {
		Ok(true) => {
			// We have the previous block in the store, so we can proceed.
			Ok(())
		}
		Ok(false) => {
			// We do not have the previous block in the store.
			// We have not yet processed the previous block so
			// this block is an orphan (for now).
			Err(ErrorKind::Orphan.into())
		}
		Err(e) => Err(ErrorKind::StoreErr(e, "pipe get previous".to_owned()).into()),
	}
}

// If we are processing an "old" block then
// we can quickly check if it already exists
// on our current longest chain (we have already processed it).
// First check the header matches via the current height index.
// Then peek directly into the MMRs at the appropriate pos.
// We can avoid a full rewind in this case.
fn check_known_mmr(
	header: &BlockHeader,
	ctx: &mut BlockContext,
	batch: &mut store::Batch,
	write_txhashset: &mut txhashset::TxHashSet,
) -> Result<(), Error> {
	// No point checking the MMR if this block is not earlier in the chain.
	if header.height > ctx.head.height {
		return Ok(());
	}

	// Use the "header by height" index to look at the current most work chain.
	// The header is not "known" if the header differs at the given height.
	let local_header = batch.get_header_by_height(header.height)?;
	if local_header.hash() != header.hash() {
		return Ok(());
	}

	// Rewind the txhashset to the given block and validate
	// roots and sizes against the header.
	// If everything matches then this is a "known" block
	// and we do not need to spend any more effort.
	txhashset::extending_readonly(write_txhashset, |extension| {
		extension.rewind(header)?;

		// We want to return an error here (block already known)
		// if we *successfully* validate the MMR roots and sizes.
		if extension.validate_roots().is_ok() && extension.validate_sizes().is_ok() {
			// TODO - determine if block is more than 50 blocks old
			// and return specific OldBlock error.
			// Or pull OldBlock (abusive peer) out into a separate processing step.

			return Err(ErrorKind::Unfit("already known on most work chain".to_string()).into());
		}

		// If we get here then we have *not* seen this block before
		// and we should continue processing the block.
		Ok(())
	})?;

	Ok(())
}

/// First level of block validation that only needs to act on the block header
/// to make it as cheap as possible. The different validations are also
/// arranged by order of cost to have as little DoS surface as possible.
fn validate_header(
	header: &BlockHeader,
	ctx: &mut BlockContext,
	batch: &mut store::Batch,
) -> Result<(), Error> {
	// check version, enforces scheduled hard fork
	if !consensus::valid_header_version(header.height, header.version) {
		error!(
			LOGGER,
			"Invalid block header version received ({}), maybe update Grin?", header.version
		);
		return Err(ErrorKind::InvalidBlockVersion(header.version).into());
	}

	// TODO: remove CI check from here somehow
	if header.timestamp > Utc::now() + Duration::seconds(12 * (consensus::BLOCK_TIME_SEC as i64))
		&& !global::is_automated_testing_mode()
	{
		// refuse blocks with a timestamp more than 12 block intervals in the future (as in bitcoin)
		// TODO add warning in p2p code if local time is too different from peers
		return Err(ErrorKind::InvalidBlockTime.into());
	}

	if !ctx.opts.contains(Options::SKIP_POW) {
		let shift = header.pow.cuckoo_sizeshift();
		// size shift can either be larger than the minimum on the primary PoW
		// or equal to the secondary PoW size shift
		if shift != consensus::SECOND_POW_SIZESHIFT && global::min_sizeshift() > shift {
			return Err(ErrorKind::LowSizeshift.into());
		}
		// primary PoW must have a scaling factor of 1
		if shift != consensus::SECOND_POW_SIZESHIFT && header.pow.scaling_difficulty != 1 {
			return Err(ErrorKind::InvalidScaling.into());
		}
		if (ctx.pow_verifier)(header, shift).is_err() {
			error!(
				LOGGER,
				"pipe: validate_header bad cuckoo shift size {}", shift
			);
			return Err(ErrorKind::InvalidPow.into());
		}
	}

	// first I/O cost, better as late as possible
	let prev = match batch.get_block_header(&header.previous) {
		Ok(prev) => prev,
		Err(grin_store::Error::NotFoundErr(_)) => return Err(ErrorKind::Orphan.into()),
		Err(e) => {
			return Err(
				ErrorKind::StoreErr(e, format!("previous header {}", header.previous)).into(),
			)
		}
	};

	// make sure this header has a height exactly one higher than the previous
	// header
	if header.height != prev.height + 1 {
		return Err(ErrorKind::InvalidBlockHeight.into());
	}

	// TODO - get rid of the automated testing mode check here somehow
	if header.timestamp <= prev.timestamp && !global::is_automated_testing_mode() {
		// prevent time warp attacks and some timestamp manipulations by forcing strict
		// time progression (but not in CI mode)
		return Err(ErrorKind::InvalidBlockTime.into());
	}

	// verify the proof of work and related parameters
	// at this point we have a previous block header
	// we know the height increased by one
	// so now we can check the total_difficulty increase is also valid
	// check the pow hash shows a difficulty at least as large
	// as the target difficulty
	if !ctx.opts.contains(Options::SKIP_POW) {
		if header.total_difficulty() <= prev.total_difficulty() {
			return Err(ErrorKind::DifficultyTooLow.into());
		}

		let target_difficulty = header.total_difficulty() - prev.total_difficulty();

		if header.pow.to_difficulty() < target_difficulty {
			return Err(ErrorKind::DifficultyTooLow.into());
		}

		// explicit check to ensure we are not below the minimum difficulty
		// we will also check difficulty based on next_difficulty later on
		if target_difficulty < Difficulty::one() {
			return Err(ErrorKind::DifficultyTooLow.into());
		}

		// explicit check to ensure total_difficulty has increased by exactly
		// the _network_ difficulty of the previous block
		// (during testnet1 we used _block_ difficulty here)
		let child_batch = batch.child()?;
		let diff_iter = store::DifficultyIter::from(header.previous, child_batch);
		let network_difficulty = consensus::next_difficulty(diff_iter)
			.context(ErrorKind::Other("network difficulty".to_owned()))?;
		if target_difficulty != network_difficulty {
			error!(
				LOGGER,
				"validate_header: header target difficulty {} != {}",
				target_difficulty.to_num(),
				network_difficulty.to_num()
			);
			return Err(ErrorKind::WrongTotalDifficulty.into());
		}
	}

	Ok(())
}
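
// Worked example of the difficulty checks above (illustrative numbers only):
// if prev.total_difficulty() is 1_000 and next_difficulty() computed over the
// recent window is 25, the new header must claim a total_difficulty() of
// exactly 1_025, and its proof of work must itself hash to a difficulty of at
// least 25.

/// Validate the block in isolation, i.e. its internal consistency:
/// rangeproofs and kernel signatures (via the verifier_cache) and the kernel
/// sums relative to the previous header's totals.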
fn validate_block(
	block: &Block,
	batch: &mut store::Batch,
	verifier_cache: Arc<RwLock<VerifierCache>>,
) -> Result<(), Error> {
	let prev = batch.get_block_header(&block.header.previous)?;
	block
		.validate(
			&prev.total_kernel_offset,
			&prev.total_kernel_sum,
			verifier_cache,
		).map_err(|e| ErrorKind::InvalidBlockProof(e))?;
	Ok(())
}

/// TODO - This can move into the utxo_view.
/// Verify the block is not attempting to spend coinbase outputs
/// before they have sufficiently matured.
/// Note: requires a txhashset extension.
fn verify_coinbase_maturity(block: &Block, ext: &mut txhashset::Extension) -> Result<(), Error> {
	ext.verify_coinbase_maturity(&block.inputs(), block.header.height)?;
	Ok(())
}

/// Some "real magick" verification logic.
/// The (BlockSums, Block) tuple implements Committed...
/// This allows us to verify kernel sums across the full utxo and kernel sets
/// based on the block_sums of the previous block, accounting for the
/// inputs|outputs|kernels of the new block.
fn verify_block_sums(b: &Block, ext: &mut txhashset::Extension) -> Result<(), Error> {
	// Retrieve the block_sums for the previous block.
	let block_sums = ext.batch.get_block_sums(&b.header.previous)?;

	{
		// Now that we have block_sums the total_kernel_sum on the block_header is redundant.
		let prev = ext.batch.get_block_header(&b.header.previous)?;
		if prev.total_kernel_sum != block_sums.kernel_sum {
			return Err(
				ErrorKind::Other("total_kernel_sum in header does not match".to_string()).into(),
			);
		}
	}

	// Overage is based purely on the new block.
	// Previous block_sums have taken all previous overage into account.
	let overage = b.header.overage();

	// Offset on the other hand is the total kernel offset from the new block.
	let offset = b.header.total_kernel_offset();

	// Verify the kernel sums for the block_sums with the new block applied.
	let (utxo_sum, kernel_sum) =
		(block_sums, b as &Committed).verify_kernel_sums(overage, offset)?;

	// Save the new block_sums for the new block to the db via the batch.
	ext.batch.save_block_sums(
		&b.header.hash(),
		&BlockSums {
			utxo_sum,
			kernel_sum,
		},
	)?;

	Ok(())
}
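
// Rough shape of the balance check above (hedged: schematic Mimblewimble
// accounting, not literal code from this crate). With Pedersen commitments
// C = r*G + v*H, verify_kernel_sums checks, up to how the overage term is
// folded in:
//
//   prev_utxo_sum + sum(outputs) - sum(inputs) - overage*H
//       == prev_kernel_sum + sum(kernel excesses) + offset*G
//
// where the block's overage accounts for the coinbase subsidy this block is
// allowed to create. If the equation balances there is no hidden inflation,
// and the resulting (utxo_sum, kernel_sum) pair is saved as the BlockSums
// for this block.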

/// Fully validate the block by applying it to the txhashset extension.
/// Check both the txhashset roots and sizes are correct after applying the block.
fn apply_block_to_txhashset(block: &Block, ext: &mut txhashset::Extension) -> Result<(), Error> {
	ext.apply_block(block)?;
	ext.validate_roots()?;
	ext.validate_sizes()?;
	Ok(())
}

/// Officially adds the block to our chain.
fn add_block(b: &Block, batch: &mut store::Batch) -> Result<(), Error> {
	// Save the block itself to the db (via the batch).
	batch
		.save_block(b)
		.map_err(|e| ErrorKind::StoreErr(e, "pipe save block".to_owned()))?;

	// Build the block_input_bitmap, save it to the db (via the batch) and cache it locally.
	batch.build_and_cache_block_input_bitmap(&b)?;
	Ok(())
}

/// Officially adds the block header to our header chain.
fn add_block_header(bh: &BlockHeader, batch: &mut store::Batch) -> Result<(), Error> {
	batch
		.save_block_header(bh)
		.map_err(|e| ErrorKind::StoreErr(e, "pipe save header".to_owned()).into())
}

/// Directly updates the head if we've just appended a new block to it or handles
/// the situation where we've just added enough work to have a fork with more
/// work than the head.
fn update_head(
	b: &Block,
	ctx: &BlockContext,
	batch: &mut store::Batch,
) -> Result<Option<Tip>, Error> {
	// if we made a fork with more work than the head (which should also be true
	// when extending the head), update it
	if block_has_more_work(&b.header, &ctx.head) {
		// update the block height index
		batch
			.setup_height(&b.header, &ctx.head)
			.map_err(|e| ErrorKind::StoreErr(e, "pipe setup height".to_owned()))?;

		// in sync mode, only update the "body chain", otherwise update both the
		// "header chain" and "body chain"; updating the header chain during sync resets
		// all additional "future" headers we've received
		let tip = Tip::from_block(&b.header);
		if ctx.opts.contains(Options::SYNC) {
			batch
				.save_body_head(&tip)
				.map_err(|e| ErrorKind::StoreErr(e, "pipe save body".to_owned()))?;
		} else {
			batch
				.save_head(&tip)
				.map_err(|e| ErrorKind::StoreErr(e, "pipe save head".to_owned()))?;
		}
		debug!(
			LOGGER,
			"pipe: chain head {} @ {}",
			b.hash(),
			b.header.height
		);
		Ok(Some(tip))
	} else {
		Ok(None)
	}
}

// Whether the provided block totals more work than the chain tip.
fn block_has_more_work(header: &BlockHeader, tip: &Tip) -> bool {
	let block_tip = Tip::from_block(header);
	block_tip.total_difficulty > tip.total_difficulty
}

/// Update the sync head so we can keep syncing from where we left off.
fn update_sync_head(
	bh: &BlockHeader,
	ctx: &mut BlockContext,
	batch: &mut store::Batch,
) -> Result<Tip, Error> {
	let tip = Tip::from_block(bh);
	batch
		.save_sync_head(&tip)
		.map_err(|e| ErrorKind::StoreErr(e, "pipe save sync head".to_owned()))?;
	ctx.sync_head = tip.clone();
	debug!(LOGGER, "sync head {} @ {}", bh.hash(), bh.height);
	Ok(tip)
}
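
/// Update the header head if this header has more total work than our current
/// header_head, i.e. if it now tips the "most work" header chain.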
fn update_header_head(
	bh: &BlockHeader,
	ctx: &mut BlockContext,
	batch: &mut store::Batch,
) -> Result<Option<Tip>, Error> {
	let tip = Tip::from_block(bh);
	if tip.total_difficulty > ctx.header_head.total_difficulty {
		batch
			.save_header_head(&tip)
			.map_err(|e| ErrorKind::StoreErr(e, "pipe save header head".to_owned()))?;
		ctx.header_head = tip.clone();
		debug!(LOGGER, "header head {} @ {}", bh.hash(), bh.height);
		Ok(Some(tip))
	} else {
		Ok(None)
	}
}

/// Utility function to handle forks. From the forked block, jump backward
/// to find the fork root. Rewind the txhashset to the root and apply all the
/// forked blocks prior to the one being processed to set the txhashset in
/// the expected state.
pub fn rewind_and_apply_fork(b: &Block, ext: &mut txhashset::Extension) -> Result<(), Error> {
	// extending a fork, first identify the block where forking occurred,
	// keeping the hashes of blocks along the fork
	let mut current = b.header.previous;
	let mut fork_hashes = vec![];
	loop {
		let curr_header = ext.batch.get_block_header(&current)?;

		if ext.batch.is_on_current_chain(&curr_header).is_ok() {
			break;
		} else {
			fork_hashes.insert(0, (curr_header.height, curr_header.hash()));
			current = curr_header.previous;
		}
	}

	let forked_header = ext.batch.get_block_header(&current)?;

	trace!(
		LOGGER,
		"rewind_and_apply_fork @ {} [{}], was @ {} [{}]",
		forked_header.height,
		forked_header.hash(),
		b.header.height,
		b.header.hash()
	);

	// Rewind the txhashset state back to the block where we forked from the most work chain.
	ext.rewind(&forked_header)?;

	trace!(
		LOGGER,
		"rewind_and_apply_fork: blocks on fork: {:?}",
		fork_hashes,
	);

	// Now re-apply all blocks on this fork.
	for (_, h) in fork_hashes {
		let fb = ext
			.batch
			.get_block(&h)
			.map_err(|e| ErrorKind::StoreErr(e, "getting forked blocks".to_owned()))?;

		// Re-verify coinbase maturity along this fork.
		verify_coinbase_maturity(&fb, ext)?;
		// Validate the block against the UTXO set.
		validate_utxo(&fb, ext)?;
		// Re-verify block_sums to set the block_sums up on this fork correctly.
		verify_block_sums(&fb, ext)?;
		// Re-apply the blocks.
		apply_block_to_txhashset(&fb, ext)?;
	}
	Ok(())
}
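
// Example walk (illustrative heights only): with our chain at height 100 and a
// fork block at height 101 whose ancestry leaves our chain at height 97, the
// loop above collects fork hashes for heights 98..=100, rewind() takes the
// txhashset back to 97, and each fork block is re-verified and re-applied in
// height order before the caller applies the new block itself.

/// Validate the block's inputs and outputs against the current UTXO view.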
fn validate_utxo(block: &Block, ext: &txhashset::Extension) -> Result<(), Error> {
	let utxo = ext.utxo_view();
	utxo.validate_block(block)?;
	Ok(())
}