2020-01-20 14:40:58 +03:00
|
|
|
|
// Copyright 2020 The Grin Developers
|
2016-10-22 21:35:48 +03:00
|
|
|
|
//
|
|
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
|
//
|
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
|
//
|
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
|
// limitations under the License.
|
|
|
|
|
|
2016-10-21 03:06:12 +03:00
|
|
|
|
//! Implementation of the chain block acceptance (or refusal) pipeline.
|
|
|
|
|
|
2018-12-08 02:59:40 +03:00
|
|
|
|
use crate::core::consensus;
|
|
|
|
|
use crate::core::core::hash::Hashed;
|
|
|
|
|
use crate::core::core::verifier_cache::VerifierCache;
|
|
|
|
|
use crate::core::core::Committed;
|
|
|
|
|
use crate::core::core::{Block, BlockHeader, BlockSums};
|
|
|
|
|
use crate::core::pow;
|
|
|
|
|
use crate::error::{Error, ErrorKind};
|
|
|
|
|
use crate::store;
|
|
|
|
|
use crate::txhashset;
|
|
|
|
|
use crate::types::{Options, Tip};
|
|
|
|
|
use crate::util::RwLock;
|
2017-12-04 22:16:57 +03:00
|
|
|
|
use grin_store;
|
2018-12-08 02:59:40 +03:00
|
|
|
|
use std::sync::Arc;
|
2016-10-21 03:06:12 +03:00
|
|
|
|
|
|
|
|
|
/// Contextual information required to process a new block and either reject or
/// accept it.
///
/// Bundles the chain state handles (txhashset, header MMR, db batch) plus the
/// verification helpers a single block-processing pass needs.
pub struct BlockContext<'a> {
	/// The options (e.g. SKIP_POW used by some tests / sync paths).
	pub opts: Options,
	/// The pow verifier to use when processing a block.
	pub pow_verifier: fn(&BlockHeader) -> Result<(), pow::Error>,
	/// The active txhashset (rewindable MMRs) to use for block processing.
	pub txhashset: &'a mut txhashset::TxHashSet,
	/// The active header MMR handle.
	pub header_pmmr: &'a mut txhashset::PMMRHandle<BlockHeader>,
	/// The active batch to use for block processing (db writes go through this
	/// and may be committed or discarded as a unit).
	pub batch: store::Batch<'a>,
	/// The verifier cache (caching verifier for rangeproofs and kernel signatures)
	pub verifier_cache: Arc<RwLock<dyn VerifierCache>>,
}
|
|
|
|
|
|
2018-11-25 13:22:19 +03:00
|
|
|
|
// Check if we already know about this block for various reasons
|
|
|
|
|
// from cheapest to most expensive (delay hitting the db until last).
|
2019-07-04 14:30:22 +03:00
|
|
|
|
fn check_known(header: &BlockHeader, ctx: &mut BlockContext<'_>) -> Result<(), Error> {
|
|
|
|
|
check_known_head(header, ctx)?;
|
|
|
|
|
check_known_store(header, ctx)?;
|
2018-11-25 13:22:19 +03:00
|
|
|
|
Ok(())
|
|
|
|
|
}
|
|
|
|
|
|
2019-08-03 11:07:01 +03:00
|
|
|
|
// Validate only the proof of work in a block header.
|
2019-12-17 18:17:45 +03:00
|
|
|
|
// Used to cheaply validate pow before checking if orphan or continuing block validation.
|
2019-08-03 11:07:01 +03:00
|
|
|
|
fn validate_pow_only(header: &BlockHeader, ctx: &mut BlockContext<'_>) -> Result<(), Error> {
|
2019-12-17 18:17:45 +03:00
|
|
|
|
if ctx.opts.contains(Options::SKIP_POW) {
|
|
|
|
|
// Some of our tests require this check to be skipped (we should revisit this).
|
|
|
|
|
return Ok(());
|
|
|
|
|
}
|
2019-08-03 11:07:01 +03:00
|
|
|
|
if !header.pow.is_primary() && !header.pow.is_secondary() {
|
|
|
|
|
return Err(ErrorKind::LowEdgebits.into());
|
|
|
|
|
}
|
2020-02-05 19:02:07 +03:00
|
|
|
|
if (ctx.pow_verifier)(header).is_err() {
|
2019-08-03 11:07:01 +03:00
|
|
|
|
error!(
|
|
|
|
|
"pipe: error validating header with cuckoo edge_bits {}",
|
2019-12-17 18:17:45 +03:00
|
|
|
|
header.pow.edge_bits(),
|
2019-08-03 11:07:01 +03:00
|
|
|
|
);
|
|
|
|
|
return Err(ErrorKind::InvalidPow.into());
|
|
|
|
|
}
|
|
|
|
|
Ok(())
|
|
|
|
|
}
|
|
|
|
|
|
2016-12-21 04:35:04 +03:00
|
|
|
|
/// Runs the block processing pipeline, including validation and finding a
|
2018-10-05 10:29:33 +03:00
|
|
|
|
/// place for the new block in the chain.
|
|
|
|
|
/// Returns new head if chain head updated.
|
2018-12-08 02:59:40 +03:00
|
|
|
|
pub fn process_block(b: &Block, ctx: &mut BlockContext<'_>) -> Result<Option<Tip>, Error> {
|
2017-11-18 23:34:05 +03:00
|
|
|
|
debug!(
|
2018-12-11 17:11:58 +03:00
|
|
|
|
"pipe: process_block {} at {} [in/out/kern: {}/{}/{}]",
|
2017-07-28 00:13:34 +03:00
|
|
|
|
b.hash(),
|
|
|
|
|
b.header.height,
|
2018-08-16 00:14:48 +03:00
|
|
|
|
b.inputs().len(),
|
|
|
|
|
b.outputs().len(),
|
|
|
|
|
b.kernels().len(),
|
2017-07-28 00:13:34 +03:00
|
|
|
|
);
|
2018-09-03 13:09:53 +03:00
|
|
|
|
|
2018-11-28 20:48:50 +03:00
|
|
|
|
// Check if we have already processed this block previously.
|
2019-07-04 14:30:22 +03:00
|
|
|
|
check_known(&b.header, ctx)?;
|
2018-09-05 12:51:29 +03:00
|
|
|
|
|
2019-12-17 18:17:45 +03:00
|
|
|
|
// Quick pow validation. No point proceeding if this is invalid.
|
|
|
|
|
// We want to do this before we add the block to the orphan pool so we
|
|
|
|
|
// want to do this now and not later during header validation.
|
|
|
|
|
validate_pow_only(&b.header, ctx)?;
|
2018-09-05 12:51:29 +03:00
|
|
|
|
|
2019-12-17 18:17:45 +03:00
|
|
|
|
let head = ctx.batch.head()?;
|
|
|
|
|
let prev = prev_header_store(&b.header, &mut ctx.batch)?;
|
2018-10-15 21:24:01 +03:00
|
|
|
|
|
2018-11-25 13:22:19 +03:00
|
|
|
|
// Block is an orphan if we do not know about the previous full block.
|
|
|
|
|
// Skip this check if we have just processed the previous block
|
|
|
|
|
// or the full txhashset state (fast sync) at the previous block height.
|
2019-12-17 18:17:45 +03:00
|
|
|
|
{
|
|
|
|
|
let is_next = b.header.prev_hash == head.last_block_h;
|
|
|
|
|
if !is_next && !ctx.batch.block_exists(&prev.hash())? {
|
|
|
|
|
return Err(ErrorKind::Orphan.into());
|
|
|
|
|
}
|
2018-09-05 12:51:29 +03:00
|
|
|
|
}
|
2017-02-08 00:50:01 +03:00
|
|
|
|
|
2019-07-26 10:36:24 +03:00
|
|
|
|
// Process the header for the block.
|
|
|
|
|
// Note: We still want to process the full block if we have seen this header before
|
|
|
|
|
// as we may have processed it "header first" and not yet processed the full block.
|
|
|
|
|
process_block_header(&b.header, ctx)?;
|
2017-12-04 22:16:57 +03:00
|
|
|
|
|
2018-09-20 11:19:32 +03:00
|
|
|
|
// Validate the block itself, make sure it is internally consistent.
|
|
|
|
|
// Use the verifier_cache for verifying rangeproofs and kernel signatures.
|
2018-10-05 10:29:33 +03:00
|
|
|
|
validate_block(b, ctx)?;
|
2018-06-22 11:08:06 +03:00
|
|
|
|
|
2018-09-03 18:55:09 +03:00
|
|
|
|
// Start a chain extension unit of work dependent on the success of the
|
2017-12-19 00:18:36 +03:00
|
|
|
|
// internal validation and saving operations
|
2019-09-07 02:28:26 +03:00
|
|
|
|
let ref mut header_pmmr = &mut ctx.header_pmmr;
|
|
|
|
|
let ref mut txhashset = &mut ctx.txhashset;
|
|
|
|
|
let ref mut batch = &mut ctx.batch;
|
2020-01-28 20:23:11 +03:00
|
|
|
|
let block_sums = txhashset::extending(header_pmmr, txhashset, batch, |ext, batch| {
|
|
|
|
|
rewind_and_apply_fork(&prev, ext, batch)?;
|
2018-05-30 23:57:13 +03:00
|
|
|
|
|
2018-09-05 12:51:29 +03:00
|
|
|
|
// Check any coinbase being spent have matured sufficiently.
|
|
|
|
|
// This needs to be done within the context of a potentially
|
|
|
|
|
// rewound txhashset extension to reflect chain state prior
|
|
|
|
|
// to applying the new block.
|
2020-01-28 20:23:11 +03:00
|
|
|
|
verify_coinbase_maturity(b, ext, batch)?;
|
2018-09-05 12:51:29 +03:00
|
|
|
|
|
2018-09-25 13:01:19 +03:00
|
|
|
|
// Validate the block against the UTXO set.
|
2020-01-28 20:23:11 +03:00
|
|
|
|
validate_utxo(b, ext, batch)?;
|
2018-09-25 13:01:19 +03:00
|
|
|
|
|
2018-09-20 11:19:32 +03:00
|
|
|
|
// Using block_sums (utxo_sum, kernel_sum) for the previous block from the db
|
|
|
|
|
// we can verify_kernel_sums across the full UTXO sum and full kernel sum
|
|
|
|
|
// accounting for inputs/outputs/kernels in this new block.
|
|
|
|
|
// We know there are no double-spends etc. if this verifies successfully.
|
2019-07-26 10:36:24 +03:00
|
|
|
|
// Remember to save these to the db later on (regardless of extension rollback)
|
2020-01-28 20:23:11 +03:00
|
|
|
|
let block_sums = verify_block_sums(b, batch)?;
|
2018-09-20 11:19:32 +03:00
|
|
|
|
|
2018-09-03 18:55:09 +03:00
|
|
|
|
// Apply the block to the txhashset state.
|
|
|
|
|
// Validate the txhashset roots and sizes against the block header.
|
|
|
|
|
// Block is invalid if there are any discrepencies.
|
2020-01-28 20:23:11 +03:00
|
|
|
|
apply_block_to_txhashset(b, ext, batch)?;
|
2018-09-03 18:55:09 +03:00
|
|
|
|
|
|
|
|
|
// If applying this block does not increase the work on the chain then
|
|
|
|
|
// we know we have not yet updated the chain to produce a new chain head.
|
2020-01-28 20:23:11 +03:00
|
|
|
|
let head = batch.head()?;
|
2018-10-05 10:29:33 +03:00
|
|
|
|
if !has_more_work(&b.header, &head) {
|
2019-09-07 02:28:26 +03:00
|
|
|
|
ext.extension.force_rollback();
|
2017-09-28 02:46:32 +03:00
|
|
|
|
}
|
2018-09-03 18:55:09 +03:00
|
|
|
|
|
2019-07-26 10:36:24 +03:00
|
|
|
|
Ok(block_sums)
|
2018-07-08 14:42:21 +03:00
|
|
|
|
})?;
|
2018-03-03 12:08:36 +03:00
|
|
|
|
|
2019-07-26 10:36:24 +03:00
|
|
|
|
// Add the validated block to the db along with the corresponding block_sums.
|
2018-11-01 12:51:32 +03:00
|
|
|
|
// We do this even if we have not increased the total cumulative work
|
|
|
|
|
// so we can maintain multiple (in progress) forks.
|
2019-07-26 10:36:24 +03:00
|
|
|
|
add_block(b, &block_sums, &ctx.batch)?;
|
2018-09-03 18:55:09 +03:00
|
|
|
|
|
2019-07-26 10:36:24 +03:00
|
|
|
|
// If we have no "tail" then set it now.
|
2018-11-10 06:27:52 +03:00
|
|
|
|
if ctx.batch.tail().is_err() {
|
|
|
|
|
update_body_tail(&b.header, &ctx.batch)?;
|
|
|
|
|
}
|
|
|
|
|
|
2019-07-26 10:36:24 +03:00
|
|
|
|
if has_more_work(&b.header, &head) {
|
|
|
|
|
let head = Tip::from_header(&b.header);
|
|
|
|
|
update_head(&head, &mut ctx.batch)?;
|
|
|
|
|
Ok(Some(head))
|
|
|
|
|
} else {
|
|
|
|
|
Ok(None)
|
|
|
|
|
}
|
2016-10-21 03:06:12 +03:00
|
|
|
|
}
|
|
|
|
|
|
2019-07-26 10:36:24 +03:00
|
|
|
|
/// Sync a chunk of block headers.
/// This is only used during header sync.
///
/// Fast-exits when the chunk is empty or when the final header is already known
/// and does not improve on the current sync head. Otherwise validates and stores
/// each header, then applies the whole chunk to the sync header MMR.
pub fn sync_block_headers(
	headers: &[BlockHeader],
	ctx: &mut BlockContext<'_>,
) -> Result<(), Error> {
	if headers.is_empty() {
		return Ok(());
	}
	// Safe: the empty case returned above, so `last()` must succeed.
	let last_header = headers.last().expect("last header");

	// Check if we know about all these headers. If so we can accept them quickly.
	// If they *do not* increase total work on the sync chain we are done.
	// If they *do* increase total work then we should process them to update sync_head.
	let sync_head = {
		let hash = ctx.header_pmmr.head_hash()?;
		let header = ctx.batch.get_block_header(&hash)?;
		Tip::from_header(&header)
	};

	if let Ok(existing) = ctx.batch.get_block_header(&last_header.hash()) {
		if !has_more_work(&existing, &sync_head) {
			return Ok(());
		}
	}

	// Validate each header in the chunk and add to our db.
	// Note: This batch may be rolled back later if the MMR does not validate successfully.
	for header in headers {
		validate_header(header, ctx)?;
		add_block_header(header, &ctx.batch)?;
	}

	// Now apply this entire chunk of headers to the sync MMR (ctx is sync MMR specific).
	txhashset::header_extending(&mut ctx.header_pmmr, &mut ctx.batch, |ext, batch| {
		rewind_and_apply_header_fork(&last_header, ext, batch)?;
		Ok(())
	})?;

	Ok(())
}
|
2018-11-01 12:51:32 +03:00
|
|
|
|
|
2019-07-26 10:36:24 +03:00
|
|
|
|
/// Process a block header. Update the header MMR and corresponding header_head if this header
/// increases the total work relative to header_head.
/// Note: In contrast to processing a full block we treat "already known" as success
/// to allow processing to continue (for header itself).
pub fn process_block_header(header: &BlockHeader, ctx: &mut BlockContext<'_>) -> Result<(), Error> {
	// Check this header is not an orphan, we must know about the previous header to continue.
	let prev_header = ctx.batch.get_previous_header(&header)?;

	// Check if we know about the full block for this header.
	// "Known" here is treated as success (Ok) so processing can continue.
	if check_known(header, ctx).is_err() {
		return Ok(());
	}

	// If we have not yet seen the full block then check if we have seen this header.
	// If it does not increase total_difficulty beyond our current header_head
	// then we can (re)accept this header and process the full block (or request it).
	// This header is on a fork and we should still accept it as the fork may eventually win.
	let header_head = {
		let hash = ctx.header_pmmr.head_hash()?;
		let header = ctx.batch.get_block_header(&hash)?;
		Tip::from_header(&header)
	};

	if let Ok(existing) = ctx.batch.get_block_header(&header.hash()) {
		if !has_more_work(&existing, &header_head) {
			return Ok(());
		}
	}

	// Apply the header to the header MMR, validating the root along the way.
	// Roll the extension back if this header does not beat the current header_head
	// (keeps the MMR tracking the most-work header chain only).
	txhashset::header_extending(&mut ctx.header_pmmr, &mut ctx.batch, |ext, batch| {
		rewind_and_apply_header_fork(&prev_header, ext, batch)?;
		ext.validate_root(header)?;
		ext.apply_header(header)?;
		if !has_more_work(&header, &header_head) {
			ext.force_rollback();
		}
		Ok(())
	})?;

	// Full contextual validation (difficulty, timestamps, version) then persist.
	validate_header(header, ctx)?;
	add_block_header(header, &ctx.batch)?;

	Ok(())
}
|
|
|
|
|
|
2018-09-05 12:51:29 +03:00
|
|
|
|
/// Quick in-memory check to fast-reject any block handled recently.
|
|
|
|
|
/// Keeps duplicates from the network in check.
|
|
|
|
|
/// Checks against the last_block_h and prev_block_h of the chain head.
|
2018-12-08 02:59:40 +03:00
|
|
|
|
fn check_known_head(header: &BlockHeader, ctx: &mut BlockContext<'_>) -> Result<(), Error> {
|
2018-10-05 10:29:33 +03:00
|
|
|
|
let head = ctx.batch.head()?;
|
2018-09-05 12:51:29 +03:00
|
|
|
|
let bh = header.hash();
|
2018-10-05 10:29:33 +03:00
|
|
|
|
if bh == head.last_block_h || bh == head.prev_block_h {
|
2018-09-05 12:51:29 +03:00
|
|
|
|
return Err(ErrorKind::Unfit("already known in head".to_string()).into());
|
2016-12-21 04:35:04 +03:00
|
|
|
|
}
|
2018-09-05 12:51:29 +03:00
|
|
|
|
Ok(())
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Check if this block is in the store already.
|
2018-12-08 02:59:40 +03:00
|
|
|
|
fn check_known_store(header: &BlockHeader, ctx: &mut BlockContext<'_>) -> Result<(), Error> {
|
2018-10-05 10:29:33 +03:00
|
|
|
|
match ctx.batch.block_exists(&header.hash()) {
|
2018-09-05 12:51:29 +03:00
|
|
|
|
Ok(true) => {
|
2018-10-05 10:29:33 +03:00
|
|
|
|
let head = ctx.batch.head()?;
|
|
|
|
|
if header.height < head.height.saturating_sub(50) {
|
2018-09-05 12:51:29 +03:00
|
|
|
|
// TODO - we flag this as an "abusive peer" but only in the case
|
|
|
|
|
// where we have the full block in our store.
|
|
|
|
|
// So this is not a particularly exhaustive check.
|
|
|
|
|
Err(ErrorKind::OldBlock.into())
|
|
|
|
|
} else {
|
|
|
|
|
Err(ErrorKind::Unfit("already known in store".to_string()).into())
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
Ok(false) => {
|
|
|
|
|
// Not yet processed this block, we can proceed.
|
|
|
|
|
Ok(())
|
|
|
|
|
}
|
2020-02-12 21:35:33 +03:00
|
|
|
|
Err(e) => Err(ErrorKind::StoreErr(e, "pipe get this block".to_owned()).into()),
|
2018-09-05 12:51:29 +03:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2018-11-25 13:22:19 +03:00
|
|
|
|
// Find the previous header from the store.
|
|
|
|
|
// Return an Orphan error if we cannot find the previous header.
|
2018-12-08 02:59:40 +03:00
|
|
|
|
fn prev_header_store(
|
|
|
|
|
header: &BlockHeader,
|
|
|
|
|
batch: &mut store::Batch<'_>,
|
|
|
|
|
) -> Result<BlockHeader, Error> {
|
2018-11-25 13:22:19 +03:00
|
|
|
|
let prev = batch.get_previous_header(&header).map_err(|e| match e {
|
|
|
|
|
grin_store::Error::NotFoundErr(_) => ErrorKind::Orphan,
|
|
|
|
|
_ => ErrorKind::StoreErr(e, "check prev header".into()),
|
|
|
|
|
})?;
|
|
|
|
|
Ok(prev)
|
2018-09-05 12:51:29 +03:00
|
|
|
|
}
|
|
|
|
|
|
2017-12-16 06:19:04 +03:00
|
|
|
|
/// First level of block validation that only needs to act on the block header
/// to make it as cheap as possible. The different validations are also
/// arranged by order of cost to have as little DoS surface as possible.
fn validate_header(header: &BlockHeader, ctx: &mut BlockContext<'_>) -> Result<(), Error> {
	// First I/O cost, delayed as late as possible.
	let prev = prev_header_store(header, &mut ctx.batch)?;

	// This header height must increase the height from the previous header by exactly 1.
	if header.height != prev.height + 1 {
		return Err(ErrorKind::InvalidBlockHeight.into());
	}

	// This header must have a valid header version for its height.
	if !consensus::valid_header_version(header.height, header.version) {
		return Err(ErrorKind::InvalidBlockVersion(header.version).into());
	}

	if header.timestamp <= prev.timestamp {
		// prevent time warp attacks and some timestamp manipulations by forcing strict
		// time progression
		return Err(ErrorKind::InvalidBlockTime.into());
	}

	// verify the proof of work and related parameters
	// at this point we have a previous block header
	// we know the height increased by one
	// so now we can check the total_difficulty increase is also valid
	// check the pow hash shows a difficulty at least as large
	// as the target difficulty
	if !ctx.opts.contains(Options::SKIP_POW) {
		// Quick check of this header in isolation. No point proceeding if this fails.
		// We can do this without needing to iterate over previous headers.
		validate_pow_only(header, ctx)?;

		// Total difficulty must strictly increase from the previous header.
		if header.total_difficulty() <= prev.total_difficulty() {
			return Err(ErrorKind::DifficultyTooLow.into());
		}

		// The claimed per-block difficulty increment for this header.
		let target_difficulty = header.total_difficulty() - prev.total_difficulty();

		// The actual pow solution must meet (at least) the claimed difficulty.
		if header.pow.to_difficulty(header.height) < target_difficulty {
			return Err(ErrorKind::DifficultyTooLow.into());
		}

		// explicit check to ensure total_difficulty has increased by exactly
		// the _network_ difficulty of the previous block
		// (during testnet1 we use _block_ difficulty here)
		let child_batch = ctx.batch.child()?;
		let diff_iter = store::DifficultyIter::from_batch(prev.hash(), child_batch);
		let next_header_info = consensus::next_difficulty(header.height, diff_iter);
		if target_difficulty != next_header_info.difficulty {
			info!(
				"validate_header: header target difficulty {} != {}",
				target_difficulty.to_num(),
				next_header_info.difficulty.to_num()
			);
			return Err(ErrorKind::WrongTotalDifficulty.into());
		}
		// check the secondary PoW scaling factor if applicable
		if header.pow.secondary_scaling != next_header_info.secondary_scaling {
			info!(
				"validate_header: header secondary scaling {} != {}",
				header.pow.secondary_scaling, next_header_info.secondary_scaling
			);
			return Err(ErrorKind::InvalidScaling.into());
		}
	}

	Ok(())
}
|
|
|
|
|
|
2018-12-08 02:59:40 +03:00
|
|
|
|
fn validate_block(block: &Block, ctx: &mut BlockContext<'_>) -> Result<(), Error> {
|
2018-11-01 12:51:32 +03:00
|
|
|
|
let prev = ctx.batch.get_previous_header(&block.header)?;
|
2018-09-05 12:51:29 +03:00
|
|
|
|
block
|
2018-10-13 23:57:01 +03:00
|
|
|
|
.validate(&prev.total_kernel_offset, ctx.verifier_cache.clone())
|
2020-02-05 19:02:07 +03:00
|
|
|
|
.map_err(ErrorKind::InvalidBlockProof)?;
|
2018-09-05 12:51:29 +03:00
|
|
|
|
Ok(())
|
|
|
|
|
}
|
|
|
|
|
|
2018-12-16 12:26:39 +03:00
|
|
|
|
/// Verify the block is not spending coinbase outputs before they have sufficiently matured.
|
2019-09-07 02:28:26 +03:00
|
|
|
|
fn verify_coinbase_maturity(
|
|
|
|
|
block: &Block,
|
|
|
|
|
ext: &txhashset::ExtensionPair<'_>,
|
2020-01-28 20:23:11 +03:00
|
|
|
|
batch: &store::Batch<'_>,
|
2019-09-07 02:28:26 +03:00
|
|
|
|
) -> Result<(), Error> {
|
|
|
|
|
let ref extension = ext.extension;
|
|
|
|
|
let ref header_extension = ext.header_extension;
|
|
|
|
|
extension
|
|
|
|
|
.utxo_view(header_extension)
|
2020-01-28 20:23:11 +03:00
|
|
|
|
.verify_coinbase_maturity(&block.inputs(), block.header.height, batch)
|
2018-05-07 16:21:41 +03:00
|
|
|
|
}
|
|
|
|
|
|
2019-07-26 10:36:24 +03:00
|
|
|
|
/// Verify kernel sums across the full utxo and kernel sets based on block_sums
|
|
|
|
|
/// of previous block accounting for the inputs|outputs|kernels of the new block.
|
|
|
|
|
fn verify_block_sums(b: &Block, batch: &store::Batch<'_>) -> Result<BlockSums, Error> {
|
2018-09-20 11:19:32 +03:00
|
|
|
|
// Retrieve the block_sums for the previous block.
|
2019-07-26 10:36:24 +03:00
|
|
|
|
let block_sums = batch.get_block_sums(&b.header.prev_hash)?;
|
2018-09-20 11:19:32 +03:00
|
|
|
|
|
|
|
|
|
// Overage is based purely on the new block.
|
|
|
|
|
// Previous block_sums have taken all previous overage into account.
|
|
|
|
|
let overage = b.header.overage();
|
|
|
|
|
|
|
|
|
|
// Offset on the other hand is the total kernel offset from the new block.
|
|
|
|
|
let offset = b.header.total_kernel_offset();
|
|
|
|
|
|
|
|
|
|
// Verify the kernel sums for the block_sums with the new block applied.
|
2018-09-28 19:27:31 +03:00
|
|
|
|
let (utxo_sum, kernel_sum) =
|
2018-12-08 02:59:40 +03:00
|
|
|
|
(block_sums, b as &dyn Committed).verify_kernel_sums(overage, offset)?;
|
2018-09-20 11:19:32 +03:00
|
|
|
|
|
2019-07-26 10:36:24 +03:00
|
|
|
|
Ok(BlockSums {
|
|
|
|
|
utxo_sum,
|
|
|
|
|
kernel_sum,
|
|
|
|
|
})
|
2018-09-20 11:19:32 +03:00
|
|
|
|
}
|
|
|
|
|
|
2018-09-03 18:55:09 +03:00
|
|
|
|
/// Fully validate the block by applying it to the txhashset extension.
|
|
|
|
|
/// Check both the txhashset roots and sizes are correct after applying the block.
|
2018-12-08 02:59:40 +03:00
|
|
|
|
fn apply_block_to_txhashset(
|
|
|
|
|
block: &Block,
|
2019-09-07 02:28:26 +03:00
|
|
|
|
ext: &mut txhashset::ExtensionPair<'_>,
|
2020-01-28 20:23:11 +03:00
|
|
|
|
batch: &store::Batch<'_>,
|
2018-12-08 02:59:40 +03:00
|
|
|
|
) -> Result<(), Error> {
|
2020-01-28 20:23:11 +03:00
|
|
|
|
ext.extension.apply_block(block, batch)?;
|
|
|
|
|
ext.extension.validate_roots(&block.header)?;
|
|
|
|
|
ext.extension.validate_sizes(&block.header)?;
|
2016-11-16 04:29:42 +03:00
|
|
|
|
Ok(())
|
2016-10-21 03:06:12 +03:00
|
|
|
|
}
|
|
|
|
|
|
2017-01-10 02:16:44 +03:00
|
|
|
|
/// Officially adds the block to our chain.
|
2018-11-01 12:51:32 +03:00
|
|
|
|
/// Header must be added separately (assume this has been done previously).
|
2019-07-26 10:36:24 +03:00
|
|
|
|
fn add_block(b: &Block, block_sums: &BlockSums, batch: &store::Batch<'_>) -> Result<(), Error> {
|
2018-11-01 12:51:32 +03:00
|
|
|
|
batch
|
2017-11-01 02:32:33 +03:00
|
|
|
|
.save_block(b)
|
2018-07-01 01:36:38 +03:00
|
|
|
|
.map_err(|e| ErrorKind::StoreErr(e, "pipe save block".to_owned()))?;
|
2019-07-26 10:36:24 +03:00
|
|
|
|
batch.save_block_sums(&b.hash(), block_sums)?;
|
2018-06-18 18:18:38 +03:00
|
|
|
|
Ok(())
|
2016-10-21 03:06:12 +03:00
|
|
|
|
}
|
|
|
|
|
|
2018-11-10 06:27:52 +03:00
|
|
|
|
/// Update the block chain tail so we can know the exact tail of full blocks in this node
|
2018-12-08 02:59:40 +03:00
|
|
|
|
fn update_body_tail(bh: &BlockHeader, batch: &store::Batch<'_>) -> Result<(), Error> {
|
2018-11-10 06:27:52 +03:00
|
|
|
|
let tip = Tip::from_header(bh);
|
|
|
|
|
batch
|
|
|
|
|
.save_body_tail(&tip)
|
|
|
|
|
.map_err(|e| ErrorKind::StoreErr(e, "pipe save body tail".to_owned()))?;
|
|
|
|
|
debug!("body tail {} @ {}", bh.hash(), bh.height);
|
|
|
|
|
Ok(())
|
|
|
|
|
}
|
|
|
|
|
|
2017-02-08 00:50:01 +03:00
|
|
|
|
/// Officially adds the block header to our header chain.
|
2018-12-08 02:59:40 +03:00
|
|
|
|
fn add_block_header(bh: &BlockHeader, batch: &store::Batch<'_>) -> Result<(), Error> {
|
2018-11-01 12:51:32 +03:00
|
|
|
|
batch
|
2017-11-01 02:32:33 +03:00
|
|
|
|
.save_block_header(bh)
|
2018-11-01 12:51:32 +03:00
|
|
|
|
.map_err(|e| ErrorKind::StoreErr(e, "pipe save header".to_owned()))?;
|
|
|
|
|
Ok(())
|
2017-02-08 00:50:01 +03:00
|
|
|
|
}
|
|
|
|
|
|
2019-07-26 10:36:24 +03:00
|
|
|
|
fn update_head(head: &Tip, batch: &mut store::Batch<'_>) -> Result<(), Error> {
|
|
|
|
|
batch
|
|
|
|
|
.save_body_head(&head)
|
|
|
|
|
.map_err(|e| ErrorKind::StoreErr(e, "pipe save body".to_owned()))?;
|
2018-10-05 10:29:33 +03:00
|
|
|
|
|
2019-07-26 10:36:24 +03:00
|
|
|
|
debug!("head updated to {} at {}", head.last_block_h, head.height);
|
2018-10-05 10:29:33 +03:00
|
|
|
|
|
2019-07-26 10:36:24 +03:00
|
|
|
|
Ok(())
|
2016-10-21 03:06:12 +03:00
|
|
|
|
}
|
2017-02-08 00:50:01 +03:00
|
|
|
|
|
2018-06-22 11:08:06 +03:00
|
|
|
|
// Whether the provided block totals more work than the chain tip
|
2018-10-05 10:29:33 +03:00
|
|
|
|
fn has_more_work(header: &BlockHeader, head: &Tip) -> bool {
|
|
|
|
|
header.total_difficulty() > head.total_difficulty
|
2018-06-22 11:08:06 +03:00
|
|
|
|
}
|
|
|
|
|
|
2018-11-01 12:51:32 +03:00
|
|
|
|
/// Rewind the header chain and reapply headers on a fork.
|
|
|
|
|
pub fn rewind_and_apply_header_fork(
|
|
|
|
|
header: &BlockHeader,
|
2018-12-08 02:59:40 +03:00
|
|
|
|
ext: &mut txhashset::HeaderExtension<'_>,
|
2020-01-28 20:23:11 +03:00
|
|
|
|
batch: &store::Batch<'_>,
|
2018-11-01 12:51:32 +03:00
|
|
|
|
) -> Result<(), Error> {
|
|
|
|
|
let mut fork_hashes = vec![];
|
2019-07-26 10:36:24 +03:00
|
|
|
|
let mut current = header.clone();
|
2020-02-05 19:02:07 +03:00
|
|
|
|
while current.height > 0 && ext.is_on_current_chain(¤t, batch).is_err() {
|
2018-11-01 12:51:32 +03:00
|
|
|
|
fork_hashes.push(current.hash());
|
2020-01-28 20:23:11 +03:00
|
|
|
|
current = batch.get_previous_header(¤t)?;
|
2018-11-01 12:51:32 +03:00
|
|
|
|
}
|
|
|
|
|
fork_hashes.reverse();
|
|
|
|
|
|
|
|
|
|
let forked_header = current;
|
|
|
|
|
|
|
|
|
|
// Rewind the txhashset state back to the block where we forked from the most work chain.
|
|
|
|
|
ext.rewind(&forked_header)?;
|
|
|
|
|
|
|
|
|
|
// Re-apply all headers on this fork.
|
|
|
|
|
for h in fork_hashes {
|
2020-01-28 20:23:11 +03:00
|
|
|
|
let header = batch
|
2018-11-01 12:51:32 +03:00
|
|
|
|
.get_block_header(&h)
|
2020-02-12 21:35:33 +03:00
|
|
|
|
.map_err(|e| ErrorKind::StoreErr(e, "getting forked headers".to_string()))?;
|
2019-09-07 02:28:26 +03:00
|
|
|
|
ext.validate_root(&header)?;
|
2018-11-01 12:51:32 +03:00
|
|
|
|
ext.apply_header(&header)?;
|
|
|
|
|
}
|
2019-07-26 10:36:24 +03:00
|
|
|
|
|
2018-11-01 12:51:32 +03:00
|
|
|
|
Ok(())
|
|
|
|
|
}
|
|
|
|
|
|
2018-01-08 04:23:23 +03:00
|
|
|
|
/// Utility function to handle forks. From the forked block, jump backward
|
2019-07-26 10:36:24 +03:00
|
|
|
|
/// to find to fork point. Rewind the txhashset to the fork point and apply all
|
|
|
|
|
/// necessary blocks prior to the one being processed to set the txhashset in
|
2018-01-08 04:23:23 +03:00
|
|
|
|
/// the expected state.
|
2019-07-26 10:36:24 +03:00
|
|
|
|
pub fn rewind_and_apply_fork(
|
|
|
|
|
header: &BlockHeader,
|
2019-09-07 02:28:26 +03:00
|
|
|
|
ext: &mut txhashset::ExtensionPair<'_>,
|
2020-01-28 20:23:11 +03:00
|
|
|
|
batch: &store::Batch<'_>,
|
2019-07-26 10:36:24 +03:00
|
|
|
|
) -> Result<(), Error> {
|
2019-09-07 02:28:26 +03:00
|
|
|
|
let ref mut extension = ext.extension;
|
|
|
|
|
let ref mut header_extension = ext.header_extension;
|
2019-07-26 10:36:24 +03:00
|
|
|
|
|
2019-09-07 02:28:26 +03:00
|
|
|
|
// Prepare the header MMR.
|
2020-01-28 20:23:11 +03:00
|
|
|
|
rewind_and_apply_header_fork(header, header_extension, batch)?;
|
2018-01-07 22:01:17 +03:00
|
|
|
|
|
2019-09-07 02:28:26 +03:00
|
|
|
|
// Rewind the txhashset extension back to common ancestor based on header MMR.
|
|
|
|
|
let mut current = batch.head_header()?;
|
2020-01-28 20:23:11 +03:00
|
|
|
|
while current.height > 0
|
2020-02-05 19:02:07 +03:00
|
|
|
|
&& header_extension
|
2020-01-28 20:23:11 +03:00
|
|
|
|
.is_on_current_chain(¤t, batch)
|
2020-02-05 19:02:07 +03:00
|
|
|
|
.is_err()
|
2020-01-28 20:23:11 +03:00
|
|
|
|
{
|
2019-09-07 02:28:26 +03:00
|
|
|
|
current = batch.get_previous_header(¤t)?;
|
|
|
|
|
}
|
|
|
|
|
let fork_point = current;
|
2020-01-28 20:23:11 +03:00
|
|
|
|
extension.rewind(&fork_point, batch)?;
|
2019-09-07 02:28:26 +03:00
|
|
|
|
|
|
|
|
|
// Then apply all full blocks since this common ancestor
|
|
|
|
|
// to put txhashet extension in a state to accept the new block.
|
|
|
|
|
let mut fork_hashes = vec![];
|
|
|
|
|
let mut current = header.clone();
|
|
|
|
|
while current.height > fork_point.height {
|
|
|
|
|
fork_hashes.push(current.hash());
|
|
|
|
|
current = batch.get_previous_header(¤t)?;
|
|
|
|
|
}
|
|
|
|
|
fork_hashes.reverse();
|
2018-01-07 22:01:17 +03:00
|
|
|
|
|
2018-11-01 12:51:32 +03:00
|
|
|
|
for h in fork_hashes {
|
2019-09-07 02:28:26 +03:00
|
|
|
|
let fb = batch
|
2018-03-04 03:19:54 +03:00
|
|
|
|
.get_block(&h)
|
2020-02-05 19:02:07 +03:00
|
|
|
|
.map_err(|e| ErrorKind::StoreErr(e, "getting forked blocks".to_string()))?;
|
2018-09-20 11:19:32 +03:00
|
|
|
|
|
|
|
|
|
// Re-verify coinbase maturity along this fork.
|
2020-01-28 20:23:11 +03:00
|
|
|
|
verify_coinbase_maturity(&fb, ext, batch)?;
|
2018-09-25 13:01:19 +03:00
|
|
|
|
// Validate the block against the UTXO set.
|
2020-01-28 20:23:11 +03:00
|
|
|
|
validate_utxo(&fb, ext, batch)?;
|
2018-09-20 11:19:32 +03:00
|
|
|
|
// Re-verify block_sums to set the block_sums up on this fork correctly.
|
2019-09-07 02:28:26 +03:00
|
|
|
|
verify_block_sums(&fb, batch)?;
|
2018-09-20 11:19:32 +03:00
|
|
|
|
// Re-apply the blocks.
|
2020-01-28 20:23:11 +03:00
|
|
|
|
apply_block_to_txhashset(&fb, ext, batch)?;
|
2018-01-07 22:01:17 +03:00
|
|
|
|
}
|
2019-07-26 10:36:24 +03:00
|
|
|
|
|
2018-01-07 22:01:17 +03:00
|
|
|
|
Ok(())
|
|
|
|
|
}
|
2018-09-25 13:01:19 +03:00
|
|
|
|
|
2020-01-28 20:23:11 +03:00
|
|
|
|
fn validate_utxo(
|
|
|
|
|
block: &Block,
|
|
|
|
|
ext: &mut txhashset::ExtensionPair<'_>,
|
|
|
|
|
batch: &store::Batch<'_>,
|
|
|
|
|
) -> Result<(), Error> {
|
2019-09-07 02:28:26 +03:00
|
|
|
|
let ref mut extension = ext.extension;
|
|
|
|
|
let ref mut header_extension = ext.header_extension;
|
2020-01-28 20:23:11 +03:00
|
|
|
|
extension
|
|
|
|
|
.utxo_view(header_extension)
|
|
|
|
|
.validate_block(block, batch)
|
2018-09-25 13:01:19 +03:00
|
|
|
|
}
|