// Copyright 2016 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Implementation of the chain block acceptance (or refusal) pipeline.

use std::sync::{Arc, RwLock};

use secp;
use time;

use core::consensus;
use core::core::hash::{Hash, Hashed};
use core::core::{BlockHeader, Block};
use core::core::transaction;
use types::*;
use store;
use sumtree;
use core::global;

/// Contextual information required to process a new block and either reject or
/// accept it.
pub struct BlockContext {
	/// The options controlling how the block is processed
	pub opts: Options,
	/// The chain store, used to read and save blocks and headers
	pub store: Arc<ChainStore>,
	/// The adapter notified when a block is accepted
	pub adapter: Arc<ChainAdapter>,
	/// The current chain head
	pub head: Tip,
	/// The POW verification function
	pub pow_verifier: fn(&BlockHeader, u32) -> bool,
	/// MMR sum tree states
	pub sumtrees: Arc<RwLock<sumtree::SumTrees>>,
}

/// Runs the block processing pipeline, including validation and finding a
/// place for the new block in the chain. Returns the new chain head if it
/// was updated.
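///
/// A minimal usage sketch (assuming a fully initialized `BlockContext` and a
/// block received from a peer):
///
/// ```ignore
/// match process_block(&block, ctx) {
///     Ok(Some(tip)) => info!("new head {} at height {}", tip.last_block_h, tip.height),
///     Ok(None) => debug!("block accepted, head unchanged"),
///     Err(e) => debug!("block rejected: {:?}", e),
/// }
/// ```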
pub fn process_block(b: &Block, mut ctx: BlockContext) -> Result<Option<Tip>, Error> {
	// TODO should just take a promise for a block with a full header so we don't
	// spend resources reading the full block when its header is invalid

	info!(
		"Starting validation pipeline for block {} at {} with {} inputs and {} outputs.",
		b.hash(),
		b.header.height,
		b.inputs.len(),
		b.outputs.len()
	);
	check_known(b.hash(), &mut ctx)?;

	if !ctx.opts.intersects(SYNC) {
		// in sync mode, the header has already been validated
		validate_header(&b.header, &mut ctx)?;
	}

	// take the lock on the sum trees and start a chain extension unit of work
	// dependent on the success of the internal validation and saving operations
	let local_sumtrees = ctx.sumtrees.clone();
	let mut sumtrees = local_sumtrees.write().unwrap();
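	// note: the closure below runs as a single unit of work against the sum
	// trees; if it returns an error (or calls force_rollback), the extension
	// is expected to be discarded rather than committed, so a rejected block
	// leaves the MMRs untouched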
	sumtree::extending(&mut sumtrees, |mut extension| {

		validate_block(b, &mut ctx, &mut extension)?;
		debug!(
			"Block at {} with hash {} is valid, going to save and append.",
			b.header.height,
			b.hash()
		);

		add_block(b, &mut ctx)?;
		let h = update_head(b, &mut ctx)?;
		if h.is_none() {
			extension.force_rollback();
		}
		Ok(h)
	})
}

/// Process the block header
pub fn process_block_header(bh: &BlockHeader, mut ctx: BlockContext) -> Result<Option<Tip>, Error> {
	info!(
		"Starting validation pipeline for block header {} at {}.",
		bh.hash(),
		bh.height
	);
	check_known(bh.hash(), &mut ctx)?;
	validate_header(&bh, &mut ctx)?;
	add_block_header(bh, &mut ctx)?;
	// just take the lock on the shared sumtree state; we don't need to hold it,
	// only to wait for any in-flight block processing to complete
	let _ = ctx.sumtrees.write().unwrap();

	update_header_head(bh, &mut ctx)
}

/// Quick in-memory check to fast-reject any block we've already handled
/// recently. Keeps duplicates from the network in check.
fn check_known(bh: Hash, ctx: &mut BlockContext) -> Result<(), Error> {
	// TODO ring buffer of the last few blocks that came through here
	if bh == ctx.head.last_block_h || bh == ctx.head.prev_block_h {
		return Err(Error::Unfit("already known".to_string()));
	}
	if let Ok(b) = ctx.store.get_block(&bh) {
		// there is a window where a block can be saved but the chain head not
		// updated yet; we plug that window here by re-accepting the block
		if b.header.total_difficulty <= ctx.head.total_difficulty {
			return Err(Error::Unfit("already in store".to_string()));
		}
	}
	Ok(())
}

/// First level of block validation that only needs to act on the block header
/// to make it as cheap as possible. The different validations are also
/// arranged by order of cost to have as little DoS surface as possible.
/// TODO require only the block header (with length information)
fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), Error> {
	if header.height > ctx.head.height + 1 {
		return Err(Error::Orphan);
	}

	let prev = try!(ctx.store.get_block_header(&header.previous).map_err(
		&Error::StoreErr,
	));

	if header.height != prev.height + 1 {
		return Err(Error::InvalidBlockHeight);
	}
	if header.timestamp <= prev.timestamp && !global::is_automated_testing_mode() {
		// prevent time warp attacks and some timestamp manipulations by forcing strict
		// time progression (but not in CI mode)
		return Err(Error::InvalidBlockTime);
	}
	if header.timestamp >
		time::now_utc() + time::Duration::seconds(12 * (consensus::BLOCK_TIME_SEC as i64))
	{
		// refuse blocks more than 12 block intervals in the future (as in bitcoin)
		// TODO add warning in p2p code if local time is too different from peers
		return Err(Error::InvalidBlockTime);
	}

	if !ctx.opts.intersects(SKIP_POW) {
		// verify the proof of work and related parameters
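		// total_difficulty is cumulative: it must equal the previous header's
		// total plus the difficulty proven by the previous block's proof of work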
		if header.total_difficulty != prev.total_difficulty.clone() + prev.pow.to_difficulty() {
			return Err(Error::WrongTotalDifficulty);
		}
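		// recompute the expected difficulty for this block: the iterator walks
		// back through previous headers, feeding them to the consensus
		// difficulty adjustment calculation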
		let diff_iter = store::DifficultyIter::from(header.previous, ctx.store.clone());
		let difficulty = consensus::next_difficulty(diff_iter).map_err(|e| {
			Error::Other(e.to_string())
		})?;
		if header.difficulty < difficulty {
			return Err(Error::DifficultyTooLow);
		}

		let cycle_size = if ctx.opts.intersects(EASY_POW) {
			global::sizeshift()
		} else {
			consensus::DEFAULT_SIZESHIFT
		};
		debug!("Validating block with cuckoo size {}", cycle_size);
		if !(ctx.pow_verifier)(header, cycle_size as u32) {
			return Err(Error::InvalidPow);
		}
	}

	Ok(())
}

/// Fully validate the block content.
fn validate_block(
	b: &Block,
	ctx: &mut BlockContext,
	ext: &mut sumtree::Extension,
) -> Result<(), Error> {
	if b.header.height > ctx.head.height + 1 {
		return Err(Error::Orphan);
	}

	// main isolated block validation, checks all commitment sums and sigs
	let curve = secp::Secp256k1::with_caps(secp::ContextFlag::Commit);
	try!(b.validate(&curve).map_err(&Error::InvalidBlockProof));

	// check that all the outputs of the block are "new" -
	// that they do not clobber any existing unspent outputs (by their commitment)
	//
	// TODO - do we need to do this here (and can we do this here if we need access
	// to the chain)?
	// see check_duplicate_outputs in pool for the analogous operation on
	// transaction outputs
	// for output in &block.outputs {
	// here we would check that the output is not a duplicate output based on the
	// current chain
	// };

	// apply the new block to the MMR trees and check the new root hashes
	if b.header.previous == ctx.head.last_block_h {
		// standard head extension
		ext.apply_block(b)?;
	} else {
		// extending a fork, first identify the block where forking occurred
		// keeping the hashes of blocks along the fork
		let mut current = b.header.previous;
		let mut hashes = vec![];
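		// walk back from the new block's parent until we reach a header that is
		// also the header at that height on the current main chain (the fork
		// point), collecting the fork's block hashes oldest-first so they can be
		// re-applied in order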
		loop {
			let curr_header = ctx.store.get_block_header(&current)?;
			let height_header = ctx.store.get_header_by_height(curr_header.height)?;
			if curr_header.hash() != height_header.hash() {
				hashes.insert(0, curr_header.hash());
				current = curr_header.previous;
			} else {
				break;
			}
		}

		// rewind the sum trees up to the forking block, providing the height of the
		// forked block and the last commitment we want to rewind to
		let forked_block = ctx.store.get_block(&current)?;
		if forked_block.header.height > 0 {
			let last_output = &forked_block.outputs[forked_block.outputs.len() - 1];
			let last_kernel = &forked_block.kernels[forked_block.kernels.len() - 1];
			ext.rewind(
				forked_block.header.height,
				last_output,
				last_kernel,
			)?;
		}

		// apply all forked blocks, including this new one
		for h in hashes {
			let fb = ctx.store.get_block(&h)?;
			ext.apply_block(&fb)?;
		}
		ext.apply_block(&b)?;
	}
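	// the roots of the updated MMRs must match the roots committed to in the
	// block header, otherwise the block does not commit to the correct
	// resulting chain state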
	let (utxo_root, rproof_root, kernel_root) = ext.roots();
	if utxo_root.hash != b.header.utxo_root || rproof_root.hash != b.header.range_proof_root ||
		kernel_root.hash != b.header.kernel_root
	{
		return Err(Error::InvalidRoot);
	}

	// check that any coinbase outputs are spendable (that they have matured
	// sufficiently)
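	// concretely: an input spending a coinbase output is only accepted once the
	// spending block's height exceeds the coinbase block's height by more than
	// the maturity returned by global::coinbase_maturity()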
	for input in &b.inputs {
		if let Ok(output) = ctx.store.get_output_by_commit(&input.commitment()) {
			if output.features.contains(transaction::COINBASE_OUTPUT) {
				if let Ok(output_header) =
					ctx.store.get_block_header_by_output_commit(
						&input.commitment(),
					)
				{
					// TODO - make sure we are not off-by-1 here vs. the equivalent transaction
					// validation rule
					if b.header.height <= output_header.height + global::coinbase_maturity() {
						return Err(Error::ImmatureCoinbase);
					}
				};
			};
		};
	}

	Ok(())
}

/// Officially adds the block to our chain.
fn add_block(b: &Block, ctx: &mut BlockContext) -> Result<(), Error> {
	ctx.store.save_block(b).map_err(&Error::StoreErr)?;

	if !ctx.opts.intersects(SYNC) {
		// broadcast the block
		let adapter = ctx.adapter.clone();
		adapter.block_accepted(b);
	}
	Ok(())
}

/// Officially adds the block header to our header chain.
fn add_block_header(bh: &BlockHeader, ctx: &mut BlockContext) -> Result<(), Error> {
	ctx.store.save_block_header(bh).map_err(&Error::StoreErr)
}

/// Directly updates the head if we've just appended a new block to it or handles
/// the situation where we've just added enough work to have a fork with more
/// work than the head.
fn update_head(b: &Block, ctx: &mut BlockContext) -> Result<Option<Tip>, Error> {
	// if we made a fork with more work than the head (which should also be true
	// when extending the head), update it
	let tip = Tip::from_block(&b.header);
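	// fork choice is by total accumulated difficulty (most work), not by height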
	if tip.total_difficulty > ctx.head.total_difficulty {
		// update the block height index
		ctx.store.setup_height(&b.header).map_err(&Error::StoreErr)?;

		// in sync mode, only update the "body chain", otherwise update both the
		// "header chain" and "body chain"
		if ctx.opts.intersects(SYNC) {
			ctx.store.save_body_head(&tip).map_err(&Error::StoreErr)?;
		} else {
			ctx.store.save_head(&tip).map_err(&Error::StoreErr)?;
		}
		// TODO if we're switching branch, make sure to backtrack the sum trees

		ctx.head = tip.clone();
		info!("Updated head to {} at {}.", b.hash(), b.header.height);
		Ok(Some(tip))
	} else {
		Ok(None)
	}
}

/// Directly updates the header head if we've just appended a new block header
/// to it or handles the situation where we've just added enough work to have a
/// fork with more work than the head.
fn update_header_head(bh: &BlockHeader, ctx: &mut BlockContext) -> Result<Option<Tip>, Error> {
	// if we made a fork with more work than the head (which should also be true
	// when extending the head), update it
	let tip = Tip::from_block(bh);
	if tip.total_difficulty > ctx.head.total_difficulty {
		ctx.store.save_header_head(&tip).map_err(&Error::StoreErr)?;

		ctx.head = tip.clone();
		info!(
			"Updated block header head to {} at {}.",
			bh.hash(),
			bh.height
		);
		Ok(Some(tip))
	} else {
		Ok(None)
	}
}