// Copyright 2018 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Implementation of the chain block acceptance (or refusal) pipeline.

use std::sync::{Arc, RwLock};

use time;

use core::consensus;
use core::core::hash::{Hash, Hashed};
use core::core::target::Difficulty;
use core::core::{Block, BlockHeader};
use core::global;
use grin_store;
use store;
use txhashset;
use types::*;
use util::LOGGER;

/// Contextual information required to process a new block and either reject
/// or accept it.
pub struct BlockContext {
	/// The chain processing options (e.g. SYNC, SKIP_POW)
	pub opts: Options,
	/// The chain store, providing access to blocks and headers
	pub store: Arc<ChainStore>,
	/// The head of the chain as of the start of processing
	pub head: Tip,
	/// The POW verification function
	pub pow_verifier: fn(&BlockHeader, u8) -> bool,
	/// MMR sum tree states (outputs, range proofs, kernels)
	pub txhashset: Arc<RwLock<txhashset::TxHashSet>>,
}
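
// A minimal sketch of how calling code might assemble a `BlockContext` before
// invoking the pipeline below. Illustrative only: `chain_store`, `sumtrees`
// and `verifier` are placeholders for whatever the caller actually holds.
//
// let ctx = BlockContext {
// 	opts: Options::NONE,
// 	store: chain_store.clone(),
// 	head: chain_store.head()?,
// 	pow_verifier: verifier,
// 	txhashset: sumtrees.clone(),
// };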

/// Runs the block processing pipeline, including validation and finding a
/// place for the new block in the chain. Returns the new chain head if it
/// was updated.
pub fn process_block(b: &Block, mut ctx: BlockContext) -> Result<Option<Tip>, Error> {
	// TODO should just take a promise for a block with a full header so we don't
	// spend resources reading the full block when its header is invalid

	debug!(
		LOGGER,
		"pipe: process_block {} at {} with {} inputs, {} outputs, {} kernels",
		b.hash(),
		b.header.height,
		b.inputs.len(),
		b.outputs.len(),
		b.kernels.len(),
	);

	check_known(b.hash(), &mut ctx)?;
	validate_header(&b.header, &mut ctx)?;

	// now check we actually have the previous block in the store;
	// not just the header but the block itself.
	// short circuit the test first both for performance (in-mem vs db access)
	// but also for the specific case of the first fast sync full block
	if b.header.previous != ctx.head.last_block_h {
		// we cannot assume we can use the chain head for this as we may be
		// dealing with a fork; we cannot use heights here as the fork may
		// have jumped in height
		match ctx.store.block_exists(&b.header.previous) {
			Ok(true) => {}
			Ok(false) => {
				return Err(Error::Orphan);
			}
			Err(e) => {
				return Err(Error::StoreErr(e, "pipe get previous".to_owned()));
			}
		}
	}

	// validate the block itself;
	// we can do this now before interacting with the txhashset
	validate_block(b, &mut ctx)?;

	// header and block both valid, and we have a previous block,
	// so take the lock on the txhashset
	let local_txhashset = ctx.txhashset.clone();
	let mut txhashset = local_txhashset.write().unwrap();

	// update head now that we're in the lock
	ctx.head = ctx.store
		.head()
		.map_err(|e| Error::StoreErr(e, "pipe reload head".to_owned()))?;

	// start a chain extension unit of work dependent on the success of the
	// internal validation and saving operations
	let result = txhashset::extending(&mut txhashset, |mut extension| {
		// First we rewind the txhashset extension if necessary
		// to put it into a consistent state for validating the block.
		// We can skip this step if the previous header is the latest header we saw.
		if b.header.previous != ctx.head.last_block_h {
			rewind_and_apply_fork(b, ctx.store.clone(), extension)?;
		}
		validate_block_via_txhashset(b, &mut extension)?;

		trace!(
			LOGGER,
			"pipe: process_block: {} at {} is valid, save and append.",
			b.hash(),
			b.header.height,
		);

		add_block(b, &mut ctx)?;
		let h = update_head(b, &mut ctx)?;
		if h.is_none() {
			extension.force_rollback();
		}
		Ok(h)
	});

	result
}
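
// `txhashset::extending` above acts as a unit of work: the closure receives a
// mutable extension over the txhashset and its changes are committed only when
// the closure returns `Ok` without `force_rollback` having been called.
// Roughly, its shape is the following (a sketch, not the exact signature):
//
// fn extending<F, T>(trees: &mut TxHashSet, inner: F) -> Result<T, Error>
// where
// 	F: FnOnce(&mut Extension) -> Result<T, Error>;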

/// Process the block header.
/// This is only ever used during sync and uses a context based on sync_head.
pub fn sync_block_header(
	bh: &BlockHeader,
	mut sync_ctx: BlockContext,
	mut header_ctx: BlockContext,
) -> Result<Option<Tip>, Error> {
	debug!(
		LOGGER,
		"pipe: sync_block_header: {} at {}",
		bh.hash(),
		bh.height
	);

	validate_header(&bh, &mut sync_ctx)?;
	add_block_header(bh, &mut sync_ctx)?;

	// now update the header_head (if new header with most work) and the
	// sync_head (always)
	update_header_head(bh, &mut header_ctx)?;
	update_sync_head(bh, &mut sync_ctx)
}
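
// Note the two contexts above: sync_ctx tracks sync_head, which
// update_sync_head advances unconditionally so syncing can resume where it
// left off, while header_ctx tracks header_head, which update_header_head
// only advances when the new header represents the most total work.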

/// Process block header as part of "header first" block propagation.
/// We validate the header but we do not store it or update header head based
/// on this. We will update these once we get the block back after requesting
/// it.
pub fn process_block_header(bh: &BlockHeader, mut ctx: BlockContext) -> Result<(), Error> {
	debug!(
		LOGGER,
		"pipe: process_block_header at {} [{}]",
		bh.height,
		bh.hash()
	); // keep this

	check_header_known(bh.hash(), &mut ctx)?;
	validate_header(&bh, &mut ctx)
}

/// Quick in-memory check to fast-reject any block header we've already handled
/// recently. Keeps duplicates from the network in check.
/// ctx here is specific to the header_head (tip of the header chain)
fn check_header_known(bh: Hash, ctx: &mut BlockContext) -> Result<(), Error> {
	// TODO ring buffer of the last few blocks that came through here
	if bh == ctx.head.last_block_h || bh == ctx.head.prev_block_h {
		return Err(Error::Unfit("already known".to_string()));
	}
	if let Ok(h) = ctx.store.get_block_header(&bh) {
		// there is a window where a block header can be saved but the chain
		// head not updated yet, we plug that window here by re-accepting the
		// block
		if h.total_difficulty <= ctx.head.total_difficulty {
			return Err(Error::Unfit("already in store".to_string()));
		}
	}
	Ok(())
}

/// Quick in-memory check to fast-reject any block we've already handled
/// recently. Keeps duplicates from the network in check.
fn check_known(bh: Hash, ctx: &mut BlockContext) -> Result<(), Error> {
	// TODO ring buffer of the last few blocks that came through here
	if bh == ctx.head.last_block_h || bh == ctx.head.prev_block_h {
		return Err(Error::Unfit("already known".to_string()));
	}
	if let Ok(b) = ctx.store.get_block(&bh) {
		// there is a window where a block can be saved but the chain head not
		// updated yet, we plug that window here by re-accepting the block
		if b.header.total_difficulty <= ctx.head.total_difficulty {
			return Err(Error::Unfit("already in store".to_string()));
		}
	}
	Ok(())
}

/// First level of block validation that only needs to act on the block header
/// to make it as cheap as possible. The different validations are also
/// arranged by order of cost to have as little DoS surface as possible.
/// TODO require only the block header (with length information)
fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), Error> {
	// check version, enforces scheduled hard fork
	if !consensus::valid_header_version(header.height, header.version) {
		error!(
			LOGGER,
			"Invalid block header version received ({}), maybe update Grin?", header.version
		);
		return Err(Error::InvalidBlockVersion(header.version));
	}

	// TODO: remove CI check from here somehow
	if header.timestamp
		> time::now_utc() + time::Duration::seconds(12 * (consensus::BLOCK_TIME_SEC as i64))
		&& !global::is_automated_testing_mode()
	{
		// refuse blocks more than 12 block intervals in the future (as in bitcoin)
		// TODO add warning in p2p code if local time is too different from peers
		return Err(Error::InvalidBlockTime);
	}

	if !ctx.opts.contains(Options::SKIP_POW) {
		let n = global::sizeshift();
		if !(ctx.pow_verifier)(header, n) {
			error!(
				LOGGER,
				"pipe: validate_header failed for cuckoo shift size {}", n
			);
			return Err(Error::InvalidPow);
		}
		if header.height % 500 == 0 {
			debug!(
				LOGGER,
				"Validated header using cuckoo shift size {}", n
			);
		}
	}

	// first I/O cost, better as late as possible
	let prev = match ctx.store.get_block_header(&header.previous) {
		Ok(prev) => Ok(prev),
		Err(grin_store::Error::NotFoundErr) => Err(Error::Orphan),
		Err(e) => Err(Error::StoreErr(
			e,
			format!("previous header {}", header.previous),
		)),
	}?;

	// make sure this header has a height exactly one higher than the previous
	// header
	if header.height != prev.height + 1 {
		return Err(Error::InvalidBlockHeight);
	}

	// TODO - get rid of the automated testing mode check here somehow
	if header.timestamp <= prev.timestamp && !global::is_automated_testing_mode() {
		// prevent time warp attacks and some timestamp manipulations by forcing
		// strict time progression (but not in CI mode)
		return Err(Error::InvalidBlockTime);
	}

	// verify the proof of work and related parameters.
	// at this point we have a previous block header,
	// and we know the height increased by one,
	// so now we can check the total_difficulty increase is also valid:
	// check the pow hash shows a difficulty at least as large
	// as the target difficulty
	if !ctx.opts.contains(Options::SKIP_POW) {
		if header.total_difficulty.clone() <= prev.total_difficulty.clone() {
			return Err(Error::DifficultyTooLow);
		}

		let target_difficulty = header.total_difficulty.clone() - prev.total_difficulty.clone();

		if header.pow.clone().to_difficulty() < target_difficulty {
			return Err(Error::DifficultyTooLow);
		}

		// explicit check to ensure we are not below the minimum difficulty
		// we will also check difficulty based on next_difficulty later on
		if target_difficulty < Difficulty::one() {
			return Err(Error::DifficultyTooLow);
		}

		// explicit check to ensure total_difficulty has increased by exactly
		// the _network_ difficulty of the previous block
		// (during testnet1 we use _block_ difficulty here)
		let diff_iter = store::DifficultyIter::from(header.previous, ctx.store.clone());
		let network_difficulty =
			consensus::next_difficulty(diff_iter).map_err(|e| Error::Other(e.to_string()))?;
		if target_difficulty != network_difficulty.clone() {
			error!(
				LOGGER,
				"validate_header: BANNABLE OFFENCE: header cumulative difficulty {} != {}",
				target_difficulty.into_num(),
				prev.total_difficulty.into_num() + network_difficulty.into_num()
			);
			return Err(Error::WrongTotalDifficulty);
		}
	}

	Ok(())
}
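
// A worked example of the difficulty checks above, with plain integers in
// place of `Difficulty` (illustrative numbers only):
//
// let prev_total = 1_000u64;
// let header_total = 1_025u64;
// let target = header_total - prev_total; // 25
// // the header is accepted only if all three hold:
// // pow_difficulty >= target, target >= 1, target == network_difficulty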

/// Validate the block against the chain's accumulated output and kernel sums,
/// saving the new sums on success.
fn validate_block(b: &Block, ctx: &mut BlockContext) -> Result<(), Error> {
	// If this is the first block then we have no previous block sums stored.
	let block_sums = if b.header.height == 1 {
		BlockSums::default()
	} else {
		ctx.store.get_block_sums(&b.header.previous)?
	};

	let (new_output_sum, new_kernel_sum) =
		b.validate(&block_sums.output_sum, &block_sums.kernel_sum)
			.map_err(&Error::InvalidBlockProof)?;

	ctx.store.save_block_sums(
		&b.hash(),
		&BlockSums {
			output_sum: new_output_sum,
			kernel_sum: new_kernel_sum,
		},
	)?;

	Ok(())
}
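
// The block sums behave like a fold over the chain: each block's validation
// consumes the previous totals and yields the new ones. A rough sketch of
// that shape, reusing the calls above (`chain_blocks` is a placeholder):
//
// let mut sums = BlockSums::default();
// for block in chain_blocks {
// 	let (out, kern) = block.validate(&sums.output_sum, &sums.kernel_sum)?;
// 	sums = BlockSums { output_sum: out, kernel_sum: kern };
// }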

/// Fully validate the block by applying it to the txhashset extension
/// and checking the roots.
/// Rewind and reapply forked blocks if necessary to put the txhashset
/// extension in the correct state to accept the block.
fn validate_block_via_txhashset(b: &Block, ext: &mut txhashset::Extension) -> Result<(), Error> {
	// First check we are not attempting to spend any coinbase outputs
	// before they have matured sufficiently.
	ext.verify_coinbase_maturity(&b.inputs, b.header.height)?;

	// apply the new block to the MMR trees and check the new root hashes
	ext.apply_block(&b)?;

	let roots = ext.roots();
	if roots.output_root != b.header.output_root || roots.rproof_root != b.header.range_proof_root
		|| roots.kernel_root != b.header.kernel_root
	{
		ext.dump(false);

		debug!(
			LOGGER,
			"validate_block_via_txhashset: output roots - {:?}, {:?}",
			roots.output_root,
			b.header.output_root,
		);
		debug!(
			LOGGER,
			"validate_block_via_txhashset: rproof roots - {:?}, {:?}",
			roots.rproof_root,
			b.header.range_proof_root,
		);
		debug!(
			LOGGER,
			"validate_block_via_txhashset: kernel roots - {:?}, {:?}",
			roots.kernel_root,
			b.header.kernel_root,
		);

		return Err(Error::InvalidRoot);
	}

	Ok(())
}

/// Officially adds the block to our chain.
fn add_block(b: &Block, ctx: &mut BlockContext) -> Result<(), Error> {
	ctx.store
		.save_block(b)
		.map_err(|e| Error::StoreErr(e, "pipe save block".to_owned()))
}

/// Officially adds the block header to our header chain.
fn add_block_header(bh: &BlockHeader, ctx: &mut BlockContext) -> Result<(), Error> {
	ctx.store
		.save_block_header(bh)
		.map_err(|e| Error::StoreErr(e, "pipe save header".to_owned()))
}

/// Directly updates the head if we've just appended a new block to it or
/// handles the situation where we've just added enough work to have a fork
/// with more work than the head.
fn update_head(b: &Block, ctx: &mut BlockContext) -> Result<Option<Tip>, Error> {
	// if we made a fork with more work than the head (which should also be true
	// when extending the head), update it
	let tip = Tip::from_block(&b.header);
	if tip.total_difficulty > ctx.head.total_difficulty {
		// update the block height index
		ctx.store
			.setup_height(&b.header, &ctx.head)
			.map_err(|e| Error::StoreErr(e, "pipe setup height".to_owned()))?;

		// in sync mode, only update the "body chain", otherwise update both the
		// "header chain" and "body chain"; updating the header chain in sync resets
		// all additional "future" headers we've received
		if ctx.opts.contains(Options::SYNC) {
			ctx.store
				.save_body_head(&tip)
				.map_err(|e| Error::StoreErr(e, "pipe save body".to_owned()))?;
		} else {
			ctx.store
				.save_head(&tip)
				.map_err(|e| Error::StoreErr(e, "pipe save head".to_owned()))?;
		}
		ctx.head = tip.clone();
		if b.header.height % 100 == 0 {
			info!(
				LOGGER,
				"pipe: chain head reached {} @ {} [{}]",
				b.header.height,
				b.header.total_difficulty,
				b.hash()
			);
		} else {
			debug!(
				LOGGER,
				"pipe: chain head reached {} @ {} [{}]",
				b.header.height,
				b.header.total_difficulty,
				b.hash()
			);
		}
		Ok(Some(tip))
	} else {
		Ok(None)
	}
}
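
// Fork choice above is purely by total work, not height: a fork displaces the
// current head as soon as its total_difficulty is strictly greater. With
// illustrative numbers, a head at height 100 and total_difficulty 5_000 is
// replaced by a fork block at height 99 with total_difficulty 5_001.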

/// Update the sync head so we can keep syncing from where we left off.
fn update_sync_head(bh: &BlockHeader, ctx: &mut BlockContext) -> Result<Option<Tip>, Error> {
	let tip = Tip::from_block(bh);
	ctx.store
		.save_sync_head(&tip)
		.map_err(|e| Error::StoreErr(e, "pipe save sync head".to_owned()))?;
	ctx.head = tip.clone();
	if bh.height % 100 == 0 {
		info!(
			LOGGER,
			"sync head {} @ {} [{}]",
			bh.total_difficulty,
			bh.height,
			bh.hash()
		);
	} else {
		debug!(
			LOGGER,
			"sync head {} @ {} [{}]",
			bh.total_difficulty,
			bh.height,
			bh.hash()
		);
	}
	Ok(Some(tip))
}

/// Update the header head if this header has more total work than the
/// current header head.
fn update_header_head(bh: &BlockHeader, ctx: &mut BlockContext) -> Result<Option<Tip>, Error> {
	let tip = Tip::from_block(bh);
	if tip.total_difficulty > ctx.head.total_difficulty {
		ctx.store
			.save_header_head(&tip)
			.map_err(|e| Error::StoreErr(e, "pipe save header head".to_owned()))?;
		ctx.head = tip.clone();
		if bh.height % 100 == 0 {
			info!(
				LOGGER,
				"header head {} @ {} [{}]",
				bh.total_difficulty,
				bh.height,
				bh.hash()
			);
		} else {
			debug!(
				LOGGER,
				"header head {} @ {} [{}]",
				bh.total_difficulty,
				bh.height,
				bh.hash()
			);
		}
		Ok(Some(tip))
	} else {
		Ok(None)
	}
}

/// Utility function to handle forks. From the forked block, jump backward
/// to find the fork root. Rewind the txhashset to the root and apply all the
/// forked blocks prior to the one being processed to set the txhashset in
/// the expected state.
pub fn rewind_and_apply_fork(
	b: &Block,
	store: Arc<ChainStore>,
	ext: &mut txhashset::Extension,
) -> Result<(), Error> {
	// extending a fork, first identify the block where forking occurred
	// keeping the hashes of blocks along the fork
	let mut current = b.header.previous;
	let mut hashes = vec![];
	loop {
		let curr_header = store.get_block_header(&current)?;

		if let Ok(_) = store.is_on_current_chain(&curr_header) {
			break;
		} else {
			// keep track of the fork blocks, oldest first
			hashes.insert(0, (curr_header.height, curr_header.hash()));
			current = curr_header.previous;
		}
	}

	let forked_block = store.get_block_header(&current)?;

	trace!(
		LOGGER,
		"rewind_and_apply_fork @ {} [{}], was @ {} [{}]",
		forked_block.height,
		forked_block.hash(),
		b.header.height,
		b.header.hash()
	);

	// rewind the sum trees up to the forking block
	ext.rewind(&forked_block)?;

	trace!(
		LOGGER,
		"rewind_and_apply_fork: blocks on fork: {:?}",
		hashes,
	);

	// apply all the blocks along the fork so far; the new block itself is
	// applied by the caller once the extension is back in the right state
	for (_, h) in hashes {
		let fb = store
			.get_block(&h)
			.map_err(|e| Error::StoreErr(e, format!("getting forked blocks")))?;
		ext.apply_block(&fb)?;
	}
	Ok(())
}
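
// A concrete trace of the fork walk above (illustrative): with the current
// chain A-B-C and a fork A-B-D-E, processing block F extending E collects
// hashes = [D, E] (oldest first; F itself is excluded since the walk starts
// at F's parent). The extension is rewound to B, D and E are re-applied, and
// the caller then applies F.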