// Copyright 2018 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Facade and handler for the rest of the blockchain implementation, mostly
//! the chain pipeline.
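//!
//! A minimal usage sketch (illustrative only, not a doctest): the `NoopAdapter`,
//! the genesis block, the dummy pow check and `Options::NONE` below stand in
//! for whatever a real node would wire in from its own configuration.
//!
//! ```ignore
//! let adapter = Arc::new(NoopAdapter {});
//! let pow_verifier = |_: &BlockHeader, _: u32| true; // accept any pow, illustration only
//! let chain = Chain::init(".grin".to_owned(), adapter, genesis_block, pow_verifier)?;
//!
//! // feed a block received from the network through the pipeline
//! let (new_tip, _) = chain.process_block(block, Options::NONE)?;
//! ```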

use std::collections::HashMap;
use std::fs::File;
use std::sync::{Arc, Mutex, RwLock};
use std::time::{Duration, Instant};

use core::core::{Block, BlockHeader, Input, OutputFeatures, OutputIdentifier, OutputStoreable,
                 TxKernel};
use core::core::hash::{Hash, Hashed};
use core::core::pmmr::MerkleProof;
use core::core::target::Difficulty;
use core::global;
use grin_store::Error::NotFoundErr;
use pipe;
use store;
use txhashset;
use types::*;
use util::secp::pedersen::RangeProof;
use util::LOGGER;

const MAX_ORPHAN_AGE_SECS: u64 = 30;

/// An orphan block, with the options it arrived with and the time it was added.
#[derive(Debug, Clone)]
struct Orphan {
    block: Block,
    opts: Options,
    added: Instant,
}

/// Thread-safe pool of orphan blocks waiting for their parent to be processed.
struct OrphanBlockPool {
    // blocks indexed by their hash
    orphans: RwLock<HashMap<Hash, Orphan>>,
    // additional index of previous -> hash
    // so we can efficiently identify a child block (ex-orphan) after processing a block
    prev_idx: RwLock<HashMap<Hash, Hash>>,
}

impl OrphanBlockPool {
    fn new() -> OrphanBlockPool {
        OrphanBlockPool {
            orphans: RwLock::new(HashMap::new()),
            prev_idx: RwLock::new(HashMap::new()),
        }
    }

    fn len(&self) -> usize {
        let orphans = self.orphans.read().unwrap();
        orphans.len()
    }

    fn add(&self, orphan: Orphan) {
        {
            let mut orphans = self.orphans.write().unwrap();
            let mut prev_idx = self.prev_idx.write().unwrap();
            orphans.insert(orphan.block.hash(), orphan.clone());
            prev_idx.insert(orphan.block.header.previous, orphan.block.hash());
        }

        // evict anything that has been in the pool longer than MAX_ORPHAN_AGE_SECS
        // and keep the previous -> hash index consistent with the remaining orphans
        {
            let mut orphans = self.orphans.write().unwrap();
            let mut prev_idx = self.prev_idx.write().unwrap();
            orphans.retain(|_, ref mut x| {
                x.added.elapsed() < Duration::from_secs(MAX_ORPHAN_AGE_SECS)
            });
            prev_idx.retain(|_, &mut x| orphans.contains_key(&x));
        }
    }

    fn remove(&self, hash: &Hash) -> Option<Orphan> {
        let mut orphans = self.orphans.write().unwrap();
        let mut prev_idx = self.prev_idx.write().unwrap();
        let orphan = orphans.remove(hash);
        if let Some(x) = orphan.clone() {
            prev_idx.remove(&x.block.header.previous);
        }
        orphan
    }

    /// Get an orphan from the pool indexed by the hash of its parent
    fn get_by_previous(&self, hash: &Hash) -> Option<Orphan> {
        let orphans = self.orphans.read().unwrap();
        let prev_idx = self.prev_idx.read().unwrap();
        if let Some(hash) = prev_idx.get(hash) {
            orphans.get(hash).cloned()
        } else {
            None
        }
    }

    fn contains(&self, hash: &Hash) -> bool {
        let orphans = self.orphans.read().unwrap();
        orphans.contains_key(hash)
    }
}

/// Facade to the blockchain block processing pipeline and storage. Provides
/// the current view of the TxHashSet according to the chain state. Also
/// maintains locking for the pipeline to avoid conflicting processing.
pub struct Chain {
    db_root: String,
    store: Arc<ChainStore>,
    adapter: Arc<ChainAdapter>,

    head: Arc<Mutex<Tip>>,
    orphans: Arc<OrphanBlockPool>,
    txhashset: Arc<RwLock<txhashset::TxHashSet>>,

    // POW verification function
    pow_verifier: fn(&BlockHeader, u32) -> bool,
}

unsafe impl Sync for Chain {}
unsafe impl Send for Chain {}

impl Chain {
    /// Check whether the chain exists. If not, the call to 'init' will
    /// expect an already mined genesis block. This keeps the chain free
    /// from needing to know about the mining implementation
    pub fn chain_exists(db_root: String) -> bool {
        let chain_store = store::ChainKVStore::new(db_root).unwrap();
        match chain_store.head() {
            Ok(_) => true,
            Err(NotFoundErr) => false,
            Err(_) => false,
        }
    }

    /// Initializes the blockchain and returns a new Chain instance. Does a check
    /// on the current chain head to make sure it exists and creates one based
    /// on the genesis block if necessary.
    pub fn init(
        db_root: String,
        adapter: Arc<ChainAdapter>,
        genesis: Block,
        pow_verifier: fn(&BlockHeader, u32) -> bool,
    ) -> Result<Chain, Error> {
        let chain_store = store::ChainKVStore::new(db_root.clone())?;

        let store = Arc::new(chain_store);

        // check if we have a head in store, otherwise the genesis block is it
        let head = store.head();
        let txhashset_md = match head {
            Ok(h) => {
                // Add the height to the metadata for the use of the rewind log, as this isn't
                // stored
                let mut ts = store.get_block_pmmr_file_metadata(&h.last_block_h)?;
                ts.output_file_md.block_height = h.height;
                ts.rproof_file_md.block_height = h.height;
                ts.kernel_file_md.block_height = h.height;
                Some(ts)
            }
            Err(NotFoundErr) => None,
            Err(e) => return Err(Error::StoreErr(e, "chain init load head".to_owned())),
        };

        let mut txhashset =
            txhashset::TxHashSet::open(db_root.clone(), store.clone(), txhashset_md)?;

        let head = store.head();
        let head = match head {
            Ok(h) => h,
            Err(NotFoundErr) => {
                let tip = Tip::new(genesis.hash());
                store.save_block(&genesis)?;
                store.setup_height(&genesis.header, &tip)?;
                if genesis.kernels.len() > 0 {
                    txhashset::extending(&mut txhashset, |extension| {
                        extension.apply_block(&genesis)
                    })?;
                }

                // saving a new tip based on genesis
                store.save_head(&tip)?;
                info!(
                    LOGGER,
                    "Saved genesis block: {:?}, nonce: {:?}, pow: {:?}",
                    genesis.hash(),
                    genesis.header.nonce,
                    genesis.header.pow,
                );
                pipe::save_pmmr_metadata(&tip, &txhashset, store.clone())?;
                tip
            }
            Err(e) => return Err(Error::StoreErr(e, "chain init load head".to_owned())),
        };

        // Reset sync_head and header_head to head of current chain.
        // Make sure sync_head is available for later use when needed.
        store.reset_head()?;

        debug!(LOGGER, "Chain init: {:?}", head);

        Ok(Chain {
            db_root: db_root,
            store: store,
            adapter: adapter,
            head: Arc::new(Mutex::new(head)),
            orphans: Arc::new(OrphanBlockPool::new()),
            txhashset: Arc::new(RwLock::new(txhashset)),
            pow_verifier: pow_verifier,
        })
    }

    /// Processes a single block, then checks for orphans, processing
    /// those as well if they're found
    pub fn process_block(
        &self,
        b: Block,
        opts: Options,
    ) -> Result<(Option<Tip>, Option<Block>), Error> {
        let res = self.process_block_no_orphans(b, opts);
        match res {
            Ok((t, b)) => {
                // We accepted a block, so see if we can accept any orphans
                if let Some(ref b) = b {
                    self.check_orphans(b.hash());
                }
                Ok((t, b))
            }
            Err(e) => Err(e),
        }
    }

    /// Attempt to add a new block to the chain. Returns the new chain tip if it
    /// has been added to the longest chain, None if it's added to an (as of
    /// now) orphan chain.
    pub fn process_block_no_orphans(
        &self,
        b: Block,
        opts: Options,
    ) -> Result<(Option<Tip>, Option<Block>), Error> {
        let head = self.store
            .head()
            .map_err(|e| Error::StoreErr(e, "chain load head".to_owned()))?;
        let ctx = self.ctx_from_head(head, opts);

        let res = pipe::process_block(&b, ctx);

        match res {
            Ok(Some(ref tip)) => {
                // block got accepted and extended the head, updating our head
                let chain_head = self.head.clone();
                {
                    let mut head = chain_head.lock().unwrap();
                    *head = tip.clone();
                }

                // notifying other parts of the system of the update
                if !opts.contains(Options::SYNC) {
                    // broadcast the block
                    let adapter = self.adapter.clone();
                    adapter.block_accepted(&b, opts);
                }
                Ok((Some(tip.clone()), Some(b.clone())))
            }
            Ok(None) => {
                // block got accepted but we did not extend the head
                // so it's on a fork (or is the start of a new fork)
                // broadcast the block out so everyone knows about the fork
                //
                // TODO - This opens us to an amplification attack on blocks
                // mined at a low difficulty. We should suppress really old blocks
                // or less relevant blocks somehow.
                // We should also probably consider banning nodes that send us really old
                // blocks.
                //
                if !opts.contains(Options::SYNC) {
                    // broadcast the block
                    let adapter = self.adapter.clone();
                    adapter.block_accepted(&b, opts);
                }
                Ok((None, Some(b.clone())))
            }
            Err(Error::Orphan) => {
                let block_hash = b.hash();
                let orphan = Orphan {
                    block: b.clone(),
                    opts: opts,
                    added: Instant::now(),
                };

                // In the case of a fork - it is possible to have multiple blocks
                // that are children of a given block.
                // We do not handle this currently for orphans (future enhancement?).
                // We just assume "last one wins" for now.
                self.orphans.add(orphan);

                debug!(
                    LOGGER,
                    "process_block: orphan: {:?}, # orphans {}",
                    block_hash,
                    self.orphans.len(),
                );
                Err(Error::Orphan)
            }
            Err(Error::Unfit(ref msg)) => {
                debug!(
                    LOGGER,
                    "Block {} at {} is unfit at this time: {}",
                    b.hash(),
                    b.header.height,
                    msg
                );
                Err(Error::Unfit(msg.clone()))
            }
            Err(e) => {
                info!(
                    LOGGER,
                    "Rejected block {} at {}: {:?}",
                    b.hash(),
                    b.header.height,
                    e
                );
                Err(e)
            }
        }
    }

    /// Process a block header received during "header first" propagation.
    pub fn process_block_header(
        &self,
        bh: &BlockHeader,
        opts: Options,
    ) -> Result<Option<Tip>, Error> {
        let header_head = self.get_header_head()?;
        let ctx = self.ctx_from_head(header_head, opts);
        pipe::process_block_header(bh, ctx)
    }

    /// Attempt to add a new header to the header chain.
    /// This is only ever used during sync and uses sync_head.
    pub fn sync_block_header(&self, bh: &BlockHeader, opts: Options) -> Result<Option<Tip>, Error> {
        let sync_head = self.get_sync_head()?;
        let header_head = self.get_header_head()?;
        let sync_ctx = self.ctx_from_head(sync_head, opts);
        let header_ctx = self.ctx_from_head(header_head, opts);
        pipe::sync_block_header(bh, sync_ctx, header_ctx)
    }

    fn ctx_from_head(&self, head: Tip, opts: Options) -> pipe::BlockContext {
        pipe::BlockContext {
            opts: opts,
            store: self.store.clone(),
            head: head,
            pow_verifier: self.pow_verifier,
            txhashset: self.txhashset.clone(),
        }
    }

    /// Check if hash is for a known orphan.
    pub fn is_orphan(&self, hash: &Hash) -> bool {
        self.orphans.contains(hash)
    }

    /// Check for any orphans that can now be processed, once a block
    /// has been successfully accepted.
    pub fn check_orphans(&self, mut last_block_hash: Hash) {
        debug!(
            LOGGER,
            "chain: check_orphans: # orphans {}",
            self.orphans.len(),
        );
        // Is there an orphan in our orphans that we can now process?
        // We just processed the given block, are there any orphans that have this block
        // as their "previous" block?
        loop {
            if let Some(orphan) = self.orphans.get_by_previous(&last_block_hash) {
                self.orphans.remove(&orphan.block.hash());
                let res = self.process_block_no_orphans(orphan.block, orphan.opts);
                match res {
                    Ok((_, b)) => {
                        // We accepted a block, so see if we can accept any orphans
                        if b.is_some() {
                            last_block_hash = b.unwrap().hash();
                        } else {
                            break;
                        }
                    }
                    Err(_) => {
                        break;
                    }
                };
            } else {
                break;
            }
        }
    }

    /// For the given commitment find the unspent output and return the
    /// associated hash in the output MMR.
    /// Return an error if the output does not exist or has been spent.
    /// This querying is done in a way that is consistent with the current chain state,
    /// specifically the current winning (valid, most work) fork.
    pub fn is_unspent(&self, output_ref: &OutputIdentifier) -> Result<Hash, Error> {
        let mut txhashset = self.txhashset.write().unwrap();
        txhashset.is_unspent(output_ref)
    }

    /// Validate the current chain state.
    pub fn validate(&self) -> Result<(), Error> {
        let header = self.store.head_header()?;
        let mut txhashset = self.txhashset.write().unwrap();
        txhashset::extending(&mut txhashset, |extension| extension.validate(&header))
    }

    /// Check if the input has matured sufficiently for the given block height.
    /// This only applies to inputs spending coinbase outputs.
    /// An input spending a non-coinbase output will always pass this check.
    pub fn is_matured(&self, input: &Input, height: u64) -> Result<(), Error> {
        if input.features.contains(OutputFeatures::COINBASE_OUTPUT) {
            let mut txhashset = self.txhashset.write().unwrap();
            let output = OutputIdentifier::from_input(&input);
            let hash = txhashset.is_unspent(&output)?;
            let header = self.get_block_header(&input.block_hash())?;
            input.verify_maturity(hash, &header, height)?;
        }
        Ok(())
    }

    /// Sets the txhashset roots on a brand new block by applying the block on the
    /// current txhashset state.
    pub fn set_txhashset_roots(&self, b: &mut Block, is_fork: bool) -> Result<(), Error> {
        let mut txhashset = self.txhashset.write().unwrap();
        let store = self.store.clone();

        let roots = txhashset::extending(&mut txhashset, |extension| {
            // apply the block on the txhashset and check the resulting root
            if is_fork {
                pipe::rewind_and_apply_fork(b, store, extension)?;
            }
            extension.apply_block(b)?;
            extension.force_rollback();
            Ok(extension.roots())
        })?;

        b.header.output_root = roots.output_root;
        b.header.range_proof_root = roots.rproof_root;
        b.header.kernel_root = roots.kernel_root;
        Ok(())
    }
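
    // Note: a caller assembling a new block template (e.g. for mining) would
    // typically call set_txhashset_roots along these lines (sketch only;
    // `new_block` is the caller's partially built block):
    //
    //   chain.set_txhashset_roots(&mut new_block, false)?;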

    /// Return a pre-built Merkle proof for the given commitment from the store.
    pub fn get_merkle_proof(
        &self,
        output: &OutputIdentifier,
        block_header: &BlockHeader,
    ) -> Result<MerkleProof, Error> {
        let mut txhashset = self.txhashset.write().unwrap();

        let merkle_proof = txhashset::extending(&mut txhashset, |extension| {
            extension.force_rollback();
            extension.merkle_proof_via_rewind(output, block_header)
        })?;

        Ok(merkle_proof)
    }

    /// Returns current txhashset roots
    pub fn get_txhashset_roots(&self) -> (Hash, Hash, Hash) {
        let mut txhashset = self.txhashset.write().unwrap();
        txhashset.roots()
    }

    /// Provides a reading view into the current txhashset state as well as
    /// the required indexes for a consumer to rewind to a consistent state
    /// at the provided block hash.
    pub fn txhashset_read(&self, h: Hash) -> Result<(u64, u64, File), Error> {
        // get the indexes for the block
        let out_index: u64;
        let kernel_index: u64;
        {
            let txhashset = self.txhashset.read().unwrap();
            let (oi, ki) = txhashset.indexes_at(&h)?;
            out_index = oi;
            kernel_index = ki;
        }

        // prepare the zip and return the corresponding Read
        let txhashset_reader = txhashset::zip_read(self.db_root.clone())?;
        Ok((out_index, kernel_index, txhashset_reader))
    }
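
    // How the two txhashset sync endpoints fit together, as a sketch only
    // (the peer transport in between is assumed, it is not provided here):
    //
    //   // on the node serving the state
    //   let (out_index, kernel_index, zip) = chain.txhashset_read(h)?;
    //   // ... send the zip file and the two indexes to the requesting peer ...
    //
    //   // on the node receiving the state
    //   chain.txhashset_write(h, out_index, kernel_index, zip)?;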

    /// Writes a txhashset state that has been provided to us (e.g. as part of
    /// fast sync). If we're willing to accept that new state, the data stream
    /// will be read as a zip file, unzipped and the resulting state files
    /// rewound to the provided indexes.
    pub fn txhashset_write(
        &self,
        h: Hash,
        rewind_to_output: u64,
        rewind_to_kernel: u64,
        txhashset_data: File,
    ) -> Result<(), Error> {
        let head = self.head().unwrap();
        let header_head = self.get_header_head().unwrap();
        if header_head.height - head.height < global::cut_through_horizon() as u64 {
            return Err(Error::InvalidTxHashSet("not needed".to_owned()));
        }

        let header = self.store.get_block_header(&h)?;
        txhashset::zip_write(self.db_root.clone(), txhashset_data)?;

        let mut txhashset =
            txhashset::TxHashSet::open(self.db_root.clone(), self.store.clone(), None)?;
        txhashset::extending(&mut txhashset, |extension| {
            extension.rewind_pos(header.height, rewind_to_output, rewind_to_kernel)?;
            extension.validate(&header)?;
            // TODO validate kernels and their sums with Outputs
            extension.rebuild_index()?;
            Ok(())
        })?;

        // replace the chain txhashset with the newly built one
        {
            let mut txhashset_ref = self.txhashset.write().unwrap();
            *txhashset_ref = txhashset;
        }

        // setup new head
        {
            let mut head = self.head.lock().unwrap();
            *head = Tip::from_block(&header);
            let _ = self.store.save_body_head(&head);
            self.store.save_header_height(&header)?;
        }

        self.check_orphans(header.hash());

        Ok(())
    }

    /// Triggers chain compaction, cleaning up some unnecessary historical
    /// information. We introduce a chain depth called horizon, which is
    /// typically in the range of a couple days. Before that horizon, this
    /// method will:
    ///
    /// * compact the MMR data files and flush the corresponding remove logs
    /// * delete old records from the k/v store (older blocks, indexes, etc.)
    ///
    /// This operation can be resource intensive and takes some time to execute.
    /// Meanwhile, the chain will not be able to accept new blocks. It should
    /// therefore be called judiciously.
    pub fn compact(&self) -> Result<(), Error> {
        let mut txhashset = self.txhashset.write().unwrap();
        txhashset.compact()?;

        let horizon = global::cut_through_horizon() as u64;
        let head = self.head()?;
        let mut current = self.store.get_header_by_height(head.height - horizon - 1)?;
        loop {
            match self.store.get_block(&current.hash()) {
                Ok(b) => {
                    self.store.delete_block(&b.hash())?;
                    self.store.delete_block_pmmr_file_metadata(&b.hash())?;
                    self.store.delete_block_marker(&b.hash())?;
                }
                Err(NotFoundErr) => {
                    break;
                }
                Err(e) => return Err(Error::StoreErr(e, "retrieving block to compact".to_owned())),
            }
            if current.height <= 1 {
                break;
            }
            match self.store.get_block_header(&current.previous) {
                Ok(h) => current = h,
                Err(NotFoundErr) => break,
                Err(e) => return Err(From::from(e)),
            }
        }
        Ok(())
    }

    /// Returns the last n nodes inserted into the output PMMR
    pub fn get_last_n_output(&self, distance: u64) -> Vec<(Hash, Option<OutputStoreable>)> {
        let mut txhashset = self.txhashset.write().unwrap();
        txhashset.last_n_output(distance)
    }

    /// as above, for rangeproofs
    pub fn get_last_n_rangeproof(&self, distance: u64) -> Vec<(Hash, Option<RangeProof>)> {
        let mut txhashset = self.txhashset.write().unwrap();
        txhashset.last_n_rangeproof(distance)
    }

    /// as above, for kernels
    pub fn get_last_n_kernel(&self, distance: u64) -> Vec<(Hash, Option<TxKernel>)> {
        let mut txhashset = self.txhashset.write().unwrap();
        txhashset.last_n_kernel(distance)
    }

    /// Total difficulty at the head of the chain
    pub fn total_difficulty(&self) -> Difficulty {
        self.head.lock().unwrap().clone().total_difficulty
    }

    /// Total difficulty at the head of the header chain
    pub fn total_header_difficulty(&self) -> Result<Difficulty, Error> {
        Ok(self.store.get_header_head()?.total_difficulty)
    }

    /// Reset header_head and sync_head to head of current body chain
    pub fn reset_head(&self) -> Result<(), Error> {
        self.store
            .reset_head()
            .map_err(|e| Error::StoreErr(e, "chain reset_head".to_owned()))
    }

    /// Get the tip that's also the head of the chain
    pub fn head(&self) -> Result<Tip, Error> {
        Ok(self.head.lock().unwrap().clone())
    }

    /// Block header for the chain head
    pub fn head_header(&self) -> Result<BlockHeader, Error> {
        self.store
            .head_header()
            .map_err(|e| Error::StoreErr(e, "chain head header".to_owned()))
    }

    /// Gets a block by hash
    pub fn get_block(&self, h: &Hash) -> Result<Block, Error> {
        self.store
            .get_block(h)
            .map_err(|e| Error::StoreErr(e, "chain get block".to_owned()))
    }

    /// Gets a block header by hash
    pub fn get_block_header(&self, h: &Hash) -> Result<BlockHeader, Error> {
        self.store
            .get_block_header(h)
            .map_err(|e| Error::StoreErr(e, "chain get header".to_owned()))
    }

    /// Gets the block header at the provided height
    pub fn get_header_by_height(&self, height: u64) -> Result<BlockHeader, Error> {
        self.store
            .get_header_by_height(height)
            .map_err(|e| Error::StoreErr(e, "chain get header by height".to_owned()))
    }

    /// Verifies the given block header is actually on the current chain.
    /// Checks the header_by_height index to verify the header is where we say it is
    pub fn is_on_current_chain(&self, header: &BlockHeader) -> Result<(), Error> {
        self.store
            .is_on_current_chain(header)
            .map_err(|e| Error::StoreErr(e, "chain is_on_current_chain".to_owned()))
    }

    /// Get the tip of the current "sync" header chain.
    /// This may be significantly different from the current header chain.
    pub fn get_sync_head(&self) -> Result<Tip, Error> {
        self.store
            .get_sync_head()
            .map_err(|e| Error::StoreErr(e, "chain get sync head".to_owned()))
    }

    /// Get the tip of the header chain.
    pub fn get_header_head(&self) -> Result<Tip, Error> {
        self.store
            .get_header_head()
            .map_err(|e| Error::StoreErr(e, "chain get header head".to_owned()))
    }

    /// Builds an iterator on blocks starting from the current chain head and
    /// running backward. Specialized to return information pertaining to block
    /// difficulty calculation (timestamp and previous difficulties).
    pub fn difficulty_iter(&self) -> store::DifficultyIter {
        let head = self.head.lock().unwrap();
        store::DifficultyIter::from(head.last_block_h, self.store.clone())
    }

    /// Check whether we have a block without reading it
    pub fn block_exists(&self, h: Hash) -> Result<bool, Error> {
        self.store
            .block_exists(&h)
            .map_err(|e| Error::StoreErr(e, "chain block exists".to_owned()))
    }

    /// Retrieve the file index metadata for a given block
    pub fn get_block_pmmr_file_metadata(
        &self,
        h: &Hash,
    ) -> Result<PMMRFileMetadataCollection, Error> {
        self.store
            .get_block_pmmr_file_metadata(h)
            .map_err(|e| Error::StoreErr(e, "retrieve block pmmr metadata".to_owned()))
    }
}