// Copyright 2016 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Facade and handler for the rest of the blockchain implementation, and
//! mostly for the chain pipeline.
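//!
//! A rough usage sketch (illustrative only; the db root, adapter, genesis
//! block and proof-of-work verifier are all supplied by the caller and are
//! not defined in this module):
//!
//! ```ignore
//! let chain = Chain::init(db_root, adapter, Some(genesis), pow_verifier)?;
//! let new_tip = chain.process_block(block, opts)?;
//! ```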

use std::collections::VecDeque;
use std::sync::{Arc, Mutex, RwLock};

use util::secp::pedersen::{Commitment, RangeProof};

use core::core::SumCommit;
use core::core::pmmr::{NoSum, HashSum};
use core::core::{Block, BlockHeader, Output, TxKernel};
use core::core::target::Difficulty;
use core::core::hash::Hash;
use grin_store::Error::NotFoundErr;
use pipe;
use store;
use sumtree;
use types::*;
use util::LOGGER;

use core::global::{MiningParameterMode, MINING_PARAMETER_MODE};

const MAX_ORPHANS: usize = 20;

/// Facade to the blockchain block processing pipeline and storage. Provides
/// the current view of the UTXO set according to the chain state. Also
/// maintains locking for the pipeline to avoid conflicting processing.
pub struct Chain {
	store: Arc<ChainStore>,
	adapter: Arc<ChainAdapter>,

	head: Arc<Mutex<Tip>>,
	orphans: Arc<Mutex<VecDeque<(Options, Block)>>>,
	sumtrees: Arc<RwLock<sumtree::SumTrees>>,

	// POW verification function
	pow_verifier: fn(&BlockHeader, u32) -> bool,
}

unsafe impl Sync for Chain {}
unsafe impl Send for Chain {}

impl Chain {
	/// Check whether the chain exists. If not, the call to `init` will
	/// expect an already mined genesis block. This keeps the chain free
	/// from needing to know about the mining implementation.
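	///
	/// A hedged sketch of the intended call pattern; `mine_genesis_block`,
	/// `adapter` and `pow_verifier` are hypothetical caller-side values:
	///
	/// ```ignore
	/// // only build/mine a genesis block when no chain exists yet
	/// let gen = if Chain::chain_exists(db_root.clone()) {
	/// 	None
	/// } else {
	/// 	Some(mine_genesis_block()) // hypothetical helper
	/// };
	/// let chain = Chain::init(db_root, adapter, gen, pow_verifier)?;
	/// ```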
	pub fn chain_exists(db_root: String) -> bool {
		let chain_store = store::ChainKVStore::new(db_root).unwrap();
		match chain_store.head() {
			Ok(_) => true,
			Err(NotFoundErr) => false,
			Err(_) => false,
		}
	}

	/// Initializes the blockchain and returns a new Chain instance. Does a
	/// check on the current chain head to make sure it exists and creates one
	/// based on the genesis block if necessary.
	pub fn init(
		db_root: String,
		adapter: Arc<ChainAdapter>,
		gen_block: Option<Block>,
		pow_verifier: fn(&BlockHeader, u32) -> bool,
	) -> Result<Chain, Error> {
		let chain_store = store::ChainKVStore::new(db_root.clone())?;

		// check if we have a head in store, otherwise the genesis block is it
		let head = match chain_store.head() {
			Ok(tip) => tip,
			Err(NotFoundErr) => {
				if let None = gen_block {
					return Err(Error::GenesisBlockRequired);
				}

				let gen = gen_block.unwrap();
				chain_store.save_block(&gen)?;
				chain_store.setup_height(&gen.header)?;

				// saving a new tip based on genesis
				let tip = Tip::new(gen.hash());
				chain_store.save_head(&tip)?;
				info!(LOGGER, "Saved genesis block with hash {}", gen.hash());
				tip
			}
			Err(e) => return Err(Error::StoreErr(e, "chain init load head".to_owned())),
		};

		let store = Arc::new(chain_store);
		let sumtrees = sumtree::SumTrees::open(db_root, store.clone())?;

		Ok(Chain {
			store: store,
			adapter: adapter,
			head: Arc::new(Mutex::new(head)),
			orphans: Arc::new(Mutex::new(VecDeque::with_capacity(MAX_ORPHANS + 1))),
			sumtrees: Arc::new(RwLock::new(sumtrees)),
			pow_verifier: pow_verifier,
		})
	}

	/// Attempt to add a new block to the chain. Returns the new chain tip if it
	/// has been added to the longest chain, None if it's added to an (as of
	/// now) orphan chain.
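	///
	/// A hedged sketch of how a caller might handle the different outcomes
	/// (the block and options are caller-provided):
	///
	/// ```ignore
	/// match chain.process_block(block, opts) {
	/// 	Ok(Some(tip)) => { /* block extended the chain head */ }
	/// 	Ok(None) => { /* accepted, but not the new head */ }
	/// 	Err(Error::Orphan) => { /* parked in the orphan queue for now */ }
	/// 	Err(e) => { /* rejected by the pipeline */ }
	/// }
	/// ```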
	pub fn process_block(&self, b: Block, opts: Options) -> Result<Option<Tip>, Error> {
		let head = self.store
			.head()
			.map_err(|e| Error::StoreErr(e, "chain load head".to_owned()))?;
		let height = head.height;
		let ctx = self.ctx_from_head(head, opts);

		let res = pipe::process_block(&b, ctx);

		match res {
			Ok(Some(ref tip)) => {
				// block got accepted and extended the head, updating our head
				let chain_head = self.head.clone();
				{
					let mut head = chain_head.lock().unwrap();
					*head = tip.clone();
				}

				// notifying other parts of the system of the update
				if !opts.intersects(SYNC) {
					// broadcast the block
					let adapter = self.adapter.clone();
					adapter.block_accepted(&b);
				}
				self.check_orphans();
			}
			Ok(None) => {}
			Err(Error::Orphan) => {
				if b.header.height < height + (MAX_ORPHANS as u64) {
					let mut orphans = self.orphans.lock().unwrap();
					orphans.push_front((opts, b));
					orphans.truncate(MAX_ORPHANS);
				}
			}
			Err(ref e) => {
				info!(
					LOGGER,
					"Rejected block {} at {}: {:?}",
					b.hash(),
					b.header.height,
					e
				);
			}
		}

		res
	}

	/// Attempt to add a new header to the header chain. Only necessary during
	/// sync.
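	///
	/// A hedged sketch of header-first sync; the headers would typically come
	/// from a peer (SYNC is the same option flag checked in `process_block`):
	///
	/// ```ignore
	/// for bh in headers {
	/// 	chain.process_block_header(&bh, SYNC)?;
	/// }
	/// ```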
	pub fn process_block_header(
		&self,
		bh: &BlockHeader,
		opts: Options,
	) -> Result<Option<Tip>, Error> {
		let head = self.store
			.get_header_head()
			.map_err(|e| Error::StoreErr(e, "chain header head".to_owned()))?;
		let ctx = self.ctx_from_head(head, opts);

		pipe::process_block_header(bh, ctx)
	}

	fn ctx_from_head(&self, head: Tip, opts: Options) -> pipe::BlockContext {
		let opts_in = opts;
		let param_ref = MINING_PARAMETER_MODE.read().unwrap();
		let opts_in = match *param_ref {
			MiningParameterMode::AutomatedTesting => opts_in | EASY_POW,
			MiningParameterMode::UserTesting => opts_in | EASY_POW,
			MiningParameterMode::Production => opts_in,
		};

		pipe::BlockContext {
			opts: opts_in,
			store: self.store.clone(),
			head: head,
			pow_verifier: self.pow_verifier,
			sumtrees: self.sumtrees.clone(),
		}
	}

	/// Pop orphans out of the queue and check if we can now accept them.
	fn check_orphans(&self) {
		// first check how many we have to retry; unfortunately we can't hold the
		// lock through the loop as it needs to be released before calling
		// process_block
		let orphan_count;
		{
			let orphans = self.orphans.lock().unwrap();
			orphan_count = orphans.len();
		}

		// pop each orphan and retry; if still orphaned it will be pushed again
		for _ in 0..orphan_count {
			let popped;
			{
				let mut orphans = self.orphans.lock().unwrap();
				popped = orphans.pop_back();
			}
			if let Some((opts, orphan)) = popped {
				let _process_result = self.process_block(orphan, opts);
			}
		}
	}

	/// Gets an unspent output from its commitment. Will return an
	/// `OutputNotFound` error if the output doesn't exist or has been spent.
	/// This querying is done in a way that's consistent with the current chain
	/// state and more specifically the current winning fork.
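	///
	/// Illustrative only; the commitment would come from a wallet or a peer:
	///
	/// ```ignore
	/// match chain.get_unspent(&commit) {
	/// 	Ok(out) => { /* output is currently unspent */ }
	/// 	Err(Error::OutputNotFound) => { /* unknown or already spent */ }
	/// 	Err(e) => { /* underlying store error */ }
	/// }
	/// ```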
	pub fn get_unspent(&self, output_ref: &Commitment) -> Result<Output, Error> {
		let sumtrees = self.sumtrees.read().unwrap();
		let is_unspent = sumtrees.is_unspent(output_ref)?;
		if is_unspent {
			self.store
				.get_output_by_commit(output_ref)
				.map_err(|e| Error::StoreErr(e, "chain get unspent".to_owned()))
		} else {
			Err(Error::OutputNotFound)
		}
	}

	/// Sets the sumtree roots on a brand new block by applying the block on
	/// the current sumtree state.
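	///
	/// Typically used by the miner on a freshly assembled candidate block
	/// (a hedged sketch; block construction is elided):
	///
	/// ```ignore
	/// let mut b = build_candidate_block(); // hypothetical helper
	/// chain.set_sumtree_roots(&mut b)?;
	/// // b.header now carries utxo_root, range_proof_root and kernel_root
	/// ```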
	pub fn set_sumtree_roots(&self, b: &mut Block) -> Result<(), Error> {
		let mut sumtrees = self.sumtrees.write().unwrap();

		let roots = sumtree::extending(&mut sumtrees, |extension| {
			// apply the block on the sumtrees and check the resulting root
			extension.apply_block(b)?;
			extension.force_rollback();
			Ok(extension.roots())
		})?;

		b.header.utxo_root = roots.0.hash;
		b.header.range_proof_root = roots.1.hash;
		b.header.kernel_root = roots.2.hash;
		Ok(())
	}

	/// Returns the current sumtree roots.
	pub fn get_sumtree_roots(
		&self,
	) -> (
		HashSum<SumCommit>,
		HashSum<NoSum<RangeProof>>,
		HashSum<NoSum<TxKernel>>,
	) {
		let mut sumtrees = self.sumtrees.write().unwrap();
		sumtrees.roots()
	}

	/// Returns the last n nodes inserted into the utxo sum tree.
	/// Returns the sum tree hash plus the output itself (as the sum is
	/// contained in the output anyhow).
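	///
	/// Illustrative only:
	///
	/// ```ignore
	/// for (hash, output) in chain.get_last_n_utxo(10) {
	/// 	// hash is the sum tree node hash, output the corresponding UTXO
	/// }
	/// ```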
	pub fn get_last_n_utxo(&self, distance: u64) -> Vec<(Hash, Output)> {
		let mut sumtrees = self.sumtrees.write().unwrap();
		let mut return_vec = Vec::new();
		let sum_nodes = sumtrees.last_n_utxo(distance);
		for sum_commit in sum_nodes {
			let output = self.store.get_output_by_commit(&sum_commit.sum.commit);
			return_vec.push((sum_commit.hash, output.unwrap()));
		}
		return_vec
	}

	/// As above, for rangeproofs
	pub fn get_last_n_rangeproof(&self, distance: u64) -> Vec<HashSum<NoSum<RangeProof>>> {
		let mut sumtrees = self.sumtrees.write().unwrap();
		sumtrees.last_n_rangeproof(distance)
	}

	/// As above, for kernels
	pub fn get_last_n_kernel(&self, distance: u64) -> Vec<HashSum<NoSum<TxKernel>>> {
		let mut sumtrees = self.sumtrees.write().unwrap();
		sumtrees.last_n_kernel(distance)
	}

	/// Total difficulty at the head of the chain
	pub fn total_difficulty(&self) -> Difficulty {
		self.head.lock().unwrap().clone().total_difficulty
	}

	/// Get the tip that's also the head of the chain
	pub fn head(&self) -> Result<Tip, Error> {
		Ok(self.head.lock().unwrap().clone())
	}

	/// Block header for the chain head
	pub fn head_header(&self) -> Result<BlockHeader, Error> {
		self.store
			.head_header()
			.map_err(|e| Error::StoreErr(e, "chain head header".to_owned()))
	}

	/// Gets a block by hash
	pub fn get_block(&self, h: &Hash) -> Result<Block, Error> {
		self.store
			.get_block(h)
			.map_err(|e| Error::StoreErr(e, "chain get block".to_owned()))
	}

	/// Gets a block header by hash
	pub fn get_block_header(&self, h: &Hash) -> Result<BlockHeader, Error> {
		self.store
			.get_block_header(h)
			.map_err(|e| Error::StoreErr(e, "chain get header".to_owned()))
	}

	/// Gets the block header at the provided height
	pub fn get_header_by_height(&self, height: u64) -> Result<BlockHeader, Error> {
		self.store
			.get_header_by_height(height)
			.map_err(|e| Error::StoreErr(e, "chain get header by height".to_owned()))
	}

	/// Gets the block header by the provided output commitment
	pub fn get_block_header_by_output_commit(
		&self,
		commit: &Commitment,
	) -> Result<BlockHeader, Error> {
		self.store
			.get_block_header_by_output_commit(commit)
			.map_err(|e| Error::StoreErr(e, "chain get commitment".to_owned()))
	}

	/// Get the tip of the header chain
	pub fn get_header_head(&self) -> Result<Tip, Error> {
		self.store
			.get_header_head()
			.map_err(|e| Error::StoreErr(e, "chain get header head".to_owned()))
	}

	/// Builds an iterator on blocks starting from the current chain head and
	/// running backward. Specialized to return information pertaining to block
	/// difficulty calculation (timestamp and previous difficulties).
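	///
	/// A hedged sketch; feeding the iterator into a difficulty calculation on
	/// the consensus side is an assumption about the caller, not something
	/// this module defines:
	///
	/// ```ignore
	/// let next_diff = consensus::next_difficulty(chain.difficulty_iter());
	/// ```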
	pub fn difficulty_iter(&self) -> store::DifficultyIter {
		let head = self.head.lock().unwrap();
		store::DifficultyIter::from(head.last_block_h, self.store.clone())
	}
}