// Copyright 2018 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Facade and handler for the rest of the blockchain implementation
//! and mostly the chain pipeline.

use std::collections::HashMap;
use std::fs::File;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::{Arc, Mutex, RwLock};
use std::time::{Duration, Instant};

use lmdb;
use lru_cache::LruCache;

use core::core::hash::{Hash, Hashed};
use core::core::merkle_proof::MerkleProof;
use core::core::verifier_cache::VerifierCache;
use core::core::{Block, BlockHeader, BlockSums, Output, OutputIdentifier, Transaction, TxKernel};
use core::global;
use core::pow::{self, Difficulty};
use error::{Error, ErrorKind};
use grin_store::Error::NotFoundErr;
use pipe;
use store;
use store::Batch;
use txhashset;
use types::{ChainAdapter, NoStatus, Options, Tip, TxHashsetWriteStatus};
use util::secp::pedersen::{Commitment, RangeProof};
use util::LOGGER;

/// Orphan pool size is limited by MAX_ORPHAN_SIZE
pub const MAX_ORPHAN_SIZE: usize = 200;

/// When evicting, very old orphans are evicted first
const MAX_ORPHAN_AGE_SECS: u64 = 300;

/// Number of recent hashes we keep to de-duplicate block or header sends
const HASHES_CACHE_SIZE: usize = 200;

#[derive(Debug, Clone)]
struct Orphan {
    block: Block,
    opts: Options,
    added: Instant,
}

pub struct OrphanBlockPool {
    // blocks indexed by their hash
    orphans: RwLock<HashMap<Hash, Orphan>>,
    // additional index of height -> hash
    // so we can efficiently identify a child block (ex-orphan) after processing a block
    height_idx: RwLock<HashMap<u64, Vec<Hash>>>,
    // accumulated number of blocks evicted because of the MAX_ORPHAN_SIZE limit
    evicted: AtomicUsize,
}

impl OrphanBlockPool {
    fn new() -> OrphanBlockPool {
        OrphanBlockPool {
            orphans: RwLock::new(HashMap::new()),
            height_idx: RwLock::new(HashMap::new()),
            evicted: AtomicUsize::new(0),
        }
    }

    fn len(&self) -> usize {
        let orphans = self.orphans.read().unwrap();
        orphans.len()
    }

    fn len_evicted(&self) -> usize {
        self.evicted.load(Ordering::Relaxed)
    }

    fn add(&self, orphan: Orphan) {
        let mut orphans = self.orphans.write().unwrap();
        let mut height_idx = self.height_idx.write().unwrap();
        {
            let height_hashes = height_idx
                .entry(orphan.block.header.height)
                .or_insert(vec![]);
            height_hashes.push(orphan.block.hash());
            orphans.insert(orphan.block.hash(), orphan);
        }

        if orphans.len() > MAX_ORPHAN_SIZE {
            let old_len = orphans.len();

            // evict too old
            orphans.retain(|_, ref mut x| {
                x.added.elapsed() < Duration::from_secs(MAX_ORPHAN_AGE_SECS)
            });
            // evict too far ahead
            let mut heights = height_idx.keys().cloned().collect::<Vec<u64>>();
            heights.sort_unstable();
            for h in heights.iter().rev() {
                if let Some(hs) = height_idx.remove(h) {
                    for h in hs {
                        let _ = orphans.remove(&h);
                    }
                }
                if orphans.len() < MAX_ORPHAN_SIZE {
                    break;
                }
            }
            // cleanup index
            height_idx.retain(|_, ref mut xs| xs.iter().any(|x| orphans.contains_key(&x)));

            self.evicted
                .fetch_add(old_len - orphans.len(), Ordering::Relaxed);
        }
    }

    /// Get all orphans at the given height, removing them from the pool at
    /// the same time (avoids cloning them out).
    fn remove_by_height(&self, height: &u64) -> Option<Vec<Orphan>> {
        let mut orphans = self.orphans.write().unwrap();
        let mut height_idx = self.height_idx.write().unwrap();
        height_idx
            .remove(height)
            .map(|hs| hs.iter().filter_map(|h| orphans.remove(h)).collect())
    }

    pub fn contains(&self, hash: &Hash) -> bool {
        let orphans = self.orphans.read().unwrap();
        orphans.contains_key(hash)
    }
}

/// Facade to the blockchain block processing pipeline and storage. Provides
/// the current view of the TxHashSet according to the chain state. Also
/// maintains locking for the pipeline to avoid conflicting processing.
pub struct Chain {
    db_root: String,
    store: Arc<store::ChainStore>,
    adapter: Arc<ChainAdapter>,

    head: Arc<Mutex<Tip>>,
    orphans: Arc<OrphanBlockPool>,
    txhashset: Arc<RwLock<txhashset::TxHashSet>>,
    // Recently processed blocks to avoid double-processing
    block_hashes_cache: Arc<RwLock<LruCache<Hash, bool>>>,
    verifier_cache: Arc<RwLock<VerifierCache>>,
    // POW verification function
    pow_verifier: fn(&BlockHeader, u8) -> Result<(), pow::Error>,
    archive_mode: bool,
}

unsafe impl Sync for Chain {}
unsafe impl Send for Chain {}

impl Chain {
    /// Initializes the blockchain and returns a new Chain instance. Does a
    /// check on the current chain head to make sure it exists and creates one
    /// based on the genesis block if necessary.
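    ///
    /// A minimal usage sketch (illustrative only): the `db_env`, `adapter`,
    /// `genesis` and `verifier_cache` values are assumed to be set up by the
    /// caller, and `my_pow_verifier` is just a placeholder for whatever
    /// function of type `fn(&BlockHeader, u8) -> Result<(), pow::Error>` the
    /// node actually uses.
    ///
    /// ```ignore
    /// let chain = Chain::init(
    ///     ".grin/chain_data".to_string(), // db_root
    ///     db_env,                         // Arc<lmdb::Environment>
    ///     adapter,                        // Arc<ChainAdapter>
    ///     genesis,                        // genesis Block
    ///     my_pow_verifier,                // assumed PoW verification fn
    ///     verifier_cache,                 // Arc<RwLock<VerifierCache>>
    ///     false,                          // archive_mode
    /// )?;
    /// ```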
    pub fn init(
        db_root: String,
        db_env: Arc<lmdb::Environment>,
        adapter: Arc<ChainAdapter>,
        genesis: Block,
        pow_verifier: fn(&BlockHeader, u8) -> Result<(), pow::Error>,
        verifier_cache: Arc<RwLock<VerifierCache>>,
        archive_mode: bool,
    ) -> Result<Chain, Error> {
        let chain_store = store::ChainStore::new(db_env)?;

        let store = Arc::new(chain_store);

        // open the txhashset, creating a new one if necessary
        let mut txhashset = txhashset::TxHashSet::open(db_root.clone(), store.clone(), None)?;

        setup_head(genesis, store.clone(), &mut txhashset)?;

        // Now reload the chain head (either existing head or genesis from above)
        let head = store.head()?;

        debug!(
            LOGGER,
            "Chain init: {} @ {} [{}]",
            head.total_difficulty.to_num(),
            head.height,
            head.last_block_h,
        );

        Ok(Chain {
            db_root: db_root,
            store: store,
            adapter: adapter,
            head: Arc::new(Mutex::new(head)),
            orphans: Arc::new(OrphanBlockPool::new()),
            txhashset: Arc::new(RwLock::new(txhashset)),
            pow_verifier,
            verifier_cache,
            block_hashes_cache: Arc::new(RwLock::new(LruCache::new(HASHES_CACHE_SIZE))),
            archive_mode,
        })
    }

    /// Processes a single block, then checks for orphans, processing
    /// those as well if they're found.
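    ///
    /// The `(Option<Tip>, Option<Block>)` return value can be read as: the
    /// tip is `Some` only when the block extended the most-work chain, and
    /// the block is handed back whenever it was accepted (head or fork).
    /// A minimal sketch of how a caller might react (illustrative only;
    /// `Options::NONE` is assumed to be the default options value):
    ///
    /// ```ignore
    /// match chain.process_block(block, Options::NONE) {
    ///     Ok((Some(tip), _)) => println!("new head at height {}", tip.height),
    ///     Ok((None, _)) => println!("block accepted but did not extend the head"),
    ///     Err(e) => println!("block rejected or orphaned: {:?}", e),
    /// }
    /// ```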
    pub fn process_block(
        &self,
        b: Block,
        opts: Options,
    ) -> Result<(Option<Tip>, Option<Block>), Error> {
        match self.process_block_no_orphans(b, opts) {
            Ok((t, b)) => {
                // We accepted a block, so see if we can accept any orphans
                if let Some(ref b) = b {
                    self.check_orphans(b.header.height + 1);
                }
                Ok((t, b))
            }
            Err(e) => Err(e),
        }
    }

    /// Attempt to add a new block to the chain. Returns the new chain tip if it
    /// has been added to the longest chain, None if it's added to an (as of
    /// now) orphan chain.
    pub fn process_block_no_orphans(
        &self,
        b: Block,
        opts: Options,
    ) -> Result<(Option<Tip>, Option<Block>), Error> {
        let mut batch = self.store.batch()?;
        let bhash = b.hash();
        let mut ctx = self.new_ctx(opts, &mut batch)?;

        let res = pipe::process_block(&b, &mut ctx, &mut batch, self.verifier_cache.clone());

        let add_to_hash_cache = || {
            // only add to hash cache below if block is definitively accepted
            // or rejected
            let mut cache = self.block_hashes_cache.write().unwrap();
            cache.insert(bhash, true);
        };

        match res {
            Ok(Some(ref tip)) => {
                batch.commit()?;

                // block got accepted and extended the head, updating our head
                let chain_head = self.head.clone();
                {
                    let mut head = chain_head.lock().unwrap();
                    *head = tip.clone();
                }
                add_to_hash_cache();

                // notifying other parts of the system of the update
                self.adapter.block_accepted(&b, opts);

                Ok((Some(tip.clone()), Some(b)))
            }
            Ok(None) => {
                batch.commit()?;

                add_to_hash_cache();

                // block got accepted but we did not extend the head
                // so it's on a fork (or is the start of a new fork)
                // broadcast the block out so everyone knows about the fork
                self.adapter.block_accepted(&b, opts);

                Ok((None, Some(b)))
            }
            Err(e) => {
                match e.kind() {
                    ErrorKind::Orphan => {
                        let block_hash = b.hash();
                        let orphan = Orphan {
                            block: b,
                            opts: opts,
                            added: Instant::now(),
                        };

                        // In the case of a fork - it is possible to have multiple blocks
                        // that are children of a given block.
                        // We do not handle this currently for orphans (future enhancement?).
                        // We just assume "last one wins" for now.
                        self.orphans.add(orphan);

                        debug!(
                            LOGGER,
                            "process_block: orphan: {:?}, # orphans {}{}",
                            block_hash,
                            self.orphans.len(),
                            if self.orphans.len_evicted() > 0 {
                                format!(", # evicted {}", self.orphans.len_evicted())
                            } else {
                                String::new()
                            },
                        );
                        Err(ErrorKind::Orphan.into())
                    }
                    ErrorKind::Unfit(ref msg) => {
                        debug!(
                            LOGGER,
                            "Block {} at {} is unfit at this time: {}",
                            b.hash(),
                            b.header.height,
                            msg
                        );
                        Err(ErrorKind::Unfit(msg.clone()).into())
                    }
                    _ => {
                        info!(
                            LOGGER,
                            "Rejected block {} at {}: {:?}",
                            b.hash(),
                            b.header.height,
                            e
                        );
                        add_to_hash_cache();
                        Err(ErrorKind::Other(format!("{:?}", e).to_owned()).into())
                    }
                }
            }
        }
    }

    /// Process a block header received during "header first" propagation.
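    ///
    /// A minimal sketch of the intended flow (illustrative only;
    /// `Options::NONE` is assumed to be the default options value and the
    /// "request the full block" step lives outside this module):
    ///
    /// ```ignore
    /// // A peer announced a new header: validate and track it first,
    /// // then ask for the full block only if the header checks out.
    /// if chain.process_block_header(&bh, Options::NONE).is_ok() {
    ///     request_block_from_peer(bh.hash()); // hypothetical helper
    /// }
    /// ```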
    pub fn process_block_header(&self, bh: &BlockHeader, opts: Options) -> Result<(), Error> {
        let mut batch = self.store.batch()?;
        let mut ctx = self.new_ctx(opts, &mut batch)?;
        pipe::process_block_header(bh, &mut ctx, &mut batch)?;
        batch.commit()?;
        Ok(())
    }

    /// Attempt to add new headers to the header chain.
    /// This is only ever used during sync and uses sync_head.
    pub fn sync_block_headers(
        &self,
        headers: &Vec<BlockHeader>,
        opts: Options,
    ) -> Result<(), Error> {
        let mut batch = self.store.batch()?;
        let mut ctx = self.new_ctx(opts, &mut batch)?;
        pipe::sync_block_headers(headers, &mut ctx, &mut batch)?;
        batch.commit()?;
        Ok(())
    }

    fn new_ctx(&self, opts: Options, batch: &mut Batch) -> Result<pipe::BlockContext, Error> {
        let head = batch.head()?;
        let header_head = batch.get_header_head()?;
        Ok(pipe::BlockContext {
            opts,
            head,
            header_head,
            pow_verifier: self.pow_verifier,
            block_hashes_cache: self.block_hashes_cache.clone(),
            txhashset: self.txhashset.clone(),
            orphans: self.orphans.clone(),
        })
    }

    /// Check if hash is for a known orphan.
    pub fn is_orphan(&self, hash: &Hash) -> bool {
        self.orphans.contains(hash)
    }

    /// Get the OrphanBlockPool accumulated number of evicted blocks.
    pub fn orphans_evicted_len(&self) -> usize {
        self.orphans.len_evicted()
    }

    /// Check for orphans, once a block is successfully added
    pub fn check_orphans(&self, mut height: u64) {
        let initial_height = height;

        // Is there an orphan in our orphans that we can now process?
        loop {
            trace!(
                LOGGER,
                "check_orphans: at {}, # orphans {}",
                height,
                self.orphans.len(),
            );

            let mut orphan_accepted = false;
            let mut height_accepted = height;

            if let Some(orphans) = self.orphans.remove_by_height(&height) {
                let orphans_len = orphans.len();
                for (i, orphan) in orphans.into_iter().enumerate() {
                    debug!(
                        LOGGER,
                        "check_orphans: get block {} at {}{}",
                        orphan.block.hash(),
                        height,
                        if orphans_len > 1 {
                            format!(", no.{} of {} orphans", i, orphans_len)
                        } else {
                            String::new()
                        },
                    );
                    let res = self.process_block_no_orphans(orphan.block, orphan.opts);
                    if let Ok((_, Some(b))) = res {
                        orphan_accepted = true;
                        height_accepted = b.header.height;
                    }
                }

                if orphan_accepted {
                    // We accepted a block, so see if we can accept any orphans
                    height = height_accepted + 1;
                    continue;
                }
            }
            break;
        }

        if initial_height != height {
            debug!(
                LOGGER,
                "check_orphans: {} blocks accepted since height {}, remaining # orphans {}",
                height - initial_height,
                initial_height,
                self.orphans.len(),
            );
        }
    }

    /// TODO - where do we call this from? And do we need a rewind first?
    /// For the given commitment find the unspent output and return the
    /// associated hash in the output MMR. Return an error if the output
    /// does not exist or has been spent. This querying is done in a way that
    /// is consistent with the current chain state, specifically the current
    /// winning (valid, most work) fork.
    pub fn is_unspent(&self, output_ref: &OutputIdentifier) -> Result<Hash, Error> {
        let mut txhashset = self.txhashset.write().unwrap();
        let res = txhashset.is_unspent(output_ref);
        match res {
            Err(e) => Err(e),
            Ok((h, _)) => Ok(h),
        }
    }

    /// Validate the tx against the current UTXO set.
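    ///
    /// A minimal sketch of how the transaction-side checks in this file fit
    /// together (illustrative only; a real pool adapter does more than this):
    ///
    /// ```ignore
    /// // Reject the tx early if it spends non-existent or spent outputs,
    /// // spends an immature coinbase, or is time-locked past the next block.
    /// chain.validate_tx(&tx)?;
    /// chain.verify_coinbase_maturity(&tx)?;
    /// chain.verify_tx_lock_height(&tx)?;
    /// ```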
    pub fn validate_tx(&self, tx: &Transaction) -> Result<(), Error> {
        let txhashset = self.txhashset.read().unwrap();
        txhashset::utxo_view(&txhashset, |utxo| {
            utxo.validate_tx(tx)?;
            Ok(())
        })
    }

    fn next_block_height(&self) -> Result<u64, Error> {
        let bh = self.head_header()?;
        Ok(bh.height + 1)
    }

    /// Verify we are not attempting to spend a coinbase output
    /// that has not yet sufficiently matured.
    pub fn verify_coinbase_maturity(&self, tx: &Transaction) -> Result<(), Error> {
        let height = self.next_block_height()?;
        let mut txhashset = self.txhashset.write().unwrap();
        txhashset::extending_readonly(&mut txhashset, |extension| {
            extension.verify_coinbase_maturity(&tx.inputs(), height)?;
            Ok(())
        })
    }

    /// Verify that the tx has a lock_height that is less than or equal to
    /// the height of the next block.
    pub fn verify_tx_lock_height(&self, tx: &Transaction) -> Result<(), Error> {
        let height = self.next_block_height()?;
        if tx.lock_height() <= height {
            Ok(())
        } else {
            Err(ErrorKind::TxLockHeight.into())
        }
    }

    /// Validate the current chain state.
    pub fn validate(&self, skip_rproofs: bool) -> Result<(), Error> {
        let header = self.store.head_header()?;

        // Let's just treat an "empty" node that just got started up as valid.
        if header.height == 0 {
            return Ok(());
        }

        let mut txhashset = self.txhashset.write().unwrap();

        // Now create an extension from the txhashset and validate against the
        // latest block header. Rewind the extension to the specified header to
        // ensure the view is consistent.
        txhashset::extending_readonly(&mut txhashset, |extension| {
            extension.rewind(&header)?;
            extension.validate(skip_rproofs, &NoStatus)?;
            Ok(())
        })
    }

    /// Sets the txhashset roots on a brand new block by applying the block on
    /// the current txhashset state.
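    ///
    /// A minimal sketch of where this sits in block building (illustrative
    /// only; assembling the block body from pool transactions and running the
    /// proof-of-work search happen outside this module):
    ///
    /// ```ignore
    /// // `b` is a candidate block whose header roots are still unset.
    /// // `is_fork` is true when the candidate does not build on the current head.
    /// chain.set_txhashset_roots(&mut b, false)?;
    /// // ... then solve the PoW for b.header and submit via process_block().
    /// ```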
    pub fn set_txhashset_roots(&self, b: &mut Block, is_fork: bool) -> Result<(), Error> {
        let mut txhashset = self.txhashset.write().unwrap();
        let (roots, sizes) = txhashset::extending_readonly(&mut txhashset, |extension| {
            if is_fork {
                pipe::rewind_and_apply_fork(b, extension)?;
            }
            extension.apply_block(b)?;
            Ok((extension.roots(), extension.sizes()))
        })?;

        b.header.output_root = roots.output_root;
        b.header.range_proof_root = roots.rproof_root;
        b.header.kernel_root = roots.kernel_root;
        b.header.output_mmr_size = sizes.0;
        b.header.kernel_mmr_size = sizes.2;
        Ok(())
    }

    /// Return a pre-built Merkle proof for the given commitment from the store.
    pub fn get_merkle_proof(
        &self,
        output: &OutputIdentifier,
        block_header: &BlockHeader,
    ) -> Result<MerkleProof, Error> {
        let mut txhashset = self.txhashset.write().unwrap();

        let merkle_proof = txhashset::extending_readonly(&mut txhashset, |extension| {
            extension.rewind(&block_header)?;
            extension.merkle_proof(output)
        })?;

        Ok(merkle_proof)
    }

    /// Return a merkle proof valid for the current output pmmr state at the
    /// given pos
    pub fn get_merkle_proof_for_pos(&self, commit: Commitment) -> Result<MerkleProof, String> {
        let mut txhashset = self.txhashset.write().unwrap();
        txhashset.merkle_proof(commit)
    }

    /// Returns current txhashset roots
    pub fn get_txhashset_roots(&self) -> (Hash, Hash, Hash) {
        let mut txhashset = self.txhashset.write().unwrap();
        txhashset.roots()
    }

    /// Provides a reading view into the current txhashset state as well as
    /// the required indexes for a consumer to rewind to a consistent state
    /// at the provided block hash.
    pub fn txhashset_read(&self, h: Hash) -> Result<(u64, u64, File), Error> {
        // now we want to rewind the txhashset extension and
        // sync a "rewound" copy of the leaf_set files to disk
        // so we can send these across as part of the zip file.
        // The fast sync client does *not* have the necessary data
        // to rewind after receiving the txhashset zip.
        let header = self.store.get_block_header(&h)?;
        {
            let mut txhashset = self.txhashset.write().unwrap();
            txhashset::extending_readonly(&mut txhashset, |extension| {
                extension.rewind(&header)?;
                extension.snapshot()?;
                Ok(())
            })?;
        }

        // prepare the zip and return the corresponding Read
        let txhashset_reader = txhashset::zip_read(self.db_root.clone(), &header)?;
        Ok((
            header.output_mmr_size,
            header.kernel_mmr_size,
            txhashset_reader,
        ))
    }

    // Special handling to make sure the whole kernel set matches each of its
    // roots in each block header, without truncation. We go back header by
    // header, rewind and check each root. This fixes a potential weakness in
    // fast sync where a reorg past the horizon could allow a whole rewrite of
    // the kernel set.
    fn validate_kernel_history(
        &self,
        header: &BlockHeader,
        txhashset: &txhashset::TxHashSet,
    ) -> Result<(), Error> {
        debug!(
            LOGGER,
            "chain: validate_kernel_history: rewinding and validating kernel history (readonly)"
        );

        let mut count = 0;
        let mut current = header.clone();
        txhashset::rewindable_kernel_view(&txhashset, |view| {
            while current.height > 0 {
                view.rewind(&current)?;
                view.validate_root()?;
                current = view.batch().get_block_header(&current.previous)?;
                count += 1;
            }
            Ok(())
        })?;

        debug!(
            LOGGER,
            "chain: validate_kernel_history: validated kernel root on {} headers", count,
        );

        Ok(())
    }

    /// Writes a reading view on a txhashset state that's been provided to us.
    /// If we're willing to accept that new state, the data stream will be
    /// read as a zip file, unzipped and the resulting state files should be
    /// rewound to the provided indexes.
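    ///
    /// A minimal sketch of the fast-sync hand-off (illustrative only; the
    /// network layer that transports the zip file and the concrete
    /// `TxHashsetWriteStatus` implementation are assumed to exist elsewhere):
    ///
    /// ```ignore
    /// // Serving side: package the state at a given block hash.
    /// let (output_mmr_size, kernel_mmr_size, zip) = chain.txhashset_read(h)?;
    ///
    /// // Receiving side: validate and adopt the downloaded state.
    /// chain.txhashset_write(h, downloaded_zip, &status)?;
    /// ```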
    pub fn txhashset_write(
        &self,
        h: Hash,
        txhashset_data: File,
        status: &TxHashsetWriteStatus,
    ) -> Result<(), Error> {
        status.on_setup();
        let head = self.head().unwrap();
        let header_head = self.get_header_head().unwrap();
        if header_head.height - head.height < global::cut_through_horizon() as u64 {
            return Err(ErrorKind::InvalidTxHashSet("not needed".to_owned()).into());
        }

        let header = self.store.get_block_header(&h)?;
        txhashset::zip_write(self.db_root.clone(), txhashset_data, &header)?;

        let mut txhashset =
            txhashset::TxHashSet::open(self.db_root.clone(), self.store.clone(), Some(&header))?;

        // Validate the full kernel history (kernel MMR root for every block header).
        self.validate_kernel_history(&header, &txhashset)?;

        // all good, prepare a new batch and update all the required records
        debug!(
            LOGGER,
            "chain: txhashset_write: rewinding a 2nd time (writeable)"
        );
        let mut batch = self.store.batch()?;
        txhashset::extending(&mut txhashset, &mut batch, |extension| {
            extension.rewind(&header)?;

            // Validate the extension, generating the utxo_sum and kernel_sum.
            let (utxo_sum, kernel_sum) = extension.validate(false, status)?;

            // Now that we have block_sums the total_kernel_sum on the block_header is redundant.
            if header.total_kernel_sum != kernel_sum {
                return Err(
                    ErrorKind::Other(format!("total_kernel_sum in header does not match")).into(),
                );
            }

            // Save the block_sums (utxo_sum, kernel_sum) to the db for use later.
            extension.batch.save_block_sums(
                &header.hash(),
                &BlockSums {
                    utxo_sum,
                    kernel_sum,
                },
            )?;

            extension.rebuild_index()?;
            Ok(())
        })?;

        debug!(
            LOGGER,
            "chain: txhashset_write: finished validating and rebuilding"
        );

        status.on_save();
        // Replace the chain txhashset with the newly built one.
        {
            let mut txhashset_ref = self.txhashset.write().unwrap();
            *txhashset_ref = txhashset;
        }
        // Setup new head.
        let head = {
            let mut head = self.head.lock().unwrap();
            *head = Tip::from_block(&header);
            head.clone()
        };
        // Save the new head to the db and rebuild the header by height index.
        {
            batch.save_body_head(&head)?;
            batch.save_header_height(&header)?;
            batch.build_by_height_index(&header, true)?;
        }
        // Commit all the changes to the db.
        batch.commit()?;

        debug!(
            LOGGER,
            "chain: txhashset_write: finished committing the batch (head etc.)"
        );

        // Check for any orphan blocks and process them based on the new chain state.
        self.check_orphans(header.height + 1);

        status.on_done();
        Ok(())
    }

    /// Triggers chain compaction, cleaning up some unnecessary historical
    /// information. We introduce a chain depth called horizon, which is
    /// typically in the range of a couple of days. Before that horizon, this
    /// method will:
    ///
    /// * compact the MMR data files and flush the corresponding remove logs
    /// * delete old records from the k/v store (older blocks, indexes, etc.)
    ///
    /// This operation can be resource intensive and takes some time to execute.
    /// Meanwhile, the chain will not be able to accept new blocks. It should
    /// therefore be called judiciously.
    pub fn compact(&self) -> Result<(), Error> {
        if self.archive_mode {
            debug!(
                LOGGER,
                "Blockchain compaction disabled, node running in archive mode."
            );
            return Ok(());
        }

        debug!(LOGGER, "Starting blockchain compaction.");
        // Compact the txhashset via the extension.
        {
            let mut txhashes = self.txhashset.write().unwrap();
            txhashes.compact()?;

            // print out useful debug info after compaction
            txhashset::extending_readonly(&mut txhashes, |extension| {
                extension.dump_output_pmmr();
                Ok(())
            })?;
        }

        // Now check we can still successfully validate the chain state after
        // compacting; shouldn't be necessary once all of this is well-oiled
        debug!(LOGGER, "Validating state after compaction.");
        self.validate(true)?;

        // we need to be careful here in testing as 20 blocks is not that long
        // in wall clock time
        let horizon = global::cut_through_horizon() as u64;
        let head = self.head()?;

        if head.height <= horizon {
            return Ok(());
        }

        debug!(
            LOGGER,
            "Compaction removing blocks older than {}.",
            head.height - horizon
        );
        let mut count = 0;
        let mut current = self.store.get_header_by_height(head.height - horizon - 1)?;
        let batch = self.store.batch()?;
        loop {
            match self.store.get_block(&current.hash()) {
                Ok(b) => {
                    count += 1;
                    batch.delete_block(&b.hash())?;
                    batch.delete_block_input_bitmap(&b.hash())?;
                    batch.delete_block_sums(&b.hash())?;
                }
                Err(NotFoundErr(_)) => {
                    break;
                }
                Err(e) => {
                    return Err(
                        ErrorKind::StoreErr(e, "retrieving block to compact".to_owned()).into(),
                    )
                }
            }
            if current.height <= 1 {
                break;
            }
            match self.store.get_block_header(&current.previous) {
                Ok(h) => current = h,
                Err(NotFoundErr(_)) => break,
                Err(e) => return Err(From::from(e)),
            }
        }
        batch.commit()?;
        debug!(LOGGER, "Compaction removed {} blocks, done.", count);
        Ok(())
    }

    /// Returns the last n nodes inserted into the output sum tree.
    pub fn get_last_n_output(&self, distance: u64) -> Vec<(Hash, OutputIdentifier)> {
        let mut txhashset = self.txhashset.write().unwrap();
        txhashset.last_n_output(distance)
    }

    /// As above, for range proofs.
    pub fn get_last_n_rangeproof(&self, distance: u64) -> Vec<(Hash, RangeProof)> {
        let mut txhashset = self.txhashset.write().unwrap();
        txhashset.last_n_rangeproof(distance)
    }

    /// As above, for kernels.
    pub fn get_last_n_kernel(&self, distance: u64) -> Vec<(Hash, TxKernel)> {
        let mut txhashset = self.txhashset.write().unwrap();
        txhashset.last_n_kernel(distance)
    }

    /// Outputs by insertion index.
    pub fn unspent_outputs_by_insertion_index(
        &self,
        start_index: u64,
        max: u64,
    ) -> Result<(u64, u64, Vec<Output>), Error> {
        let mut txhashset = self.txhashset.write().unwrap();
        let max_index = txhashset.highest_output_insertion_index();
        let outputs = txhashset.outputs_by_insertion_index(start_index, max);
        let rangeproofs = txhashset.rangeproofs_by_insertion_index(start_index, max);
        if outputs.0 != rangeproofs.0 || outputs.1.len() != rangeproofs.1.len() {
            return Err(ErrorKind::TxHashSetErr(String::from(
                "Output and rangeproof sets don't match",
            )).into());
        }
        let mut output_vec: Vec<Output> = vec![];
        for (ref x, &y) in outputs.1.iter().zip(rangeproofs.1.iter()) {
            output_vec.push(Output {
                commit: x.commit,
                features: x.features,
                proof: y,
            });
        }
        Ok((outputs.0, max_index, output_vec))
    }

    /// Total difficulty at the head of the chain
    pub fn total_difficulty(&self) -> Difficulty {
        self.head.lock().unwrap().clone().total_difficulty
    }

    /// Orphans pool size
    pub fn orphans_len(&self) -> usize {
        self.orphans.len()
    }

    /// Total difficulty at the head of the header chain
    pub fn total_header_difficulty(&self) -> Result<Difficulty, Error> {
        Ok(self.store.get_header_head()?.total_difficulty)
    }

    /// Reset header_head and sync_head to head of current body chain
    pub fn reset_head(&self) -> Result<(), Error> {
        let batch = self.store.batch()?;
        batch.reset_head()?;
        batch.commit()?;
        Ok(())
    }

    /// Get the tip that's also the head of the chain
    pub fn head(&self) -> Result<Tip, Error> {
        Ok(self.head.lock().unwrap().clone())
    }

    /// Block header for the chain head
    pub fn head_header(&self) -> Result<BlockHeader, Error> {
        self.store
            .head_header()
            .map_err(|e| ErrorKind::StoreErr(e, "chain head header".to_owned()).into())
    }

    /// Gets a block by hash
    pub fn get_block(&self, h: &Hash) -> Result<Block, Error> {
        self.store
            .get_block(h)
            .map_err(|e| ErrorKind::StoreErr(e, "chain get block".to_owned()).into())
    }

    /// Gets a block header by hash
    pub fn get_block_header(&self, h: &Hash) -> Result<BlockHeader, Error> {
        self.store
            .get_block_header(h)
            .map_err(|e| ErrorKind::StoreErr(e, "chain get header".to_owned()).into())
    }

    /// Get block_sums by header hash.
    pub fn get_block_sums(&self, h: &Hash) -> Result<BlockSums, Error> {
        self.store
            .get_block_sums(h)
            .map_err(|e| ErrorKind::StoreErr(e, "chain get block_sums".to_owned()).into())
    }

    /// Gets the block header at the provided height
    pub fn get_header_by_height(&self, height: u64) -> Result<BlockHeader, Error> {
        self.store
            .get_header_by_height(height)
            .map_err(|e| ErrorKind::StoreErr(e, "chain get header by height".to_owned()).into())
    }

    /// Gets the block header in which a given output appears in the txhashset.
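    ///
    /// This works by binary-searching headers on their `output_mmr_size`:
    /// the output sits at MMR position `pos`, so the block that introduced it
    /// is the earliest header whose `output_mmr_size` is at least `pos`.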
    pub fn get_header_for_output(
        &self,
        output_ref: &OutputIdentifier,
    ) -> Result<BlockHeader, Error> {
        let mut txhashset = self.txhashset.write().unwrap();
        let (_, pos) = txhashset.is_unspent(output_ref)?;
        let mut min = 1;
        let mut max = {
            let h = self.head.lock().unwrap();
            h.height
        };

        loop {
            let search_height = max - (max - min) / 2;
            let h = self.get_header_by_height(search_height)?;
            let h_prev = self.get_header_by_height(search_height - 1)?;
            if pos > h.output_mmr_size {
                min = search_height;
            } else if pos < h_prev.output_mmr_size {
                max = search_height;
            } else {
                if pos == h_prev.output_mmr_size {
                    return Ok(h_prev);
                }
                return Ok(h);
            }
        }
    }

    /// Verifies the given block header is actually on the current chain.
    /// Checks the header_by_height index to verify the header is where we say
    /// it is.
    pub fn is_on_current_chain(&self, header: &BlockHeader) -> Result<(), Error> {
        let batch = self.store.batch()?;
        batch
            .is_on_current_chain(header)
            .map_err(|e| ErrorKind::StoreErr(e, "chain is_on_current_chain".to_owned()).into())
    }

    /// Get the tip of the current "sync" header chain.
    /// This may be significantly different to the current header chain.
    pub fn get_sync_head(&self) -> Result<Tip, Error> {
        self.store
            .get_sync_head()
            .map_err(|e| ErrorKind::StoreErr(e, "chain get sync head".to_owned()).into())
    }

    /// Get the tip of the header chain.
    pub fn get_header_head(&self) -> Result<Tip, Error> {
        self.store
            .get_header_head()
            .map_err(|e| ErrorKind::StoreErr(e, "chain get header head".to_owned()).into())
    }

    /// Builds an iterator on blocks starting from the current chain head and
    /// running backward. Specialized to return information pertaining to block
    /// difficulty calculation (timestamp and previous difficulties).
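    ///
    /// A minimal sketch of how a difficulty adjustment routine might consume
    /// it (illustrative only; the real consumer lives in the consensus code):
    ///
    /// ```ignore
    /// // Walk recent blocks backward from the head, newest first.
    /// for _entry in chain.difficulty_iter().take(60) {
    ///     // each entry carries the timestamp and difficulty data needed
    ///     // for the next difficulty calculation
    /// }
    /// ```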
    pub fn difficulty_iter(&self) -> store::DifficultyIter {
        let batch = self.store.batch().unwrap();
        let head = self.head.lock().unwrap();
        store::DifficultyIter::from(head.last_block_h, batch)
    }

    /// Check whether we have a block without reading it
    pub fn block_exists(&self, h: Hash) -> Result<bool, Error> {
        self.store
            .block_exists(&h)
            .map_err(|e| ErrorKind::StoreErr(e, "chain block exists".to_owned()).into())
    }

    /// Reset sync_head to the provided head.
    pub fn reset_sync_head(&self, head: &Tip) -> Result<(), Error> {
        let batch = self.store.batch()?;
        batch.save_sync_head(head)?;
        batch.commit()?;
        Ok(())
    }
}

fn setup_head(
    genesis: Block,
    store: Arc<store::ChainStore>,
    txhashset: &mut txhashset::TxHashSet,
) -> Result<(), Error> {
    let mut batch = store.batch()?;

    // check if we have a head in store, otherwise the genesis block is it
    let head_res = batch.head();
    let mut head: Tip;
    match head_res {
        Ok(h) => {
            head = h;
            loop {
                // Use current chain tip if we have one.
                // Note: We are rewinding and validating against a writeable extension.
                // If validation is successful we will truncate the backend files
                // to match the provided block header.
                let header = batch.get_block_header(&head.last_block_h)?;

                let res = txhashset::extending(txhashset, &mut batch, |extension| {
                    extension.rewind(&header)?;
                    extension.validate_roots()?;

                    // now check we have the "block sums" for the block in question
                    // if we have no sums (migrating an existing node) we need to go
                    // back to the txhashset and sum the outputs and kernels
                    if header.height > 0 && extension.batch.get_block_sums(&header.hash()).is_err()
                    {
                        debug!(
                            LOGGER,
                            "chain: init: building (missing) block sums for {} @ {}",
                            header.height,
                            header.hash()
                        );

                        // Do a full (and slow) validation of the txhashset extension
                        // to calculate the utxo_sum and kernel_sum at this block height.
                        let (utxo_sum, kernel_sum) = extension.validate_kernel_sums()?;

                        // Save the block_sums to the db for use later.
                        extension.batch.save_block_sums(
                            &header.hash(),
                            &BlockSums {
                                utxo_sum,
                                kernel_sum,
                            },
                        )?;
                    }

                    debug!(
                        LOGGER,
                        "chain: init: rewinding and validating before we start... {} at {}",
                        header.hash(),
                        header.height,
                    );
                    Ok(())
                });

                if res.is_ok() {
                    break;
                } else {
                    // We may have corrupted the MMR backend files last time we stopped the
                    // node. If this appears to be the case revert the head to the previous
                    // header and try again
                    let prev_header = batch.get_block_header(&head.prev_block_h)?;
                    let _ = batch.delete_block(&header.hash());
                    let _ = batch.setup_height(&prev_header, &head)?;
                    head = Tip::from_block(&prev_header);
                    batch.save_head(&head)?;
                }
            }
        }
        Err(NotFoundErr(_)) => {
            batch.save_block(&genesis)?;
            let tip = Tip::from_block(&genesis.header);
            batch.save_head(&tip)?;
            batch.setup_height(&genesis.header, &tip)?;

            txhashset::extending(txhashset, &mut batch, |extension| {
                extension.apply_block(&genesis)?;

                // Save the block_sums to the db for use later.
                extension
                    .batch
                    .save_block_sums(&genesis.hash(), &BlockSums::default())?;

                Ok(())
            })?;

            info!(LOGGER, "chain: init: saved genesis: {:?}", genesis.hash());
        }
        Err(e) => return Err(ErrorKind::StoreErr(e, "chain init load head".to_owned()))?,
    };

    // Initialize header_head and sync_head as necessary for chain init.
    batch.reset_head()?;
    batch.commit()?;

    Ok(())
}