// Copyright 2018 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Adapters connecting new block, new transaction, and accepted transaction
//! events to consumers of those events.
use rand::{self, Rng};
use std::fs::File;
use std::net::SocketAddr;
use std::ops::Deref;
use std::sync::{Arc, RwLock, Weak};
use std::thread;
use std::time::Instant;

use chain::{self, ChainAdapter, Options, Tip};
use common::types::{ChainValidationMode, ServerConfig, SyncState};
use core::core;
use core::core::block::BlockHeader;
use core::core::hash::{Hash, Hashed};
use core::core::target::Difficulty;
use core::core::transaction::Transaction;
use p2p;
use pool;
use store;
use util::{OneTime, LOGGER};
// All adapters use `Weak` references instead of `Arc` to avoid cycles that
// can never be destroyed. These 2 functions are simple helpers to reduce the
// boilerplate of dealing with `Weak`.
fn w<T>(weak: &Weak<T>) -> Arc<T> {
	weak.upgrade().unwrap()
}

fn wo<T>(weak_one: &OneTime<Weak<T>>) -> Arc<T> {
	w(weak_one.borrow().deref())
}

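// Illustrative sketch (not part of the original file): how `w` behaves with a
// hypothetical shared value. The adapters hold only the `Weak` side, so the
// owner can always be dropped and the reference cycle broken.
//
//   let shared: Arc<Vec<u8>> = Arc::new(vec![1, 2, 3]);
//   let weak: Weak<Vec<u8>> = Arc::downgrade(&shared);
//   let strong: Arc<Vec<u8>> = w(&weak); // panics only if `shared` was dropped
//   assert_eq!(strong[0], 1);
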
/// Implementation of the NetAdapter for the blockchain. Gets notified when new
/// blocks and transactions are received and forwards to the chain and pool
/// implementations.
pub struct NetToChainAdapter {
	sync_state: Arc<SyncState>,
	archive_mode: bool,
	chain: Weak<chain::Chain>,
	tx_pool: Arc<RwLock<pool::TransactionPool<PoolToChainAdapter>>>,
	peers: OneTime<Weak<p2p::Peers>>,
	config: ServerConfig,
}

impl p2p::ChainAdapter for NetToChainAdapter {
	fn total_difficulty(&self) -> Difficulty {
		w(&self.chain).total_difficulty()
	}

	fn total_height(&self) -> u64 {
		w(&self.chain).head().unwrap().height
	}

	fn transaction_received(&self, tx: core::Transaction, stem: bool) {
		let source = pool::TxSource {
			debug_name: "p2p".to_string(),
			identifier: "?.?.?.?".to_string(),
		};

		debug!(
			LOGGER,
			"Received tx {} from {:?}, going to process.",
			tx.hash(),
			source,
		);

		let h = tx.hash();

		let res = {
			let mut tx_pool = self.tx_pool.write().unwrap();
			tx_pool.add_to_pool(source, tx, stem)
		};

		if let Err(e) = res {
			debug!(LOGGER, "Transaction {} rejected: {:?}", h, e);
		}
	}

	fn block_received(&self, b: core::Block, addr: SocketAddr) -> bool {
		debug!(
			LOGGER,
			"Received block {} at {} from {}, going to process.",
			b.hash(),
			b.header.height,
			addr,
		);
		self.process_block(b, addr)
	}

	fn compact_block_received(&self, cb: core::CompactBlock, addr: SocketAddr) -> bool {
		let bhash = cb.hash();
		debug!(
			LOGGER,
			"Received compact_block {} at {} from {}, going to process.",
			bhash,
			cb.header.height,
			addr,
		);

		if cb.kern_ids.is_empty() {
			let block = core::Block::hydrate_from(cb, vec![]);

			// push the freshly hydrated block through the chain pipeline
			self.process_block(block, addr)
		} else {
			// TODO - do we need to validate the header here?

			let txs = {
				let tx_pool = self.tx_pool.read().unwrap();
				tx_pool.retrieve_transactions(&cb)
			};

			debug!(LOGGER, "adapter: txs from tx pool - {}", txs.len());

			// TODO - 3 scenarios here -
			// 1) we hydrate a valid block (good to go)
			// 2) we hydrate an invalid block (txs legit missing from our pool)
			// 3) we hydrate an invalid block (peer sent us a "bad" compact block) - [TBD]

			let block = core::Block::hydrate_from(cb.clone(), txs);

			let chain = self.chain
				.upgrade()
				.expect("failed to upgrade weak ref to chain");

			if let Ok(prev) = chain.get_block_header(&cb.header.previous) {
				if block
					.validate(&prev.total_kernel_offset, &prev.total_kernel_sum)
					.is_ok()
				{
					debug!(LOGGER, "adapter: successfully hydrated block from tx pool!");
					self.process_block(block, addr)
				} else {
					debug!(
						LOGGER,
						"adapter: block invalid after hydration, requesting full block"
					);
					self.request_block(&cb.header, &addr);
					true
				}
			} else {
				debug!(
					LOGGER,
					"adapter: failed to retrieve previous block header (still syncing?)"
				);
				true
			}
		}
	}

	fn header_received(&self, bh: core::BlockHeader, addr: SocketAddr) -> bool {
		let bhash = bh.hash();
		debug!(
			LOGGER,
			"Received block header {} at {} from {}, going to process.", bhash, bh.height, addr,
		);

		// pushing the new block header through the header chain pipeline
		// we will go ask for the block if this is a new header
		let res = w(&self.chain).process_block_header(&bh, self.chain_opts());

		if let &Err(ref e) = &res {
			debug!(LOGGER, "Block header {} refused by chain: {:?}", bhash, e);
			if e.is_bad_data() {
				debug!(
					LOGGER,
					"header_received: {} is a bad header, resetting header head", bhash
				);
				let _ = w(&self.chain).reset_head();
				return false;
			} else {
				// we got an error when trying to process the block header
				// but nothing serious enough to need to ban the peer upstream
				return true;
			}
		}

		// we have successfully processed a block header
		// so we can go request the block itself
		self.request_compact_block(&bh, &addr);

		// done receiving the header
		true
	}

	fn headers_received(&self, bhs: Vec<core::BlockHeader>, addr: SocketAddr) {
		info!(
			LOGGER,
			"Received block headers {:?} from {}",
			bhs.iter().map(|x| x.hash()).collect::<Vec<_>>(),
			addr,
		);

		// try to add each header to our header chain
		let mut added_hs = vec![];
		for bh in bhs {
			let res = w(&self.chain).sync_block_header(&bh, self.chain_opts());
			match res {
				Ok(_) => {
					added_hs.push(bh.hash());
				}
				Err(e) => {
					match e.kind() {
						chain::ErrorKind::Unfit(s) => {
							info!(
								LOGGER,
								"Received unfit block header {} at {}: {}.",
								bh.hash(),
								bh.height,
								s
							);
						}
						chain::ErrorKind::StoreErr(e, explanation) => {
							error!(
								LOGGER,
								"Store error processing block header {}: in {} {:?}",
								bh.hash(),
								explanation,
								e
							);
							return;
						}
						_ => {
							info!(LOGGER, "Invalid block header {}: {:?}.", bh.hash(), e);
							// TODO penalize peer somehow
						}
					}
				}
			}
		}

		let header_head = w(&self.chain).get_header_head().unwrap();
		info!(
			LOGGER,
			"Added {} headers to the header chain. Last: {} at {}.",
			added_hs.len(),
			header_head.last_block_h,
			header_head.height,
		);
	}

	fn locate_headers(&self, locator: Vec<Hash>) -> Vec<core::BlockHeader> {
		debug!(LOGGER, "locate_headers: {:?}", locator);

		let header = match self.find_common_header(locator) {
			Some(header) => header,
			None => return vec![],
		};

		debug!(LOGGER, "locate_headers: common header: {:?}", header.hash());

		// looks like we know one, getting as many following headers as allowed
		let hh = header.height;
		let mut headers = vec![];
		for h in (hh + 1)..(hh + (p2p::MAX_BLOCK_HEADERS as u64)) {
			let header = w(&self.chain).get_header_by_height(h);
			match header {
				Ok(head) => headers.push(head),
				Err(e) => match e.kind() {
					chain::ErrorKind::StoreErr(store::Error::NotFoundErr(_), _) => break,
					_ => {
						error!(LOGGER, "Could not build header locator: {:?}", e);
						return vec![];
					}
				},
			}
		}

		debug!(
			LOGGER,
			"locate_headers: returning headers: {}",
			headers.len(),
		);

		headers
	}

	/// Gets a full block by its hash.
	fn get_block(&self, h: Hash) -> Option<core::Block> {
		let b = w(&self.chain).get_block(&h);
		match b {
			Ok(b) => Some(b),
			_ => None,
		}
	}

	/// Provides a reading view into the current txhashset state as well as
	/// the required indexes for a consumer to rewind to a consistent state
	/// at the provided block hash.
	fn txhashset_read(&self, h: Hash) -> Option<p2p::TxHashSetRead> {
		match w(&self.chain).txhashset_read(h.clone()) {
			Ok((out_index, kernel_index, read)) => Some(p2p::TxHashSetRead {
				output_index: out_index,
				kernel_index: kernel_index,
				reader: read,
			}),
			Err(e) => {
				warn!(
					LOGGER,
					"Couldn't produce txhashset data for block {}: {:?}", h, e
				);
				None
			}
		}
	}

	/// Writes a reading view on a txhashset state that's been provided to us.
	/// If we're willing to accept that new state, the data stream will be
	/// read as a zip file and unzipped, and the resulting state files will be
	/// rewound to the provided indexes.
	fn txhashset_write(
		&self,
		h: Hash,
		rewind_to_output: u64,
		rewind_to_kernel: u64,
		txhashset_data: File,
		_peer_addr: SocketAddr,
	) -> bool {
		// TODO check whether we should accept any txhashset now
		if let Err(e) = w(&self.chain).txhashset_write(
			h,
			rewind_to_output,
			rewind_to_kernel,
			txhashset_data,
			self.sync_state.as_ref(),
		) {
			error!(LOGGER, "Failed to save txhashset archive: {}", e);
			!e.is_bad_data()
		} else {
			info!(LOGGER, "Received valid txhashset data for {}.", h);
			true
		}
	}
}

impl NetToChainAdapter {
	/// Construct a new NetToChainAdapter instance
	pub fn new(
		sync_state: Arc<SyncState>,
		archive_mode: bool,
		chain_ref: Weak<chain::Chain>,
		tx_pool: Arc<RwLock<pool::TransactionPool<PoolToChainAdapter>>>,
		config: ServerConfig,
	) -> NetToChainAdapter {
		NetToChainAdapter {
			sync_state,
			archive_mode,
			chain: chain_ref,
			tx_pool,
			peers: OneTime::new(),
			config,
		}
	}

	/// Initialize a NetToChainAdapter with a reference to a Peers object.
	/// Should only be called once.
	pub fn init(&self, peers: Weak<p2p::Peers>) {
		self.peers.init(peers);
	}

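	// Illustrative wiring sketch (hypothetical variables, not from the
	// original file): the adapter takes a `Weak` chain handle up front, and
	// `init` is called later, once the `Peers` object exists, to complete
	// the two-phase setup.
	//
	//   let adapter = Arc::new(NetToChainAdapter::new(
	//       sync_state.clone(),
	//       false, // archive_mode
	//       Arc::downgrade(&chain),
	//       tx_pool.clone(),
	//       config.clone(),
	//   ));
	//   // ... p2p server constructed with `adapter`, yielding `peers` ...
	//   adapter.init(Arc::downgrade(&peers));
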
	// recursively go back through the locator vector and stop when we find
	// a header that we recognize; this will be a header shared in common
	// between us and the peer
	fn find_common_header(&self, locator: Vec<Hash>) -> Option<BlockHeader> {
		if locator.is_empty() {
			return None;
		}

		let chain = w(&self.chain);
		let known = chain.get_block_header(&locator[0]);

		match known {
			Ok(header) => {
				// even if we know the block, it may not be on our winning chain
				let known_winning = chain.get_header_by_height(header.height);
				if let Ok(known_winning) = known_winning {
					if known_winning.hash() != header.hash() {
						self.find_common_header(locator[1..].to_vec())
					} else {
						Some(header)
					}
				} else {
					self.find_common_header(locator[1..].to_vec())
				}
			}
			Err(e) => match e.kind() {
				chain::ErrorKind::StoreErr(store::Error::NotFoundErr(_), _) => {
					self.find_common_header(locator[1..].to_vec())
				}
				_ => {
					error!(LOGGER, "Could not build header locator: {:?}", e);
					None
				}
			},
		}
	}

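	// Worked example (hypothetical hashes, not from the original file): given
	// a locator [h_100, h_90, h_50, genesis] where h_100 and h_90 are unknown
	// to us or off our winning chain, and h_50 is on it, the recursion above
	// returns the header for h_50 as the point to resume syncing from.
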
	// pushing the new block through the chain pipeline
	// remembering to reset the head if we have a bad block
	fn process_block(&self, b: core::Block, addr: SocketAddr) -> bool {
		let prev_hash = b.header.previous;
		let bhash = b.hash();
		let chain = w(&self.chain);
		match chain.process_block(b, self.chain_opts()) {
			Ok((tip, _)) => {
				self.validate_chain(bhash);
				self.check_compact(tip);
				true
			}
			Err(ref e) if e.is_bad_data() => {
				debug!(
					LOGGER,
					"adapter: process_block: {} is a bad block, resetting head", bhash
				);
				let _ = chain.reset_head();

				// we potentially changed the state of the system here
				// so check everything is still ok
				self.validate_chain(bhash);

				false
			}
			Err(e) => {
				match e.kind() {
					chain::ErrorKind::Orphan => {
						// make sure we did not miss the parent block
						if !chain.is_orphan(&prev_hash) && !self.sync_state.is_syncing() {
							debug!(LOGGER, "adapter: process_block: received an orphan block, checking the parent: {:}", prev_hash);
							self.request_block_by_hash(prev_hash, &addr)
						}
						true
					}
					_ => {
						debug!(
							LOGGER,
							"adapter: process_block: block {} refused by chain: {:?}", bhash, e
						);
						true
					}
				}
			}
		}
	}

	fn validate_chain(&self, bhash: Hash) {
		// If we are running in "validate the full chain every block" mode then
		// panic here if validation fails for any reason.
		// We are out of consensus at this point and want to track the problem
		// down as soon as possible.
		// Skip this if we are currently syncing (too slow).
		let chain = w(&self.chain);
		if chain.head().unwrap().height > 0
			&& !self.sync_state.is_syncing()
			&& self.config.chain_validation_mode == ChainValidationMode::EveryBlock
		{
			let now = Instant::now();

			debug!(
				LOGGER,
				"adapter: process_block: ***** validating full chain state at {}", bhash,
			);

			chain
				.validate(true)
				.expect("chain validation failed, hard stop");

			debug!(
				LOGGER,
				"adapter: process_block: ***** done validating full chain state, took {}s",
				now.elapsed().as_secs(),
			);
		}
	}

	fn check_compact(&self, tip_res: Option<Tip>) {
		// no compaction during sync or if we're in historical mode
		if self.archive_mode || self.sync_state.is_syncing() {
			return;
		}

		if let Some(tip) = tip_res {
			// trigger compaction every 2000 blocks, uses a different thread to avoid
			// blocking the caller thread (likely a peer)
			if tip.height % 2000 == 0 {
				let chain = w(&self.chain);
				let _ = thread::Builder::new()
					.name("compactor".to_string())
					.spawn(move || {
						if let Err(e) = chain.compact() {
							error!(LOGGER, "Could not compact chain: {:?}", e);
						}
					});
			}
		}
	}

	// After receiving a compact block, if we cannot successfully hydrate
	// it into a full block then fall back to requesting the full block
	// from the same peer that gave us the compact block.
	// consider additional peers for redundancy?
	fn request_block(&self, bh: &BlockHeader, addr: &SocketAddr) {
		self.request_block_by_hash(bh.hash(), addr)
	}

	fn request_block_by_hash(&self, h: Hash, addr: &SocketAddr) {
		self.send_block_request_to_peer(h, addr, |peer, h| peer.send_block_request(h))
	}

	// After we have received a block header in "header first" propagation
	// we need to go request the block (compact representation) from the
	// same peer that gave us the header (unless we have already accepted the block)
	fn request_compact_block(&self, bh: &BlockHeader, addr: &SocketAddr) {
		self.send_block_request_to_peer(bh.hash(), addr, |peer, h| {
			peer.send_compact_block_request(h)
		})
	}

	fn send_block_request_to_peer<F>(&self, h: Hash, addr: &SocketAddr, f: F)
	where
		F: Fn(&p2p::Peer, Hash) -> Result<(), p2p::Error>,
	{
		match w(&self.chain).block_exists(h) {
			Ok(false) => {
				match wo(&self.peers).get_connected_peer(addr) {
					None => debug!(LOGGER, "send_block_request_to_peer: can't send request to peer {:?}, not connected", addr),
					Some(peer) => {
						match peer.read() {
							Err(e) => debug!(LOGGER, "send_block_request_to_peer: can't send request to peer {:?}, read fails: {:?}", addr, e),
							Ok(p) => {
								if let Err(e) = f(&p, h) {
									error!(LOGGER, "send_block_request_to_peer: failed: {:?}", e)
								}
							}
						}
					}
				}
			}
			Ok(true) => debug!(LOGGER, "send_block_request_to_peer: block {} already known", h),
			Err(e) => error!(LOGGER, "send_block_request_to_peer: failed to check block exists: {:?}", e)
		}
	}

	/// Prepare options for the chain pipeline
	fn chain_opts(&self) -> chain::Options {
		let opts = if self.sync_state.is_syncing() {
			chain::Options::SYNC
		} else {
			chain::Options::NONE
		};
		opts
	}
}

/// Implementation of the ChainAdapter for the network. Gets notified when the
/// blockchain accepted a new block, asking the pool to update its state and
/// the network to broadcast the block
pub struct ChainToPoolAndNetAdapter {
	tx_pool: Arc<RwLock<pool::TransactionPool<PoolToChainAdapter>>>,
	peers: OneTime<Weak<p2p::Peers>>,
}

impl ChainAdapter for ChainToPoolAndNetAdapter {
	fn block_accepted(&self, b: &core::Block, opts: Options) {
		debug!(LOGGER, "adapter: block_accepted: {:?}", b.hash());

		if let Err(e) = self.tx_pool.write().unwrap().reconcile_block(b) {
			error!(
				LOGGER,
				"Pool could not update itself at block {}: {:?}",
				b.hash(),
				e,
			);
		}

		// If we mined the block then we want to broadcast the block itself.
		// If block is empty then broadcast the block.
		// If block contains txs then broadcast the compact block.
		// If we received the block from another node then broadcast "header first"
		// to minimize network traffic.

		if opts.contains(Options::MINE) {
			// propagate compact block out if we mined the block
			// but broadcast full block if we have no txs
			let cb = b.as_compact_block();
			if cb.kern_ids.is_empty() {
				// in the interest of testing all code paths
				// randomly decide how we send an empty block out
				// TODO - lock this down once we are comfortable it works...
				let mut rng = rand::thread_rng();
				if rng.gen() {
					wo(&self.peers).broadcast_block(&b);
				} else {
					wo(&self.peers).broadcast_compact_block(&cb);
				}
			} else {
				wo(&self.peers).broadcast_compact_block(&cb);
			}
		} else {
			// "header first" propagation if we are not the originator of this block
			// again randomly choose between "header first" or "compact block" propagation
			// to ensure we test a wide variety of code paths

			let mut rng = rand::thread_rng();
			if rng.gen() {
				wo(&self.peers).broadcast_header(&b.header);
			} else {
				let cb = b.as_compact_block();
				wo(&self.peers).broadcast_compact_block(&cb);
			}
		}
	}
}

impl ChainToPoolAndNetAdapter {
	/// Construct a ChainToPoolAndNetAdapter instance.
	pub fn new(
		tx_pool: Arc<RwLock<pool::TransactionPool<PoolToChainAdapter>>>,
	) -> ChainToPoolAndNetAdapter {
		ChainToPoolAndNetAdapter {
			tx_pool: tx_pool,
			peers: OneTime::new(),
		}
	}

	/// Initialize a ChainToPoolAndNetAdapter instance with a handle to a
	/// Peers object. Should only be called once.
	pub fn init(&self, peers: Weak<p2p::Peers>) {
		self.peers.init(peers);
	}
}

/// Adapter between the transaction pool and the network, to relay
/// transactions that have been accepted.
pub struct PoolToNetAdapter {
	peers: OneTime<Weak<p2p::Peers>>,
}

impl pool::PoolAdapter for PoolToNetAdapter {
	fn stem_tx_accepted(&self, tx: &core::Transaction) -> Result<(), pool::PoolError> {
		wo(&self.peers)
			.broadcast_stem_transaction(tx)
			.map_err(|_| pool::PoolError::DandelionError)?;
		Ok(())
	}
	fn tx_accepted(&self, tx: &core::Transaction) {
		wo(&self.peers).broadcast_transaction(tx);
	}
}

impl PoolToNetAdapter {
	/// Create a new pool to net adapter
	pub fn new() -> PoolToNetAdapter {
		PoolToNetAdapter {
			peers: OneTime::new(),
		}
	}

	/// Setup the p2p server on the adapter
	pub fn init(&self, peers: Weak<p2p::Peers>) {
		self.peers.init(peers);
	}
}

/// Implements the view of the blockchain required by the TransactionPool to
/// operate. Mostly needed to break any direct lifecycle or implementation
/// dependency between the pool and the chain.
#[derive(Clone)]
pub struct PoolToChainAdapter {
	chain: OneTime<Weak<chain::Chain>>,
}

impl PoolToChainAdapter {
	/// Create a new pool adapter
	pub fn new() -> PoolToChainAdapter {
		PoolToChainAdapter {
			chain: OneTime::new(),
		}
	}

	/// Set the pool adapter's chain. Should only be called once.
	pub fn set_chain(&self, chain_ref: Weak<chain::Chain>) {
		self.chain.init(chain_ref);
	}
}

impl pool::BlockChain for PoolToChainAdapter {
	fn validate_raw_txs(
		&self,
		txs: Vec<Transaction>,
		pre_tx: Option<Transaction>,
	) -> Result<Vec<Transaction>, pool::PoolError> {
		wo(&self.chain).validate_raw_txs(txs, pre_tx).map_err(|_| {
			pool::PoolError::Other("Chain adapter failed to validate_raw_txs.".to_string())
		})
	}

	fn verify_coinbase_maturity(&self, tx: &Transaction) -> Result<(), pool::PoolError> {
		wo(&self.chain)
			.verify_coinbase_maturity(tx)
			.map_err(|_| pool::PoolError::ImmatureCoinbase)
	}

	fn verify_tx_lock_height(&self, tx: &Transaction) -> Result<(), pool::PoolError> {
		wo(&self.chain)
			.verify_tx_lock_height(tx)
			.map_err(|_| pool::PoolError::ImmatureTransaction)
	}
}