// Copyright 2018 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Adapters connecting new block, new transaction, and accepted transaction
//! events to consumers of those events.
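//!
//! A rough sketch of how these adapters get wired together at server startup
//! (simplified; the chain, pool, peers and config values are constructed
//! elsewhere, and the variable names here are illustrative only):
//!
//! ```ignore
//! let net_adapter = Arc::new(NetToChainAdapter::new(
//! 	sync_state.clone(),
//! 	archive_mode,
//! 	Arc::downgrade(&chain),
//! 	tx_pool.clone(),
//! 	verifier_cache.clone(),
//! 	config.clone(),
//! ));
//! // wire in the peers once the p2p server exists; `init` must only be
//! // called once
//! net_adapter.init(Arc::downgrade(&peers));
//! ```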

use std::fs::File;
use std::net::SocketAddr;
use std::ops::Deref;
use std::sync::{Arc, RwLock, Weak};
use std::thread;
use std::time::Instant;

use chain::{self, ChainAdapter, Options, Tip};
use common::types::{self, ChainValidationMode, ServerConfig, SyncState, SyncStatus};
use core::core::hash::{Hash, Hashed};
use core::core::transaction::Transaction;
use core::core::verifier_cache::VerifierCache;
use core::core::{BlockHeader, BlockSums, CompactBlock};
use core::pow::Difficulty;
use core::{core, global};
use p2p;
use pool;
use store;
use util::{OneTime, LOGGER};

// All adapters use `Weak` references instead of `Arc` to avoid cycles that
// can never be destroyed. These two functions are simple helpers to reduce
// the boilerplate of dealing with `Weak`.
fn w<T>(weak: &Weak<T>) -> Arc<T> {
	weak.upgrade().unwrap()
}

fn wo<T>(weak_one: &OneTime<Weak<T>>) -> Arc<T> {
	w(weak_one.borrow().deref())
}
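
// Note: `w` panics (via `unwrap`) if the `Weak` can no longer be upgraded.
// In practice the server owns the chain and peers for the lifetime of the
// process, so a failed upgrade means we are shutting down, or an adapter was
// used before its `init`/`set_chain` was called; failing fast is intended.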

/// Implementation of the NetAdapter for the blockchain. Gets notified when
/// new blocks and transactions are received and forwards them to the chain
/// and pool implementations.
pub struct NetToChainAdapter {
	sync_state: Arc<SyncState>,
	archive_mode: bool,
	chain: Weak<chain::Chain>,
	tx_pool: Arc<RwLock<pool::TransactionPool>>,
	verifier_cache: Arc<RwLock<VerifierCache>>,
	peers: OneTime<Weak<p2p::Peers>>,
	config: ServerConfig,
}

impl p2p::ChainAdapter for NetToChainAdapter {
	fn total_difficulty(&self) -> Difficulty {
		w(&self.chain).total_difficulty()
	}

	fn total_height(&self) -> u64 {
		w(&self.chain).head().unwrap().height
	}
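
	// A new transaction arrived from a peer. We ignore it entirely while
	// syncing; otherwise we attribute it to a p2p source and hand it to the
	// pool. The `stem` flag asks the pool to relay it along the Dandelion
	// stem phase instead of fluffing (broadcasting) it immediately.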
	fn transaction_received(&self, tx: core::Transaction, stem: bool) {
		// nothing much we can do with a new transaction while syncing
		if self.sync_state.is_syncing() {
			return;
		}

		let source = pool::TxSource {
			debug_name: "p2p".to_string(),
			identifier: "?.?.?.?".to_string(),
		};

		let tx_hash = tx.hash();
		let header = w(&self.chain).head_header().unwrap();

		debug!(
			LOGGER,
			"Received tx {}, inputs: {}, outputs: {}, kernels: {}, going to process.",
			tx_hash,
			tx.inputs().len(),
			tx.outputs().len(),
			tx.kernels().len(),
		);

		let res = {
			let mut tx_pool = self.tx_pool.write().unwrap();
			tx_pool.add_to_pool(source, tx, stem, &header)
		};

		if let Err(e) = res {
			debug!(LOGGER, "Transaction {} rejected: {:?}", tx_hash, e);
		}
	}

	fn block_received(&self, b: core::Block, addr: SocketAddr) -> bool {
		debug!(
			LOGGER,
			"Received block {} at {} from {}, inputs: {}, outputs: {}, kernels: {}, going to process.",
			b.hash(),
			b.header.height,
			addr,
			b.inputs().len(),
			b.outputs().len(),
			b.kernels().len(),
		);
		self.process_block(b, addr)
	}
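
	// A compact block carries full coinbase outputs and kernels plus short
	// ids for the remaining kernels. We try to "hydrate" it back into a full
	// block using transactions from our own pool, falling back to requesting
	// the full block from the sender if hydration fails validation.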
	fn compact_block_received(&self, cb: core::CompactBlock, addr: SocketAddr) -> bool {
		let bhash = cb.hash();
		debug!(
			LOGGER,
			"Received compact_block {} at {} from {}, outputs: {}, kernels: {}, kern_ids: {}, going to process.",
			bhash,
			cb.header.height,
			addr,
			cb.out_full().len(),
			cb.kern_full().len(),
			cb.kern_ids().len(),
		);

		let cb_hash = cb.hash();
		if cb.kern_ids().is_empty() {
			// push the freshly hydrated block through the chain pipeline
			match core::Block::hydrate_from(cb, vec![]) {
				Ok(block) => self.process_block(block, addr),
				Err(e) => {
					debug!(LOGGER, "Invalid hydrated block {}: {}", cb_hash, e);
					return false;
				}
			}
		} else {
			// check at least the header is valid before hydrating
			if let Err(e) = w(&self.chain).process_block_header(&cb.header, self.chain_opts()) {
				debug!(LOGGER, "Invalid compact block header {}: {}", cb_hash, e);
				return !e.is_bad_data();
			}

			let (txs, missing_short_ids) = {
				let tx_pool = self.tx_pool.read().unwrap();
				tx_pool.retrieve_transactions(cb.hash(), cb.nonce, cb.kern_ids())
			};

			debug!(
				LOGGER,
				"adapter: txs from tx pool - {}, (unknown kern_ids: {})",
				txs.len(),
				missing_short_ids.len(),
			);

			// TODO - 3 scenarios here -
			// 1) we hydrate a valid block (good to go)
			// 2) we hydrate an invalid block (txs legitimately missing from our pool)
			// 3) we hydrate an invalid block (peer sent us a "bad" compact block) - [TBD]

			let block = match core::Block::hydrate_from(cb.clone(), txs) {
				Ok(block) => block,
				Err(e) => {
					debug!(LOGGER, "Invalid hydrated block {}: {}", cb.hash(), e);
					return false;
				}
			};

			let chain = self
				.chain
				.upgrade()
				.expect("failed to upgrade weak ref to chain");

			if let Ok(prev) = chain.get_block_header(&cb.header.previous) {
				if block
					.validate(
						&prev.total_kernel_offset,
						&prev.total_kernel_sum,
						self.verifier_cache.clone(),
					).is_ok()
				{
					debug!(LOGGER, "adapter: successfully hydrated block from tx pool!");
					self.process_block(block, addr)
				} else {
					if self.sync_state.status() == SyncStatus::NoSync {
						debug!(
							LOGGER,
							"adapter: block invalid after hydration, requesting full block"
						);
						self.request_block(&cb.header, &addr);
						true
					} else {
						debug!(
							LOGGER,
							"adapter: block invalid after hydration, ignoring it because we are still syncing"
						);
						true
					}
				}
			} else {
				debug!(
					LOGGER,
					"adapter: failed to retrieve previous block header (still syncing?)"
				);
				true
			}
		}
	}

	fn header_received(&self, bh: core::BlockHeader, addr: SocketAddr) -> bool {
		let bhash = bh.hash();
		debug!(
			LOGGER,
			"Received block header {} at {} from {}, going to process.", bhash, bh.height, addr,
		);

		// push the new block header through the header chain pipeline;
		// we will go ask for the block if this is a new header
		let res = w(&self.chain).process_block_header(&bh, self.chain_opts());

		if let &Err(ref e) = &res {
			debug!(
				LOGGER,
				"Block header {} refused by chain: {:?}",
				bhash,
				e.kind()
			);
			if e.is_bad_data() {
				debug!(
					LOGGER,
					"header_received: {} is a bad header, resetting header head", bhash
				);
				let _ = w(&self.chain).reset_head();
				return false;
			} else {
				// we got an error when trying to process the block header
				// but nothing serious enough to need to ban the peer upstream
				return true;
			}
		}

		// we have successfully processed a block header
		// so we can go request the block itself
		self.request_compact_block(&bh, &addr);

		// done receiving the header
		true
	}

	fn headers_received(&self, bhs: Vec<core::BlockHeader>, addr: SocketAddr) -> bool {
		info!(
			LOGGER,
			"Received block headers {:?} from {}",
			bhs.iter().map(|x| x.hash()).collect::<Vec<_>>(),
			addr,
		);

		if bhs.is_empty() {
			return false;
		}

		// if even the last header in the set is already known to us, the
		// whole set is old news and processing it would only set us backward
		let last_h = bhs.last().unwrap().hash();
		if let Ok(_) = w(&self.chain).get_block_header(&last_h) {
			info!(LOGGER, "All known, ignoring");
			return true;
		}

		// try to add each header to our header chain
		for bh in bhs {
			let res = w(&self.chain).sync_block_header(&bh, self.chain_opts());
			if let &Err(ref e) = &res {
				debug!(
					LOGGER,
					"Block header {} refused by chain: {:?}",
					bh.hash(),
					e
				);

				if e.is_bad_data() {
					return false;
				}
			}
		}
		true
	}
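
	// The locator is a list of header hashes from the peer, ordered newest
	// first. We walk it until we find a header on our winning chain and
	// answer with the headers that follow it, capped at MAX_BLOCK_HEADERS.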
	fn locate_headers(&self, locator: Vec<Hash>) -> Vec<core::BlockHeader> {
		debug!(LOGGER, "locate_headers: {:?}", locator,);

		let header = match self.find_common_header(locator) {
			Some(header) => header,
			None => return vec![],
		};

		debug!(LOGGER, "locate_headers: common header: {:?}", header.hash(),);

		// looks like we know one, get as many following headers as allowed
		let hh = header.height;
		let mut headers = vec![];
		for h in (hh + 1)..(hh + (p2p::MAX_BLOCK_HEADERS as u64)) {
			let header = w(&self.chain).get_header_by_height(h);
			match header {
				Ok(head) => headers.push(head),
				Err(e) => match e.kind() {
					chain::ErrorKind::StoreErr(store::Error::NotFoundErr(_), _) => break,
					_ => {
						error!(LOGGER, "Could not build header locator: {:?}", e);
						return vec![];
					}
				},
			}
		}

		debug!(
			LOGGER,
			"locate_headers: returning headers: {}",
			headers.len(),
		);

		headers
	}

	/// Gets a full block by its hash.
	fn get_block(&self, h: Hash) -> Option<core::Block> {
		let b = w(&self.chain).get_block(&h);
		match b {
			Ok(b) => Some(b),
			_ => None,
		}
	}

	/// Provides a reading view into the current txhashset state as well as
	/// the required indexes for a consumer to rewind to a consistent state
	/// at the provided block hash.
	fn txhashset_read(&self, h: Hash) -> Option<p2p::TxHashSetRead> {
		match w(&self.chain).txhashset_read(h.clone()) {
			Ok((out_index, kernel_index, read)) => Some(p2p::TxHashSetRead {
				output_index: out_index,
				kernel_index: kernel_index,
				reader: read,
			}),
			Err(e) => {
				warn!(
					LOGGER,
					"Couldn't produce txhashset data for block {}: {:?}", h, e
				);
				None
			}
		}
	}

	fn txhashset_receive_ready(&self) -> bool {
		self.sync_state.status() == SyncStatus::TxHashsetDownload
	}

	/// Writes a reading view on a txhashset state that's been provided to us.
	/// If we're willing to accept that new state, the data stream will be
	/// read as a zip file, unzipped, and the resulting state files should be
	/// rewound to the provided indexes.
	fn txhashset_write(&self, h: Hash, txhashset_data: File, _peer_addr: SocketAddr) -> bool {
		// check the status again after the download, in case two txhashset
		// downloads were somehow in flight at once
		if self.sync_state.status() != SyncStatus::TxHashsetDownload {
			return true;
		}

		if let Err(e) = w(&self.chain).txhashset_write(h, txhashset_data, self.sync_state.as_ref())
		{
			error!(LOGGER, "Failed to save txhashset archive: {}", e);
			let is_good_data = !e.is_bad_data();
			self.sync_state.set_sync_error(types::Error::Chain(e));
			is_good_data
		} else {
			info!(LOGGER, "Received valid txhashset data for {}.", h);
			true
		}
	}
}

impl NetToChainAdapter {
	/// Construct a new NetToChainAdapter instance
	pub fn new(
		sync_state: Arc<SyncState>,
		archive_mode: bool,
		chain_ref: Weak<chain::Chain>,
		tx_pool: Arc<RwLock<pool::TransactionPool>>,
		verifier_cache: Arc<RwLock<VerifierCache>>,
		config: ServerConfig,
	) -> NetToChainAdapter {
		NetToChainAdapter {
			sync_state,
			archive_mode,
			chain: chain_ref,
			tx_pool,
			verifier_cache,
			peers: OneTime::new(),
			config,
		}
	}

	/// Initialize a NetToChainAdapter with a reference to a Peers object.
	/// Should only be called once.
	pub fn init(&self, peers: Weak<p2p::Peers>) {
		self.peers.init(peers);
	}

	// Recursively go back through the locator vector and stop when we find
	// a header that we recognize; this will be a header shared in common
	// between us and the peer.
	fn find_common_header(&self, locator: Vec<Hash>) -> Option<BlockHeader> {
		if locator.is_empty() {
			return None;
		}

		let chain = w(&self.chain);
		let known = chain.get_block_header(&locator[0]);

		match known {
			Ok(header) => {
				// even if we know the block, it may not be on our winning chain
				let known_winning = chain.get_header_by_height(header.height);
				if let Ok(known_winning) = known_winning {
					if known_winning.hash() != header.hash() {
						self.find_common_header(locator[1..].to_vec())
					} else {
						Some(header)
					}
				} else {
					self.find_common_header(locator[1..].to_vec())
				}
			}
			Err(e) => match e.kind() {
				chain::ErrorKind::StoreErr(store::Error::NotFoundErr(_), _) => {
					self.find_common_header(locator[1..].to_vec())
				}
				_ => {
					error!(LOGGER, "Could not build header locator: {:?}", e);
					None
				}
			},
		}
	}

	// Push the new block through the chain pipeline, remembering to reset
	// the head if we receive a bad block.
	fn process_block(&self, b: core::Block, addr: SocketAddr) -> bool {
		let chain = w(&self.chain);
		if !self.archive_mode {
			let head = chain.head().unwrap();
			// we have a fast sync'd node and are sent a block older than our horizon,
			// only sync can do something with that
			if b.header.height < head
				.height
				.saturating_sub(global::cut_through_horizon() as u64)
			{
				return true;
			}
		}

		let prev_hash = b.header.previous;
		let bhash = b.hash();
		match chain.process_block(b, self.chain_opts()) {
			Ok((tip, _)) => {
				self.validate_chain(bhash);
				self.check_compact(tip);
				true
			}
			Err(ref e) if e.is_bad_data() => {
				debug!(
					LOGGER,
					"adapter: process_block: {} is a bad block, resetting head", bhash
				);
				let _ = chain.reset_head();

				// we potentially changed the state of the system here
				// so check everything is still ok
				self.validate_chain(bhash);

				false
			}
			Err(e) => {
				match e.kind() {
					chain::ErrorKind::Orphan => {
						// make sure we did not miss the parent block
						if !chain.is_orphan(&prev_hash) && !self.sync_state.is_syncing() {
							debug!(LOGGER, "adapter: process_block: received an orphan block, checking the parent: {:}", prev_hash);
							self.request_block_by_hash(prev_hash, &addr)
						}
						true
					}
					_ => {
						debug!(
							LOGGER,
							"adapter: process_block: block {} refused by chain: {}",
							bhash,
							e.kind()
						);
						true
					}
				}
			}
		}
	}

	fn validate_chain(&self, bhash: Hash) {
		// If we are running in "validate the full chain every block" mode
		// then panic here if validation fails for any reason.
		// We are out of consensus at this point and want to track the problem
		// down as soon as possible.
		// Skip this if we are currently syncing (too slow).
		let chain = w(&self.chain);
		if chain.head().unwrap().height > 0
			&& !self.sync_state.is_syncing()
			&& self.config.chain_validation_mode == ChainValidationMode::EveryBlock
		{
			let now = Instant::now();

			debug!(
				LOGGER,
				"adapter: process_block: ***** validating full chain state at {}", bhash,
			);

			chain
				.validate(true)
				.expect("chain validation failed, hard stop");

			debug!(
				LOGGER,
				"adapter: process_block: ***** done validating full chain state, took {}s",
				now.elapsed().as_secs(),
			);
		}
	}

	fn check_compact(&self, tip_res: Option<Tip>) {
		// no compaction during sync or if we're in historical mode
		if self.archive_mode || self.sync_state.is_syncing() {
			return;
		}

		if let Some(tip) = tip_res {
			// trigger compaction every 2000 blocks, using a separate thread to
			// avoid blocking the calling thread (likely a peer)
			if tip.height % 2000 == 0 {
				let chain = w(&self.chain);
				let _ = thread::Builder::new()
					.name("compactor".to_string())
					.spawn(move || {
						if let Err(e) = chain.compact() {
							error!(LOGGER, "Could not compact chain: {:?}", e);
						}
					});
			}
		}
	}

	// After receiving a compact block, if we cannot successfully hydrate
	// it into a full block then fall back to requesting the full block
	// from the same peer that gave us the compact block.
	// consider additional peers for redundancy?
	fn request_block(&self, bh: &BlockHeader, addr: &SocketAddr) {
		self.request_block_by_hash(bh.hash(), addr)
	}

	fn request_block_by_hash(&self, h: Hash, addr: &SocketAddr) {
		self.send_block_request_to_peer(h, addr, |peer, h| peer.send_block_request(h))
	}

	// After we have received a block header in "header first" propagation
	// we need to go request the block (compact representation) from the
	// same peer that gave us the header (unless we have already accepted the block).
	fn request_compact_block(&self, bh: &BlockHeader, addr: &SocketAddr) {
		self.send_block_request_to_peer(bh.hash(), addr, |peer, h| {
			peer.send_compact_block_request(h)
		})
	}
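
	// Shared helper for the two request paths above: check we do not already
	// have the block, look up the connected peer for `addr`, then invoke the
	// supplied closure to send the actual request message.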
	fn send_block_request_to_peer<F>(&self, h: Hash, addr: &SocketAddr, f: F)
	where
		F: Fn(&p2p::Peer, Hash) -> Result<(), p2p::Error>,
	{
		match w(&self.chain).block_exists(h) {
			Ok(false) => {
				match wo(&self.peers).get_connected_peer(addr) {
					None => debug!(LOGGER, "send_block_request_to_peer: can't send request to peer {:?}, not connected", addr),
					Some(peer) => {
						match peer.read() {
							Err(e) => debug!(LOGGER, "send_block_request_to_peer: can't send request to peer {:?}, read fails: {:?}", addr, e),
							Ok(p) => {
								if let Err(e) = f(&p, h) {
									error!(LOGGER, "send_block_request_to_peer: failed: {:?}", e)
								}
							}
						}
					}
				}
			}
			Ok(true) => debug!(LOGGER, "send_block_request_to_peer: block {} already known", h),
			Err(e) => error!(LOGGER, "send_block_request_to_peer: failed to check block exists: {:?}", e),
		}
	}

	/// Prepare options for the chain pipeline
	fn chain_opts(&self) -> chain::Options {
		let opts = if self.sync_state.is_syncing() {
			chain::Options::SYNC
		} else {
			chain::Options::NONE
		};
		opts
	}
}

/// Implementation of the ChainAdapter for the network. Gets notified when the
/// chain accepts a new block, asking the pool to update its state and the
/// network to broadcast the block.
pub struct ChainToPoolAndNetAdapter {
	sync_state: Arc<SyncState>,
	tx_pool: Arc<RwLock<pool::TransactionPool>>,
	peers: OneTime<Weak<p2p::Peers>>,
}
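
// Once our chain accepts a block we (a) reconcile the tx pool against it,
// dropping transactions it confirmed or invalidated, and (b) relay it: as a
// compact block if we mined it ourselves, header-first otherwise.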
impl ChainAdapter for ChainToPoolAndNetAdapter {
	fn block_accepted(&self, b: &core::Block, opts: Options) {
		if self.sync_state.is_syncing() {
			return;
		}

		debug!(LOGGER, "adapter: block_accepted: {:?}", b.hash());

		if let Err(e) = self.tx_pool.write().unwrap().reconcile_block(b) {
			error!(
				LOGGER,
				"Pool could not update itself at block {}: {:?}",
				b.hash(),
				e,
			);
		}

		// If we mined the block then we want to broadcast the compact block.
		// If we received the block from another node then broadcast "header first"
		// to minimize network traffic.
		if opts.contains(Options::MINE) {
			// propagate compact block out if we mined the block
			let cb: CompactBlock = b.clone().into();
			wo(&self.peers).broadcast_compact_block(&cb);
		} else {
			// "header first" propagation if we are not the originator of this block
			wo(&self.peers).broadcast_header(&b.header);
		}
	}
}

impl ChainToPoolAndNetAdapter {
	/// Construct a ChainToPoolAndNetAdapter instance.
	pub fn new(
		sync_state: Arc<SyncState>,
		tx_pool: Arc<RwLock<pool::TransactionPool>>,
	) -> ChainToPoolAndNetAdapter {
		ChainToPoolAndNetAdapter {
			sync_state,
			tx_pool,
			peers: OneTime::new(),
		}
	}

	/// Initialize a ChainToPoolAndNetAdapter instance with a handle to a
	/// Peers object. Should only be called once.
	pub fn init(&self, peers: Weak<p2p::Peers>) {
		self.peers.init(peers);
	}
}

/// Adapter between the transaction pool and the network, to relay
/// transactions that have been accepted.
pub struct PoolToNetAdapter {
	peers: OneTime<Weak<p2p::Peers>>,
}
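
// `stem_tx_accepted` relays a transaction along the Dandelion stem path
// (towards a single relay peer), while `tx_accepted` fluffs it, broadcasting
// to our connected peers.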
impl pool::PoolAdapter for PoolToNetAdapter {
	fn stem_tx_accepted(&self, tx: &core::Transaction) -> Result<(), pool::PoolError> {
		wo(&self.peers)
			.broadcast_stem_transaction(tx)
			.map_err(|_| pool::PoolError::DandelionError)?;
		Ok(())
	}

	fn tx_accepted(&self, tx: &core::Transaction) {
		wo(&self.peers).broadcast_transaction(tx);
	}
}

impl PoolToNetAdapter {
	/// Create a new pool to net adapter
	pub fn new() -> PoolToNetAdapter {
		PoolToNetAdapter {
			peers: OneTime::new(),
		}
	}

	/// Setup the p2p server on the adapter
	pub fn init(&self, peers: Weak<p2p::Peers>) {
		self.peers.init(peers);
	}
}

/// Implements the view of the chain required by the TransactionPool to
/// operate. Mostly needed to break any direct lifecycle or implementation
/// dependency between the pool and the chain.
#[derive(Clone)]
pub struct PoolToChainAdapter {
	chain: OneTime<Weak<chain::Chain>>,
}

impl PoolToChainAdapter {
	/// Create a new pool adapter
	pub fn new() -> PoolToChainAdapter {
		PoolToChainAdapter {
			chain: OneTime::new(),
		}
	}

	/// Set the pool adapter's chain. Should only be called once.
	pub fn set_chain(&self, chain_ref: Weak<chain::Chain>) {
		self.chain.init(chain_ref);
	}
}
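
// Each method below delegates to the chain, flattening any chain error into
// an appropriate `pool::PoolError` variant so the pool never has to depend
// on chain error types.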
impl pool::BlockChain for PoolToChainAdapter {
	fn chain_head(&self) -> Result<BlockHeader, pool::PoolError> {
		wo(&self.chain)
			.head_header()
			.map_err(|_| pool::PoolError::Other(format!("failed to get head_header")))
	}

	fn get_block_header(&self, hash: &Hash) -> Result<BlockHeader, pool::PoolError> {
		wo(&self.chain)
			.get_block_header(hash)
			.map_err(|_| pool::PoolError::Other(format!("failed to get block_header")))
	}

	fn get_block_sums(&self, hash: &Hash) -> Result<BlockSums, pool::PoolError> {
		wo(&self.chain)
			.get_block_sums(hash)
			.map_err(|_| pool::PoolError::Other(format!("failed to get block_sums")))
	}

	fn validate_tx(&self, tx: &Transaction, header: &BlockHeader) -> Result<(), pool::PoolError> {
		wo(&self.chain)
			.validate_tx(tx, header)
			.map_err(|_| pool::PoolError::Other(format!("failed to validate tx")))
	}

	fn verify_coinbase_maturity(&self, tx: &Transaction) -> Result<(), pool::PoolError> {
		wo(&self.chain)
			.verify_coinbase_maturity(tx)
			.map_err(|_| pool::PoolError::ImmatureCoinbase)
	}

	fn verify_tx_lock_height(&self, tx: &Transaction) -> Result<(), pool::PoolError> {
		wo(&self.chain)
			.verify_tx_lock_height(tx)
			.map_err(|_| pool::PoolError::ImmatureTransaction)
	}
}