2017-12-12 19:40:26 +03:00
|
|
|
// Copyright 2017 The Grin Developers
|
2016-12-19 02:51:54 +03:00
|
|
|
//
|
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
//
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
|
|
|
|
2017-02-19 05:42:34 +03:00
|
|
|
use std::net::SocketAddr;
|
2017-08-10 03:54:10 +03:00
|
|
|
use std::sync::{Arc, RwLock};
|
2017-11-30 18:27:50 +03:00
|
|
|
use std::sync::atomic::{AtomicBool, Ordering};
|
2018-01-31 23:39:55 +03:00
|
|
|
use rand;
|
|
|
|
use rand::Rng;
|
2016-12-19 02:51:54 +03:00
|
|
|
|
2018-02-05 22:43:54 +03:00
|
|
|
use chain::{self, ChainAdapter, Options};
|
2018-01-17 06:03:40 +03:00
|
|
|
use core::core;
|
2017-09-12 20:24:24 +03:00
|
|
|
use core::core::block::BlockHeader;
|
2017-02-08 00:52:17 +03:00
|
|
|
use core::core::hash::{Hash, Hashed};
|
|
|
|
use core::core::target::Difficulty;
|
2018-01-17 06:03:40 +03:00
|
|
|
use core::core::transaction::{Input, OutputIdentifier};
|
2017-12-12 19:40:26 +03:00
|
|
|
use p2p;
|
2017-06-10 22:51:33 +03:00
|
|
|
use pool;
|
2016-12-21 04:39:02 +03:00
|
|
|
use util::OneTime;
|
2017-02-10 22:02:21 +03:00
|
|
|
use store;
|
2017-10-12 19:56:44 +03:00
|
|
|
use util::LOGGER;
|
2016-12-19 02:51:54 +03:00
|
|
|
|
2016-12-21 04:39:02 +03:00
|
|
|
/// Implementation of the NetAdapter for the blockchain. Gets notified when new
/// blocks and transactions are received and forwards to the chain and pool
/// implementations.
pub struct NetToChainAdapter {
	// flag indicating whether the node is currently syncing; read in
	// chain_opts() to choose between SYNC and NONE chain options
	currently_syncing: Arc<AtomicBool>,
	// handle to the blockchain the adapter feeds received data into
	chain: Arc<chain::Chain>,
	// transaction pool, behind a RwLock for concurrent access
	tx_pool: Arc<RwLock<pool::TransactionPool<PoolToChainAdapter>>>,
	// peers view of the p2p server; set after construction via init()
	// (OneTime because the p2p server does not exist yet at construction)
	peers: OneTime<p2p::Peers>,
}
|
|
|
|
|
2017-12-12 19:40:26 +03:00
|
|
|
impl p2p::ChainAdapter for NetToChainAdapter {
|
2017-02-08 00:52:17 +03:00
|
|
|
fn total_difficulty(&self) -> Difficulty {
|
2017-07-04 02:46:25 +03:00
|
|
|
self.chain.total_difficulty()
|
2017-02-03 02:51:48 +03:00
|
|
|
}
|
2017-02-08 00:52:17 +03:00
|
|
|
|
2017-12-14 00:52:21 +03:00
|
|
|
fn total_height(&self) -> u64 {
|
|
|
|
self.chain.head().unwrap().height
|
|
|
|
}
|
|
|
|
|
2016-12-19 02:51:54 +03:00
|
|
|
fn transaction_received(&self, tx: core::Transaction) {
|
2017-06-10 22:51:33 +03:00
|
|
|
let source = pool::TxSource {
|
|
|
|
debug_name: "p2p".to_string(),
|
|
|
|
identifier: "?.?.?.?".to_string(),
|
|
|
|
};
|
2017-10-26 00:06:24 +03:00
|
|
|
debug!(
|
|
|
|
LOGGER,
|
|
|
|
"Received tx {} from {}, going to process.",
|
|
|
|
tx.hash(),
|
|
|
|
source.identifier,
|
|
|
|
);
|
|
|
|
|
2017-11-18 23:34:05 +03:00
|
|
|
let h = tx.hash();
|
2017-06-10 22:51:33 +03:00
|
|
|
if let Err(e) = self.tx_pool.write().unwrap().add_to_memory_pool(source, tx) {
|
2017-11-18 23:34:05 +03:00
|
|
|
debug!(LOGGER, "Transaction {} rejected: {:?}", h, e);
|
2017-06-10 22:51:33 +03:00
|
|
|
}
|
2016-12-19 02:51:54 +03:00
|
|
|
}
|
2017-02-08 00:52:17 +03:00
|
|
|
|
2018-01-30 17:42:04 +03:00
|
|
|
fn block_received(&self, b: core::Block, addr: SocketAddr) -> bool {
|
2017-10-12 19:56:44 +03:00
|
|
|
debug!(
|
|
|
|
LOGGER,
|
2018-01-30 17:42:04 +03:00
|
|
|
"Received block {} at {} from {}, going to process.",
|
2018-02-02 00:40:55 +03:00
|
|
|
b.hash(),
|
2017-10-18 07:48:21 +03:00
|
|
|
b.header.height,
|
2018-01-30 17:42:04 +03:00
|
|
|
addr,
|
2017-10-12 19:56:44 +03:00
|
|
|
);
|
2017-02-08 00:52:17 +03:00
|
|
|
|
2018-02-02 00:40:55 +03:00
|
|
|
self.process_block(b)
|
2017-02-08 00:52:17 +03:00
|
|
|
}
|
|
|
|
|
2018-02-02 00:40:55 +03:00
|
|
|
fn compact_block_received(&self, cb: core::CompactBlock, addr: SocketAddr) -> bool {
|
|
|
|
let bhash = cb.hash();
|
2018-01-31 23:39:55 +03:00
|
|
|
debug!(
|
|
|
|
LOGGER,
|
|
|
|
"Received compact_block {} at {} from {}, going to process.",
|
|
|
|
bhash,
|
2018-02-02 00:40:55 +03:00
|
|
|
cb.header.height,
|
2018-01-31 23:39:55 +03:00
|
|
|
addr,
|
|
|
|
);
|
|
|
|
|
2018-02-02 00:40:55 +03:00
|
|
|
if cb.kern_ids.is_empty() {
|
|
|
|
let block = core::Block::hydrate_from(cb, vec![], vec![], vec![]);
|
|
|
|
|
|
|
|
// push the freshly hydrated block through the chain pipeline
|
|
|
|
self.process_block(block)
|
|
|
|
} else {
|
|
|
|
// TODO - do we need to validate the header here to be sure it is not total garbage?
|
|
|
|
|
|
|
|
debug!(
|
|
|
|
LOGGER,
|
|
|
|
"*** cannot hydrate non-empty compact block (not yet implemented), \
|
|
|
|
falling back to requesting full block",
|
|
|
|
);
|
|
|
|
self.request_block(&cb.header, &addr);
|
|
|
|
true
|
|
|
|
}
|
2018-01-31 23:39:55 +03:00
|
|
|
}
|
|
|
|
|
2018-01-30 17:42:04 +03:00
|
|
|
fn header_received(&self, bh: core::BlockHeader, addr: SocketAddr) -> bool {
|
|
|
|
let bhash = bh.hash();
|
|
|
|
debug!(
|
|
|
|
LOGGER,
|
|
|
|
"Received block header {} at {} from {}, going to process.",
|
|
|
|
bhash,
|
|
|
|
bh.height,
|
|
|
|
addr,
|
|
|
|
);
|
|
|
|
|
|
|
|
// pushing the new block header through the header chain pipeline
|
|
|
|
// we will go ask for the block if this is a new header
|
|
|
|
let res = self.chain.process_block_header(&bh, self.chain_opts());
|
|
|
|
|
|
|
|
if let &Err(ref e) = &res {
|
|
|
|
debug!(LOGGER, "Block header {} refused by chain: {:?}", bhash, e);
|
|
|
|
if e.is_bad_block() {
|
|
|
|
debug!(LOGGER, "header_received: {} is a bad header, resetting header head", bhash);
|
|
|
|
let _ = self.chain.reset_head();
|
|
|
|
return false;
|
|
|
|
} else {
|
|
|
|
// we got an error when trying to process the block header
|
|
|
|
// but nothing serious enough to need to ban the peer upstream
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// we have successfully processed a block header
|
|
|
|
// so we can go request the block itself
|
2018-01-31 23:39:55 +03:00
|
|
|
self.request_compact_block(&bh, &addr);
|
2018-01-30 17:42:04 +03:00
|
|
|
|
|
|
|
// done receiving the header
|
|
|
|
true
|
|
|
|
}
|
|
|
|
|
2017-11-30 18:27:50 +03:00
|
|
|
fn headers_received(&self, bhs: Vec<core::BlockHeader>, addr: SocketAddr) {
|
2017-11-14 21:57:16 +03:00
|
|
|
info!(
|
|
|
|
LOGGER,
|
2017-11-30 18:27:50 +03:00
|
|
|
"Received block headers {:?} from {}",
|
|
|
|
bhs.iter().map(|x| x.hash()).collect::<Vec<_>>(),
|
|
|
|
addr,
|
2017-11-14 21:57:16 +03:00
|
|
|
);
|
|
|
|
|
2017-02-08 00:52:17 +03:00
|
|
|
// try to add each header to our header chain
|
|
|
|
let mut added_hs = vec![];
|
|
|
|
for bh in bhs {
|
2017-12-04 22:16:57 +03:00
|
|
|
let res = self.chain.sync_block_header(&bh, self.chain_opts());
|
2017-02-08 00:52:17 +03:00
|
|
|
match res {
|
|
|
|
Ok(_) => {
|
|
|
|
added_hs.push(bh.hash());
|
|
|
|
}
|
2017-04-28 08:05:12 +03:00
|
|
|
Err(chain::Error::Unfit(s)) => {
|
2017-09-29 21:44:25 +03:00
|
|
|
info!(
|
2017-10-12 19:56:44 +03:00
|
|
|
LOGGER,
|
2017-09-29 21:44:25 +03:00
|
|
|
"Received unfit block header {} at {}: {}.",
|
|
|
|
bh.hash(),
|
|
|
|
bh.height,
|
|
|
|
s
|
|
|
|
);
|
2017-02-08 00:52:17 +03:00
|
|
|
}
|
2017-10-22 10:11:45 +03:00
|
|
|
Err(chain::Error::StoreErr(e, explanation)) => {
|
2017-10-12 19:56:44 +03:00
|
|
|
error!(
|
|
|
|
LOGGER,
|
2017-10-22 10:11:45 +03:00
|
|
|
"Store error processing block header {}: in {} {:?}",
|
2017-10-12 19:56:44 +03:00
|
|
|
bh.hash(),
|
2017-10-22 13:40:24 +03:00
|
|
|
explanation,
|
2017-10-12 19:56:44 +03:00
|
|
|
e
|
|
|
|
);
|
2017-02-08 00:52:17 +03:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
Err(e) => {
|
2017-10-12 19:56:44 +03:00
|
|
|
info!(LOGGER, "Invalid block header {}: {:?}.", bh.hash(), e);
|
2017-02-08 00:52:17 +03:00
|
|
|
// TODO penalize peer somehow
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2017-10-12 19:56:44 +03:00
|
|
|
info!(
|
|
|
|
LOGGER,
|
|
|
|
"Added {} headers to the header chain.",
|
|
|
|
added_hs.len()
|
|
|
|
);
|
2017-02-08 00:52:17 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
fn locate_headers(&self, locator: Vec<Hash>) -> Vec<core::BlockHeader> {
|
2017-11-14 21:57:16 +03:00
|
|
|
debug!(
|
|
|
|
LOGGER,
|
|
|
|
"locate_headers: {:?}",
|
|
|
|
locator,
|
|
|
|
);
|
|
|
|
|
2017-11-28 23:37:02 +03:00
|
|
|
let header = match self.find_common_header(locator) {
|
|
|
|
Some(header) => header,
|
|
|
|
None => return vec![],
|
2017-02-08 00:52:17 +03:00
|
|
|
};
|
|
|
|
|
2017-11-14 21:57:16 +03:00
|
|
|
debug!(
|
|
|
|
LOGGER,
|
2017-11-15 23:37:40 +03:00
|
|
|
"locate_headers: common header: {:?}",
|
|
|
|
header.hash(),
|
2017-11-14 21:57:16 +03:00
|
|
|
);
|
|
|
|
|
2017-02-08 00:52:17 +03:00
|
|
|
// looks like we know one, getting as many following headers as allowed
|
|
|
|
let hh = header.height;
|
|
|
|
let mut headers = vec![];
|
|
|
|
for h in (hh + 1)..(hh + (p2p::MAX_BLOCK_HEADERS as u64)) {
|
2017-07-04 02:46:25 +03:00
|
|
|
let header = self.chain.get_header_by_height(h);
|
2017-02-08 00:52:17 +03:00
|
|
|
match header {
|
|
|
|
Ok(head) => headers.push(head),
|
2017-10-22 10:11:45 +03:00
|
|
|
Err(chain::Error::StoreErr(store::Error::NotFoundErr, _)) => break,
|
2017-02-08 00:52:17 +03:00
|
|
|
Err(e) => {
|
2017-10-12 19:56:44 +03:00
|
|
|
error!(LOGGER, "Could not build header locator: {:?}", e);
|
2017-02-08 00:52:17 +03:00
|
|
|
return vec![];
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2017-11-14 21:57:16 +03:00
|
|
|
|
|
|
|
debug!(
|
|
|
|
LOGGER,
|
|
|
|
"locate_headers: returning headers: {}",
|
|
|
|
headers.len(),
|
|
|
|
);
|
|
|
|
|
2017-02-08 00:52:17 +03:00
|
|
|
headers
|
|
|
|
}
|
|
|
|
|
2017-02-19 05:42:34 +03:00
|
|
|
/// Gets a full block by its hash.
|
2017-02-08 00:52:17 +03:00
|
|
|
fn get_block(&self, h: Hash) -> Option<core::Block> {
|
2017-07-04 02:46:25 +03:00
|
|
|
let b = self.chain.get_block(&h);
|
2017-02-08 00:52:17 +03:00
|
|
|
match b {
|
|
|
|
Ok(b) => Some(b),
|
|
|
|
_ => None,
|
|
|
|
}
|
2016-12-19 02:51:54 +03:00
|
|
|
}
|
2017-02-19 05:42:34 +03:00
|
|
|
|
2016-12-19 02:51:54 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
impl NetToChainAdapter {
|
2017-10-17 15:18:21 +03:00
|
|
|
pub fn new(
|
2017-12-12 19:40:26 +03:00
|
|
|
currently_syncing: Arc<AtomicBool>,
|
2017-10-17 15:18:21 +03:00
|
|
|
chain_ref: Arc<chain::Chain>,
|
|
|
|
tx_pool: Arc<RwLock<pool::TransactionPool<PoolToChainAdapter>>>,
|
|
|
|
) -> NetToChainAdapter {
|
2016-12-21 04:39:02 +03:00
|
|
|
NetToChainAdapter {
|
2017-12-12 19:40:26 +03:00
|
|
|
currently_syncing: currently_syncing,
|
2017-07-04 02:46:25 +03:00
|
|
|
chain: chain_ref,
|
2017-06-10 22:51:33 +03:00
|
|
|
tx_pool: tx_pool,
|
2018-01-30 17:42:04 +03:00
|
|
|
peers: OneTime::new(),
|
2017-11-30 18:27:50 +03:00
|
|
|
}
|
2017-02-08 00:52:17 +03:00
|
|
|
}
|
2017-12-03 15:46:00 +03:00
|
|
|
|
2018-01-30 17:42:04 +03:00
|
|
|
pub fn init(&self, peers: p2p::Peers) {
|
|
|
|
self.peers.init(peers);
|
|
|
|
}
|
|
|
|
|
2017-11-28 23:37:02 +03:00
|
|
|
// recursively go back through the locator vector and stop when we find
|
|
|
|
// a header that we recognize this will be a header shared in common
|
|
|
|
// between us and the peer
|
|
|
|
fn find_common_header(&self, locator: Vec<Hash>) -> Option<BlockHeader> {
|
|
|
|
if locator.len() == 0 {
|
|
|
|
return None;
|
|
|
|
}
|
|
|
|
|
|
|
|
let known = self.chain.get_block_header(&locator[0]);
|
|
|
|
|
|
|
|
match known {
|
|
|
|
Ok(header) => {
|
|
|
|
// even if we know the block, it may not be on our winning chain
|
|
|
|
let known_winning = self.chain.get_header_by_height(header.height);
|
|
|
|
if let Ok(known_winning) = known_winning {
|
|
|
|
if known_winning.hash() != header.hash() {
|
|
|
|
self.find_common_header(locator[1..].to_vec())
|
|
|
|
} else {
|
|
|
|
Some(header)
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
self.find_common_header(locator[1..].to_vec())
|
|
|
|
}
|
|
|
|
},
|
|
|
|
Err(chain::Error::StoreErr(store::Error::NotFoundErr, _)) => {
|
|
|
|
self.find_common_header(locator[1..].to_vec())
|
|
|
|
},
|
|
|
|
Err(e) => {
|
|
|
|
error!(LOGGER, "Could not build header locator: {:?}", e);
|
|
|
|
None
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2017-06-19 18:59:56 +03:00
|
|
|
|
2018-02-02 00:40:55 +03:00
|
|
|
// pushing the new block through the chain pipeline
|
|
|
|
// remembering to reset the head if we have a bad block
|
|
|
|
fn process_block(&self, b: core::Block) -> bool {
|
|
|
|
let bhash = b.hash();
|
|
|
|
let res = self.chain.process_block(b, self.chain_opts());
|
|
|
|
if let Err(ref e) = res {
|
|
|
|
debug!(LOGGER, "Block {} refused by chain: {:?}", bhash, e);
|
|
|
|
if e.is_bad_block() {
|
|
|
|
debug!(LOGGER, "adapter: process_block: {} is a bad block, resetting head", bhash);
|
|
|
|
let _ = self.chain.reset_head();
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
};
|
|
|
|
true
|
|
|
|
}
|
|
|
|
|
2018-01-31 23:39:55 +03:00
|
|
|
// After receiving a compact block if we cannot successfully hydrate
|
|
|
|
// it into a full block then fallback to requesting the full block
|
|
|
|
// from the same peer that gave us the compact block
|
2018-01-30 17:42:04 +03:00
|
|
|
//
|
|
|
|
// TODO - currently only request block from a single peer
|
|
|
|
// consider additional peers for redundancy?
|
|
|
|
fn request_block(&self, bh: &BlockHeader, addr: &SocketAddr) {
|
|
|
|
if let None = self.peers.borrow().adapter.get_block(bh.hash()) {
|
2018-01-31 23:39:55 +03:00
|
|
|
if let Some(peer) = self.peers.borrow().get_connected_peer(addr) {
|
|
|
|
if let Ok(peer) = peer.read() {
|
|
|
|
let _ = peer.send_block_request(bh.hash());
|
2018-01-30 17:42:04 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
debug!(LOGGER, "request_block: block {} already known", bh.hash());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-01-31 23:39:55 +03:00
|
|
|
// After we have received a block header in "header first" propagation
|
|
|
|
// we need to go request the block (compact representation) from the
|
|
|
|
// same peer that gave us the header (unless we have already accepted the block)
|
|
|
|
//
|
|
|
|
// TODO - currently only request block from a single peer
|
|
|
|
// consider additional peers for redundancy?
|
|
|
|
fn request_compact_block(&self, bh: &BlockHeader, addr: &SocketAddr) {
|
|
|
|
if let None = self.peers.borrow().adapter.get_block(bh.hash()) {
|
|
|
|
if let Some(peer) = self.peers.borrow().get_connected_peer(addr) {
|
|
|
|
if let Ok(peer) = peer.read() {
|
|
|
|
let _ = peer.send_compact_block_request(bh.hash());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
debug!(LOGGER, "request_compact_block: block {} already known", bh.hash());
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-06-19 18:59:56 +03:00
|
|
|
/// Prepare options for the chain pipeline
|
|
|
|
fn chain_opts(&self) -> chain::Options {
|
2017-12-12 19:40:26 +03:00
|
|
|
let opts = if self.currently_syncing.load(Ordering::Relaxed) {
|
2018-02-05 22:43:54 +03:00
|
|
|
chain::Options::SYNC
|
2017-06-19 18:59:56 +03:00
|
|
|
} else {
|
2018-02-05 22:43:54 +03:00
|
|
|
chain::Options::NONE
|
2017-06-19 18:59:56 +03:00
|
|
|
};
|
|
|
|
opts
|
|
|
|
}
|
2016-12-21 04:39:02 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Implementation of the ChainAdapter for the network. Gets notified when the
/// blockchain accepted a new block, asking the pool to update its state and
/// the network to broadcast the block
pub struct ChainToPoolAndNetAdapter {
	// transaction pool to reconcile against each newly accepted block
	tx_pool: Arc<RwLock<pool::TransactionPool<PoolToChainAdapter>>>,
	// peers view of the p2p server used for broadcasting; set after
	// construction via init()
	peers: OneTime<p2p::Peers>,
}
|
|
|
|
|
2017-06-10 22:51:33 +03:00
|
|
|
impl ChainAdapter for ChainToPoolAndNetAdapter {
|
2018-01-30 17:42:04 +03:00
|
|
|
fn block_accepted(&self, b: &core::Block, opts: Options) {
|
2017-06-10 22:51:33 +03:00
|
|
|
{
|
|
|
|
if let Err(e) = self.tx_pool.write().unwrap().reconcile_block(b) {
|
2017-09-29 21:44:25 +03:00
|
|
|
error!(
|
2017-10-12 19:56:44 +03:00
|
|
|
LOGGER,
|
2017-09-29 21:44:25 +03:00
|
|
|
"Pool could not update itself at block {}: {:?}",
|
|
|
|
b.hash(),
|
|
|
|
e
|
|
|
|
);
|
2017-06-10 22:51:33 +03:00
|
|
|
}
|
|
|
|
}
|
2018-01-30 17:42:04 +03:00
|
|
|
|
|
|
|
// If we mined the block then we want to broadcast the block itself.
|
2018-01-31 23:39:55 +03:00
|
|
|
// If block is empty then broadcast the block.
|
|
|
|
// If block contains txs then broadcast the compact block.
|
|
|
|
// If we received the block from another node then broadcast "header first"
|
2018-01-30 17:42:04 +03:00
|
|
|
// to minimize network traffic.
|
2018-02-05 22:43:54 +03:00
|
|
|
if opts.contains(Options::MINE) {
|
2018-01-31 23:39:55 +03:00
|
|
|
// propagate compact block out if we mined the block
|
|
|
|
// but broadcast full block if we have no txs
|
|
|
|
let cb = b.as_compact_block();
|
|
|
|
if cb.kern_ids.is_empty() {
|
|
|
|
|
|
|
|
// in the interest of testing all code paths
|
|
|
|
// randomly decide how we send an empty block out
|
|
|
|
// TODO - lock this down once we are comfortable it works...
|
|
|
|
|
|
|
|
let mut rng = rand::thread_rng();
|
|
|
|
if rng.gen() {
|
|
|
|
self.peers.borrow().broadcast_block(&b);
|
|
|
|
} else {
|
|
|
|
self.peers.borrow().broadcast_compact_block(&cb);
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
self.peers.borrow().broadcast_compact_block(&cb);
|
|
|
|
}
|
2018-01-30 17:42:04 +03:00
|
|
|
} else {
|
|
|
|
// "header first" propagation if we are not the originator of this block
|
|
|
|
self.peers.borrow().broadcast_header(&b.header);
|
|
|
|
}
|
2016-12-21 04:39:02 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-06-10 22:51:33 +03:00
|
|
|
impl ChainToPoolAndNetAdapter {
|
2017-10-17 15:18:21 +03:00
|
|
|
pub fn new(
|
|
|
|
tx_pool: Arc<RwLock<pool::TransactionPool<PoolToChainAdapter>>>,
|
|
|
|
) -> ChainToPoolAndNetAdapter {
|
2017-06-10 22:51:33 +03:00
|
|
|
ChainToPoolAndNetAdapter {
|
|
|
|
tx_pool: tx_pool,
|
2017-12-12 19:40:26 +03:00
|
|
|
peers: OneTime::new(),
|
2017-06-10 22:51:33 +03:00
|
|
|
}
|
2016-12-21 04:39:02 +03:00
|
|
|
}
|
2017-12-12 19:40:26 +03:00
|
|
|
pub fn init(&self, peers: p2p::Peers) {
|
|
|
|
self.peers.init(peers);
|
2016-12-19 02:51:54 +03:00
|
|
|
}
|
|
|
|
}
|
2017-06-10 22:51:33 +03:00
|
|
|
|
2017-10-26 00:06:24 +03:00
|
|
|
/// Adapter between the transaction pool and the network, to relay
/// transactions that have been accepted.
pub struct PoolToNetAdapter {
	// peers view of the p2p server used to broadcast accepted transactions;
	// set after construction via init()
	peers: OneTime<p2p::Peers>,
}
|
|
|
|
|
|
|
|
impl pool::PoolAdapter for PoolToNetAdapter {
|
|
|
|
fn tx_accepted(&self, tx: &core::Transaction) {
|
2017-12-12 19:40:26 +03:00
|
|
|
self.peers.borrow().broadcast_transaction(tx);
|
2017-10-26 00:06:24 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl PoolToNetAdapter {
|
|
|
|
/// Create a new pool to net adapter
|
|
|
|
pub fn new() -> PoolToNetAdapter {
|
2017-11-01 02:32:33 +03:00
|
|
|
PoolToNetAdapter {
|
2017-12-12 19:40:26 +03:00
|
|
|
peers: OneTime::new(),
|
2017-11-01 02:32:33 +03:00
|
|
|
}
|
2017-10-26 00:06:24 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Setup the p2p server on the adapter
|
2017-12-12 19:40:26 +03:00
|
|
|
pub fn init(&self, peers: p2p::Peers) {
|
|
|
|
self.peers.init(peers);
|
2017-10-26 00:06:24 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-06-10 22:51:33 +03:00
|
|
|
/// Implements the view of the blockchain required by the TransactionPool to
/// operate. Mostly needed to break any direct lifecycle or implementation
/// dependency between the pool and the chain.
#[derive(Clone)]
pub struct PoolToChainAdapter {
	// reference to the blockchain; set once after construction via set_chain()
	chain: OneTime<Arc<chain::Chain>>,
}
|
|
|
|
|
|
|
|
impl PoolToChainAdapter {
|
|
|
|
/// Create a new pool adapter
|
2017-07-04 02:46:25 +03:00
|
|
|
pub fn new() -> PoolToChainAdapter {
|
2017-11-01 02:32:33 +03:00
|
|
|
PoolToChainAdapter {
|
|
|
|
chain: OneTime::new(),
|
|
|
|
}
|
2017-07-04 02:46:25 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
pub fn set_chain(&self, chain_ref: Arc<chain::Chain>) {
|
|
|
|
self.chain.init(chain_ref);
|
2017-06-10 22:51:33 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl pool::BlockChain for PoolToChainAdapter {
|
2018-01-17 06:03:40 +03:00
|
|
|
fn is_unspent(&self, output_ref: &OutputIdentifier) -> Result<(), pool::PoolError> {
|
2017-11-01 02:32:33 +03:00
|
|
|
self.chain
|
|
|
|
.borrow()
|
2018-01-17 06:03:40 +03:00
|
|
|
.is_unspent(output_ref)
|
2017-11-01 02:32:33 +03:00
|
|
|
.map_err(|e| match e {
|
2017-09-12 20:24:24 +03:00
|
|
|
chain::types::Error::OutputNotFound => pool::PoolError::OutputNotFound,
|
|
|
|
chain::types::Error::OutputSpent => pool::PoolError::OutputSpent,
|
|
|
|
_ => pool::PoolError::GenericPoolError,
|
2017-11-01 02:32:33 +03:00
|
|
|
})
|
2017-09-12 20:24:24 +03:00
|
|
|
}
|
|
|
|
|
2018-01-17 06:03:40 +03:00
|
|
|
fn is_matured(&self, input: &Input, height: u64) -> Result<(), pool::PoolError> {
|
2017-09-29 21:44:25 +03:00
|
|
|
self.chain
|
|
|
|
.borrow()
|
2018-01-17 06:03:40 +03:00
|
|
|
.is_matured(input, height)
|
|
|
|
.map_err(|e| match e {
|
|
|
|
chain::types::Error::OutputNotFound => pool::PoolError::OutputNotFound,
|
|
|
|
_ => pool::PoolError::GenericPoolError,
|
|
|
|
})
|
|
|
|
}
|
2017-09-12 20:24:24 +03:00
|
|
|
|
|
|
|
fn head_header(&self) -> Result<BlockHeader, pool::PoolError> {
|
2017-11-01 02:32:33 +03:00
|
|
|
self.chain
|
|
|
|
.borrow()
|
|
|
|
.head_header()
|
|
|
|
.map_err(|_| pool::PoolError::GenericPoolError)
|
2017-06-10 22:51:33 +03:00
|
|
|
}
|
|
|
|
}
|