2016-12-19 02:51:54 +03:00
|
|
|
// Copyright 2016 The Grin Developers
|
|
|
|
//
|
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
//
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
|
|
|
|
2017-02-19 05:42:34 +03:00
|
|
|
use std::net::SocketAddr;
|
2017-08-10 03:54:10 +03:00
|
|
|
use std::sync::{Arc, RwLock};
|
2017-02-08 00:52:17 +03:00
|
|
|
use std::thread;
|
2016-12-19 02:51:54 +03:00
|
|
|
|
2016-12-21 04:39:02 +03:00
|
|
|
use chain::{self, ChainAdapter};
|
2017-06-10 22:51:33 +03:00
|
|
|
use core::core::{self, Output};
|
2017-09-12 20:24:24 +03:00
|
|
|
use core::core::block::BlockHeader;
|
2017-02-08 00:52:17 +03:00
|
|
|
use core::core::hash::{Hash, Hashed};
|
|
|
|
use core::core::target::Difficulty;
|
2017-08-10 03:54:10 +03:00
|
|
|
use p2p::{self, NetAdapter, Server, PeerStore, PeerData, State};
|
2017-06-10 22:51:33 +03:00
|
|
|
use pool;
|
|
|
|
use secp::pedersen::Commitment;
|
2016-12-21 04:39:02 +03:00
|
|
|
use util::OneTime;
|
2017-02-10 22:02:21 +03:00
|
|
|
use store;
|
2017-02-08 00:52:17 +03:00
|
|
|
use sync;
|
2017-10-12 19:56:44 +03:00
|
|
|
use util::LOGGER;
|
2017-09-29 21:44:25 +03:00
|
|
|
use core::global::{MiningParameterMode, MINING_PARAMETER_MODE};
|
2016-12-19 02:51:54 +03:00
|
|
|
|
2016-12-21 04:39:02 +03:00
|
|
|
/// Implementation of the NetAdapter for the blockchain. Gets notified when new
/// blocks and transactions are received and forwards to the chain and pool
/// implementations.
pub struct NetToChainAdapter {
	// Chain the received blocks and headers are pushed through.
	chain: Arc<chain::Chain>,
	// Store of known peers, updated as peer addresses are received.
	peer_store: Arc<PeerStore>,
	// Pool that received transactions are added to.
	tx_pool: Arc<RwLock<pool::TransactionPool<PoolToChainAdapter>>>,
	// Set once via `start_sync`; consulted to check whether we're syncing.
	syncer: OneTime<Arc<sync::Syncer>>,
}
|
|
|
|
|
|
|
|
impl NetAdapter for NetToChainAdapter {
|
2017-02-08 00:52:17 +03:00
|
|
|
fn total_difficulty(&self) -> Difficulty {
|
2017-07-04 02:46:25 +03:00
|
|
|
self.chain.total_difficulty()
|
2017-02-03 02:51:48 +03:00
|
|
|
}
|
2017-02-08 00:52:17 +03:00
|
|
|
|
2016-12-19 02:51:54 +03:00
|
|
|
fn transaction_received(&self, tx: core::Transaction) {
|
2017-06-10 22:51:33 +03:00
|
|
|
let source = pool::TxSource {
|
|
|
|
debug_name: "p2p".to_string(),
|
|
|
|
identifier: "?.?.?.?".to_string(),
|
|
|
|
};
|
|
|
|
if let Err(e) = self.tx_pool.write().unwrap().add_to_memory_pool(source, tx) {
|
2017-10-12 19:56:44 +03:00
|
|
|
error!(LOGGER, "Transaction rejected: {:?}", e);
|
2017-06-10 22:51:33 +03:00
|
|
|
}
|
2016-12-19 02:51:54 +03:00
|
|
|
}
|
2017-02-08 00:52:17 +03:00
|
|
|
|
2016-12-19 02:51:54 +03:00
|
|
|
fn block_received(&self, b: core::Block) {
|
2017-09-29 21:44:25 +03:00
|
|
|
let bhash = b.hash();
|
2017-10-12 19:56:44 +03:00
|
|
|
debug!(
|
|
|
|
LOGGER,
|
|
|
|
"Received block {} from network, going to process.",
|
|
|
|
bhash
|
|
|
|
);
|
2017-02-08 00:52:17 +03:00
|
|
|
|
2016-12-21 04:39:02 +03:00
|
|
|
// pushing the new block through the chain pipeline
|
2017-07-27 22:08:48 +03:00
|
|
|
let res = self.chain.process_block(b, self.chain_opts());
|
2016-12-21 04:39:02 +03:00
|
|
|
|
|
|
|
if let Err(e) = res {
|
2017-10-12 19:56:44 +03:00
|
|
|
debug!(LOGGER, "Block {} refused by chain: {:?}", bhash, e);
|
2016-12-21 04:39:02 +03:00
|
|
|
}
|
2017-02-08 00:52:17 +03:00
|
|
|
|
|
|
|
if self.syncer.borrow().syncing() {
|
2017-07-27 22:08:48 +03:00
|
|
|
self.syncer.borrow().block_received(bhash);
|
2017-02-08 00:52:17 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
fn headers_received(&self, bhs: Vec<core::BlockHeader>) {
|
|
|
|
// try to add each header to our header chain
|
|
|
|
let mut added_hs = vec![];
|
|
|
|
for bh in bhs {
|
2017-07-04 02:46:25 +03:00
|
|
|
let res = self.chain.process_block_header(&bh, self.chain_opts());
|
2017-02-08 00:52:17 +03:00
|
|
|
match res {
|
|
|
|
Ok(_) => {
|
|
|
|
added_hs.push(bh.hash());
|
|
|
|
}
|
2017-04-28 08:05:12 +03:00
|
|
|
Err(chain::Error::Unfit(s)) => {
|
2017-09-29 21:44:25 +03:00
|
|
|
info!(
|
2017-10-12 19:56:44 +03:00
|
|
|
LOGGER,
|
2017-09-29 21:44:25 +03:00
|
|
|
"Received unfit block header {} at {}: {}.",
|
|
|
|
bh.hash(),
|
|
|
|
bh.height,
|
|
|
|
s
|
|
|
|
);
|
2017-02-08 00:52:17 +03:00
|
|
|
}
|
|
|
|
Err(chain::Error::StoreErr(e)) => {
|
2017-10-12 19:56:44 +03:00
|
|
|
error!(
|
|
|
|
LOGGER,
|
|
|
|
"Store error processing block header {}: {:?}",
|
|
|
|
bh.hash(),
|
|
|
|
e
|
|
|
|
);
|
2017-02-08 00:52:17 +03:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
Err(e) => {
|
2017-10-12 19:56:44 +03:00
|
|
|
info!(LOGGER, "Invalid block header {}: {:?}.", bh.hash(), e);
|
2017-02-08 00:52:17 +03:00
|
|
|
// TODO penalize peer somehow
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2017-10-12 19:56:44 +03:00
|
|
|
info!(
|
|
|
|
LOGGER,
|
|
|
|
"Added {} headers to the header chain.",
|
|
|
|
added_hs.len()
|
|
|
|
);
|
2017-02-08 00:52:17 +03:00
|
|
|
|
|
|
|
if self.syncer.borrow().syncing() {
|
|
|
|
self.syncer.borrow().headers_received(added_hs);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
fn locate_headers(&self, locator: Vec<Hash>) -> Vec<core::BlockHeader> {
|
|
|
|
if locator.len() == 0 {
|
|
|
|
return vec![];
|
|
|
|
}
|
|
|
|
|
|
|
|
// go through the locator vector and check if we know any of these headers
|
2017-07-04 02:46:25 +03:00
|
|
|
let known = self.chain.get_block_header(&locator[0]);
|
2017-02-08 00:52:17 +03:00
|
|
|
let header = match known {
|
|
|
|
Ok(header) => header,
|
2017-07-04 02:46:25 +03:00
|
|
|
Err(chain::Error::StoreErr(store::Error::NotFoundErr)) => {
|
2017-02-08 00:52:17 +03:00
|
|
|
return self.locate_headers(locator[1..].to_vec());
|
|
|
|
}
|
|
|
|
Err(e) => {
|
2017-10-12 19:56:44 +03:00
|
|
|
error!(LOGGER, "Could not build header locator: {:?}", e);
|
2017-02-08 00:52:17 +03:00
|
|
|
return vec![];
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
// looks like we know one, getting as many following headers as allowed
|
|
|
|
let hh = header.height;
|
|
|
|
let mut headers = vec![];
|
|
|
|
for h in (hh + 1)..(hh + (p2p::MAX_BLOCK_HEADERS as u64)) {
|
2017-07-04 02:46:25 +03:00
|
|
|
let header = self.chain.get_header_by_height(h);
|
2017-02-08 00:52:17 +03:00
|
|
|
match header {
|
|
|
|
Ok(head) => headers.push(head),
|
2017-07-04 02:46:25 +03:00
|
|
|
Err(chain::Error::StoreErr(store::Error::NotFoundErr)) => break,
|
2017-02-08 00:52:17 +03:00
|
|
|
Err(e) => {
|
2017-10-12 19:56:44 +03:00
|
|
|
error!(LOGGER, "Could not build header locator: {:?}", e);
|
2017-02-08 00:52:17 +03:00
|
|
|
return vec![];
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
headers
|
|
|
|
}
|
|
|
|
|
2017-02-19 05:42:34 +03:00
|
|
|
/// Gets a full block by its hash.
|
2017-02-08 00:52:17 +03:00
|
|
|
fn get_block(&self, h: Hash) -> Option<core::Block> {
|
2017-07-04 02:46:25 +03:00
|
|
|
let b = self.chain.get_block(&h);
|
2017-02-08 00:52:17 +03:00
|
|
|
match b {
|
|
|
|
Ok(b) => Some(b),
|
|
|
|
_ => None,
|
|
|
|
}
|
2016-12-19 02:51:54 +03:00
|
|
|
}
|
2017-02-19 05:42:34 +03:00
|
|
|
|
|
|
|
/// Find good peers we know with the provided capability and return their
|
|
|
|
/// addresses.
|
|
|
|
fn find_peer_addrs(&self, capab: p2p::Capabilities) -> Vec<SocketAddr> {
|
2017-10-12 19:56:44 +03:00
|
|
|
let peers = self.peer_store
|
|
|
|
.find_peers(State::Healthy, capab, p2p::MAX_PEER_ADDRS as usize);
|
|
|
|
debug!(LOGGER, "Got {} peer addrs to send.", peers.len());
|
2017-02-19 05:42:34 +03:00
|
|
|
map_vec!(peers, |p| p.addr)
|
|
|
|
}
|
|
|
|
|
|
|
|
/// A list of peers has been received from one of our peers.
|
|
|
|
fn peer_addrs_received(&self, peer_addrs: Vec<SocketAddr>) {
|
2017-10-12 19:56:44 +03:00
|
|
|
debug!(LOGGER, "Received {} peer addrs, saving.", peer_addrs.len());
|
2017-02-19 05:42:34 +03:00
|
|
|
for pa in peer_addrs {
|
|
|
|
if let Ok(e) = self.peer_store.exists_peer(pa) {
|
|
|
|
if e {
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
let peer = PeerData {
|
|
|
|
addr: pa,
|
|
|
|
capabilities: p2p::UNKNOWN,
|
|
|
|
user_agent: "".to_string(),
|
|
|
|
flags: State::Healthy,
|
|
|
|
};
|
|
|
|
if let Err(e) = self.peer_store.save_peer(&peer) {
|
2017-10-12 19:56:44 +03:00
|
|
|
error!(LOGGER, "Could not save received peer address: {:?}", e);
|
2017-02-19 05:42:34 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/// Network successfully connected to a peer.
|
|
|
|
fn peer_connected(&self, pi: &p2p::PeerInfo) {
|
2017-10-12 19:56:44 +03:00
|
|
|
debug!(LOGGER, "Saving newly connected peer {}.", pi.addr);
|
2017-02-19 05:42:34 +03:00
|
|
|
let peer = PeerData {
|
|
|
|
addr: pi.addr,
|
|
|
|
capabilities: pi.capabilities,
|
|
|
|
user_agent: pi.user_agent.clone(),
|
|
|
|
flags: State::Healthy,
|
|
|
|
};
|
|
|
|
if let Err(e) = self.peer_store.save_peer(&peer) {
|
2017-10-12 19:56:44 +03:00
|
|
|
error!(LOGGER, "Could not save connected peer: {:?}", e);
|
2017-02-19 05:42:34 +03:00
|
|
|
}
|
|
|
|
}
|
2016-12-19 02:51:54 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
impl NetToChainAdapter {
|
2017-10-12 19:56:44 +03:00
|
|
|
pub fn new(chain_ref: Arc<chain::Chain>,
|
|
|
|
tx_pool: Arc<RwLock<pool::TransactionPool<PoolToChainAdapter>>>,
|
|
|
|
peer_store: Arc<PeerStore>)
|
|
|
|
-> NetToChainAdapter {
|
2016-12-21 04:39:02 +03:00
|
|
|
NetToChainAdapter {
|
2017-07-04 02:46:25 +03:00
|
|
|
chain: chain_ref,
|
2017-02-19 05:42:34 +03:00
|
|
|
peer_store: peer_store,
|
2017-06-10 22:51:33 +03:00
|
|
|
tx_pool: tx_pool,
|
2017-02-08 00:52:17 +03:00
|
|
|
syncer: OneTime::new(),
|
2016-12-21 04:39:02 +03:00
|
|
|
}
|
|
|
|
}
|
2017-02-08 00:52:17 +03:00
|
|
|
|
2017-06-19 18:59:56 +03:00
|
|
|
/// Start syncing the chain by instantiating and running the Syncer in the
|
|
|
|
/// background (a new thread is created).
|
2017-02-08 00:52:17 +03:00
|
|
|
pub fn start_sync(&self, sync: sync::Syncer) {
|
|
|
|
let arc_sync = Arc::new(sync);
|
|
|
|
self.syncer.init(arc_sync.clone());
|
2017-10-12 19:56:44 +03:00
|
|
|
let spawn_result = thread::Builder::new()
|
|
|
|
.name("syncer".to_string())
|
|
|
|
.spawn(move || {
|
2017-09-29 21:44:25 +03:00
|
|
|
let sync_run_result = arc_sync.run();
|
|
|
|
match sync_run_result {
|
|
|
|
Ok(_) => {}
|
|
|
|
Err(_) => {}
|
|
|
|
}
|
2017-10-12 19:56:44 +03:00
|
|
|
});
|
2017-08-10 03:54:10 +03:00
|
|
|
match spawn_result {
|
|
|
|
Ok(_) => {}
|
|
|
|
Err(_) => {}
|
|
|
|
}
|
2017-02-08 00:52:17 +03:00
|
|
|
}
|
2017-06-19 18:59:56 +03:00
|
|
|
|
|
|
|
/// Prepare options for the chain pipeline
|
|
|
|
fn chain_opts(&self) -> chain::Options {
|
2017-08-09 19:40:23 +03:00
|
|
|
let opts = if self.syncer.borrow().syncing() {
|
2017-06-19 18:59:56 +03:00
|
|
|
chain::SYNC
|
|
|
|
} else {
|
|
|
|
chain::NONE
|
|
|
|
};
|
2017-09-29 21:44:25 +03:00
|
|
|
let param_ref = MINING_PARAMETER_MODE.read().unwrap();
|
2017-08-09 19:40:23 +03:00
|
|
|
let opts = match *param_ref {
|
|
|
|
MiningParameterMode::AutomatedTesting => opts | chain::EASY_POW,
|
|
|
|
MiningParameterMode::UserTesting => opts | chain::EASY_POW,
|
|
|
|
MiningParameterMode::Production => opts,
|
|
|
|
};
|
2017-06-19 18:59:56 +03:00
|
|
|
opts
|
|
|
|
}
|
2016-12-21 04:39:02 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Implementation of the ChainAdapter for the network. Gets notified when the
/// blockchain accepted a new block, asking the pool to update its state and
/// the network to broadcast the block
pub struct ChainToPoolAndNetAdapter {
	// Pool reconciled against each newly accepted block.
	tx_pool: Arc<RwLock<pool::TransactionPool<PoolToChainAdapter>>>,
	// P2P server used to broadcast accepted blocks; set once via `init`.
	p2p: OneTime<Arc<Server>>,
}
|
|
|
|
|
2017-06-10 22:51:33 +03:00
|
|
|
impl ChainAdapter for ChainToPoolAndNetAdapter {
|
2016-12-21 04:39:02 +03:00
|
|
|
fn block_accepted(&self, b: &core::Block) {
|
2017-06-10 22:51:33 +03:00
|
|
|
{
|
|
|
|
if let Err(e) = self.tx_pool.write().unwrap().reconcile_block(b) {
|
2017-09-29 21:44:25 +03:00
|
|
|
error!(
|
2017-10-12 19:56:44 +03:00
|
|
|
LOGGER,
|
2017-09-29 21:44:25 +03:00
|
|
|
"Pool could not update itself at block {}: {:?}",
|
|
|
|
b.hash(),
|
|
|
|
e
|
|
|
|
);
|
2017-06-10 22:51:33 +03:00
|
|
|
}
|
|
|
|
}
|
2016-12-21 04:39:02 +03:00
|
|
|
self.p2p.borrow().broadcast_block(b);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-06-10 22:51:33 +03:00
|
|
|
impl ChainToPoolAndNetAdapter {
|
2017-10-12 19:56:44 +03:00
|
|
|
pub fn new(tx_pool: Arc<RwLock<pool::TransactionPool<PoolToChainAdapter>>>)
|
|
|
|
-> ChainToPoolAndNetAdapter {
|
2017-06-10 22:51:33 +03:00
|
|
|
ChainToPoolAndNetAdapter {
|
|
|
|
tx_pool: tx_pool,
|
|
|
|
p2p: OneTime::new(),
|
|
|
|
}
|
2016-12-21 04:39:02 +03:00
|
|
|
}
|
|
|
|
pub fn init(&self, p2p: Arc<Server>) {
|
|
|
|
self.p2p.init(p2p);
|
2016-12-19 02:51:54 +03:00
|
|
|
}
|
|
|
|
}
|
2017-06-10 22:51:33 +03:00
|
|
|
|
|
|
|
/// Implements the view of the blockchain required by the TransactionPool to
/// operate. Mostly needed to break any direct lifecycle or implementation
/// dependency between the pool and the chain.
#[derive(Clone)]
pub struct PoolToChainAdapter {
	// Set once via `set_chain` after construction.
	chain: OneTime<Arc<chain::Chain>>,
}
|
|
|
|
|
|
|
|
impl PoolToChainAdapter {
|
|
|
|
/// Create a new pool adapter
|
2017-07-04 02:46:25 +03:00
|
|
|
pub fn new() -> PoolToChainAdapter {
|
|
|
|
PoolToChainAdapter { chain: OneTime::new() }
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn set_chain(&self, chain_ref: Arc<chain::Chain>) {
|
|
|
|
self.chain.init(chain_ref);
|
2017-06-10 22:51:33 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
impl pool::BlockChain for PoolToChainAdapter {
|
2017-09-12 20:24:24 +03:00
|
|
|
fn get_unspent(&self, output_ref: &Commitment) -> Result<Output, pool::PoolError> {
|
2017-10-12 19:56:44 +03:00
|
|
|
self.chain
|
|
|
|
.borrow()
|
|
|
|
.get_unspent(output_ref)
|
|
|
|
.map_err(|e| match e {
|
2017-09-12 20:24:24 +03:00
|
|
|
chain::types::Error::OutputNotFound => pool::PoolError::OutputNotFound,
|
|
|
|
chain::types::Error::OutputSpent => pool::PoolError::OutputSpent,
|
|
|
|
_ => pool::PoolError::GenericPoolError,
|
2017-10-12 19:56:44 +03:00
|
|
|
})
|
2017-09-12 20:24:24 +03:00
|
|
|
}
|
|
|
|
|
2017-10-12 19:56:44 +03:00
|
|
|
fn get_block_header_by_output_commit(&self,
|
|
|
|
commit: &Commitment)
|
|
|
|
-> Result<BlockHeader, pool::PoolError> {
|
2017-09-29 21:44:25 +03:00
|
|
|
self.chain
|
|
|
|
.borrow()
|
|
|
|
.get_block_header_by_output_commit(commit)
|
2017-09-12 20:24:24 +03:00
|
|
|
.map_err(|_| pool::PoolError::GenericPoolError)
|
|
|
|
}
|
|
|
|
|
|
|
|
fn head_header(&self) -> Result<BlockHeader, pool::PoolError> {
|
2017-10-12 19:56:44 +03:00
|
|
|
self.chain
|
|
|
|
.borrow()
|
|
|
|
.head_header()
|
|
|
|
.map_err(|_| pool::PoolError::GenericPoolError)
|
2017-06-10 22:51:33 +03:00
|
|
|
}
|
|
|
|
}
|