grin/p2p/src/peer.rs

// Copyright 2018 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::fs::File;
use std::net::{SocketAddr, TcpStream};
use std::sync::Arc;
use util::{Mutex, RwLock};
use chrono::prelude::{DateTime, Utc};
use conn;
use core::core::hash::{Hash, Hashed};
use core::pow::Difficulty;
use core::{core, global};
use handshake::Handshake;
use msg::{self, BanReason, GetPeerAddrs, Locator, Ping, TxHashSetRequest};
use protocol::Protocol;
use types::{
Capabilities, ChainAdapter, Error, NetAdapter, P2PConfig, PeerInfo, ReasonForBan, TxHashSetRead,
};
const MAX_TRACK_SIZE: usize = 30;
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
/// Note: don't confuse this `State` with the `State` in p2p/src/store.rs,
/// which has three different states: {Healthy, Banned, Defunct}.
/// For example, a peer that is `Disconnected` here could still be `Healthy`
/// there and may reconnect on the next loop.
enum State {
Connected,
Disconnected,
Banned,
// Banned from the Peers side via ban_peer().
// This can happen when an error occurs in a received block (or compact
// block), received header(s), or a received txhashset.
}
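/// A connected peer: its metadata (`PeerInfo`), its connection state and the
/// tracking adapter used to avoid re-sending hashes the peer already knows.
///
/// A rough usage sketch (the stream, handshake, adapter and difficulty
/// values below are hypothetical placeholders, not part of this module):
///
/// ```ignore
/// let mut peer = Peer::connect(&mut stream, capab, total_difficulty, self_addr, &hs, adapter)?;
/// peer.start(stream);
/// if peer.is_connected() {
/// 	peer.send_ping(total_difficulty, height)?;
/// }
/// ```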
pub struct Peer {
pub info: PeerInfo,
state: Arc<RwLock<State>>,
// set of all hashes known to this peer (so no need to send)
tracking_adapter: TrackingAdapter,
connection: Option<Mutex<conn::Tracker>>,
}
impl Peer {
// Only `accept` and `connect` can be used externally to build a peer
fn new(info: PeerInfo, adapter: Arc<NetAdapter>) -> Peer {
Peer {
info,
state: Arc::new(RwLock::new(State::Connected)),
tracking_adapter: TrackingAdapter::new(adapter),
connection: None,
}
}
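/// Accept an incoming connection: run the receiving side of the handshake
/// and build a `Peer` from the resulting `PeerInfo`.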
pub fn accept(
conn: &mut TcpStream,
capab: Capabilities,
total_difficulty: Difficulty,
hs: &Handshake,
adapter: Arc<NetAdapter>,
) -> Result<Peer, Error> {
let info = hs.accept(capab, total_difficulty, conn)?;
Ok(Peer::new(info, adapter))
}
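/// Initiate an outbound connection: run the initiating side of the
/// handshake and build a `Peer` from the resulting `PeerInfo`.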
pub fn connect(
conn: &mut TcpStream,
capab: Capabilities,
total_difficulty: Difficulty,
self_addr: SocketAddr,
hs: &Handshake,
na: Arc<NetAdapter>,
) -> Result<Peer, Error> {
let info = hs.initiate(capab, total_difficulty, self_addr, conn)?;
Ok(Peer::new(info, na))
}
/// Main peer loop listening for messages and forwarding to the rest of the
/// system.
pub fn start(&mut self, conn: TcpStream) {
let addr = self.info.addr;
let adapter = Arc::new(self.tracking_adapter.clone());
let handler = Protocol::new(adapter, addr);
self.connection = Some(Mutex::new(conn::listen(conn, handler)));
}
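/// Whether the peer at `peer_addr` should be refused a connection, based on
/// the configured allow/deny lists. As a hypothetical example: with
/// `peers_deny = ["10.0.0.1:3414"]` only that peer is denied; with
/// `peers_allow = ["10.0.0.2:3414"]` every other peer is denied; with
/// neither list set, all peers are allowed.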
pub fn is_denied(config: &P2PConfig, peer_addr: &SocketAddr) -> bool {
let peer = format!("{}:{}", peer_addr.ip(), peer_addr.port());
if let Some(ref denied) = config.peers_deny {
if denied.contains(&peer) {
debug!(
"checking peer allowed/denied: {:?} explicitly denied",
peer_addr
);
return true;
}
}
if let Some(ref allowed) = config.peers_allow {
if allowed.contains(&peer) {
debug!(
"checking peer allowed/denied: {:?} explicitly allowed",
peer_addr
);
return false;
} else {
debug!(
"checking peer allowed/denied: {:?} not explicitly allowed, denying",
peer_addr
);
return true;
}
}
// default to allowing peer connection if we do not explicitly allow or deny
// the peer
false
}
/// Whether this peer is still connected.
pub fn is_connected(&self) -> bool {
self.check_connection()
}
/// Whether this peer has been banned.
pub fn is_banned(&self) -> bool {
State::Banned == *self.state.read()
}
/// Whether this peer is stuck on sync.
pub fn is_stuck(&self) -> (bool, Difficulty) {
let peer_live_info = self.info.live_info.read();
let now = Utc::now().timestamp_millis();
// If the difficulty was last updated more than two hours ago
// (STUCK_PEER_KICK_TIME), consider this peer stuck.
if now > peer_live_info.stuck_detector.timestamp_millis() + global::STUCK_PEER_KICK_TIME {
(true, peer_live_info.total_difficulty)
} else {
(false, peer_live_info.total_difficulty)
}
}
/// Number of bytes sent to the peer
pub fn sent_bytes(&self) -> Option<u64> {
if let Some(ref tracker) = self.connection {
let conn = tracker.lock();
let sent_bytes = conn.sent_bytes.read();
return Some(*sent_bytes);
}
None
}
/// Number of bytes received from the peer
pub fn received_bytes(&self) -> Option<u64> {
if let Some(ref tracker) = self.connection {
let conn = tracker.lock();
let received_bytes = conn.received_bytes.read();
return Some(*received_bytes);
}
None
}
/// Set this peer status to banned
pub fn set_banned(&self) {
*self.state.write() = State::Banned;
}
/// Send a ping to the remote peer, providing our local difficulty and
/// height
pub fn send_ping(&self, total_difficulty: Difficulty, height: u64) -> Result<(), Error> {
let ping_msg = Ping {
total_difficulty,
height,
};
self.connection
.as_ref()
.unwrap()
.lock()
.send(ping_msg, msg::Type::Ping)
}
/// Send the ban reason before banning
pub fn send_ban_reason(&self, ban_reason: ReasonForBan) {
let ban_reason_msg = BanReason { ban_reason };
match self
.connection
.as_ref()
.unwrap()
.lock()
.send(ban_reason_msg, msg::Type::BanReason)
{
Ok(_) => debug!("Sent ban reason {:?} to {}", ban_reason, self.info.addr),
Err(e) => error!(
"Could not send ban reason {:?} to {}: {:?}",
ban_reason, self.info.addr, e
),
};
}
/// Sends the provided block to the remote peer. The request may be dropped
/// if the remote peer is known to already have the block.
pub fn send_block(&self, b: &core::Block) -> Result<bool, Error> {
if !self.tracking_adapter.has(b.hash()) {
trace!("Send block {} to {}", b.hash(), self.info.addr);
self.connection
.as_ref()
.unwrap()
.lock()
.send(b, msg::Type::Block)?;
Ok(true)
} else {
debug!(
"Suppress block send {} to {} (already seen)",
b.hash(),
self.info.addr,
);
Ok(false)
}
}
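/// Sends the provided compact block to the remote peer. The request may be
/// dropped if the remote peer is known to already have the block.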
pub fn send_compact_block(&self, b: &core::CompactBlock) -> Result<bool, Error> {
if !self.tracking_adapter.has(b.hash()) {
trace!("Send compact block {} to {}", b.hash(), self.info.addr);
self.connection
.as_ref()
.unwrap()
.lock()
.send(b, msg::Type::CompactBlock)?;
Ok(true)
} else {
debug!(
"Suppress compact block send {} to {} (already seen)",
b.hash(),
self.info.addr,
);
Ok(false)
}
}
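/// Sends the provided block header to the remote peer. The request may be
/// dropped if the remote peer is known to already have the header.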
pub fn send_header(&self, bh: &core::BlockHeader) -> Result<bool, Error> {
if !self.tracking_adapter.has(bh.hash()) {
debug!("Send header {} to {}", bh.hash(), self.info.addr);
self.connection
.as_ref()
.unwrap()
.lock()
.send(bh, msg::Type::Header)?;
Ok(true)
} else {
debug!(
"Suppress header send {} to {} (already seen)",
bh.hash(),
self.info.addr,
);
Ok(false)
}
}
/// Sends the provided transaction to the remote peer. The request may be
/// dropped if the remote peer is known to already have the transaction.
pub fn send_transaction(&self, tx: &core::Transaction) -> Result<bool, Error> {
if !self.tracking_adapter.has(tx.hash()) {
debug!("Send tx {} to {}", tx.hash(), self.info.addr);
self.connection
.as_ref()
.unwrap()
.lock()
.send(tx, msg::Type::Transaction)?;
Ok(true)
} else {
debug!(
"Not sending tx {} to {} (already seen)",
tx.hash(),
self.info.addr
);
Ok(false)
}
}
/// Sends the provided stem transaction to the remote peer.
/// Note: tracking adapter is ignored for stem transactions (while under
/// embargo).
pub fn send_stem_transaction(&self, tx: &core::Transaction) -> Result<(), Error> {
debug!("Send (stem) tx {} to {}", tx.hash(), self.info.addr);
self.connection
.as_ref()
.unwrap()
.lock()
.send(tx, msg::Type::StemTransaction)?;
Ok(())
}
/// Sends a request for block headers from the provided block locator
pub fn send_header_request(&self, locator: Vec<Hash>) -> Result<(), Error> {
self.connection
.as_ref()
.unwrap()
.lock()
.send(&Locator { hashes: locator }, msg::Type::GetHeaders)
}
/// Sends a request for a specific block by hash
pub fn send_block_request(&self, h: Hash) -> Result<(), Error> {
debug!("Requesting block {} from peer {}.", h, self.info.addr);
self.connection
.as_ref()
.unwrap()
.lock()
.send(&h, msg::Type::GetBlock)
}
/// Sends a request for a specific compact block by hash
pub fn send_compact_block_request(&self, h: Hash) -> Result<(), Error> {
debug!("Requesting compact block {} from {}", h, self.info.addr);
self.connection
.as_ref()
.unwrap()
.lock()
.send(&h, msg::Type::GetCompactBlock)
}
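/// Sends a request for more peer addresses matching the given capabilities.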
pub fn send_peer_request(&self, capab: Capabilities) -> Result<(), Error> {
debug!("Asking {} for more peers.", self.info.addr);
self.connection.as_ref().unwrap().lock().send(
&GetPeerAddrs {
capabilities: capab,
},
msg::Type::GetPeerAddrs,
)
}
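/// Sends a request for the txhashset archive at the given block height and
/// hash.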
pub fn send_txhashset_request(&self, height: u64, hash: Hash) -> Result<(), Error> {
debug!(
"Asking {} for txhashset archive at {} {}.",
self.info.addr, height, hash
);
self.connection.as_ref().unwrap().lock().send(
&TxHashSetRequest { hash, height },
msg::Type::TxHashSetRequest,
)
}
/// Stops the peer, closing its connection
pub fn stop(&self) {
let _ = self
.connection
.as_ref()
.unwrap()
.lock()
.close_channel
.send(());
}
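// Checks the connection's error channel: on any received error, flags the
// peer as Disconnected (a Banned state is preserved for serialization
// errors) and stops it; otherwise returns whether the peer is still
// Connected.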
fn check_connection(&self) -> bool {
match self
.connection
.as_ref()
.unwrap()
.lock()
.error_channel
.try_recv()
{
Ok(Error::Serialization(e)) => {
let need_stop = {
let mut state = self.state.write();
if State::Banned != *state {
*state = State::Disconnected;
true
} else {
false
}
};
if need_stop {
debug!(
"Client {} corrupted, will disconnect ({:?}).",
self.info.addr, e
);
self.stop();
}
false
}
Ok(e) => {
let need_stop = {
let mut state = self.state.write();
if State::Disconnected != *state {
*state = State::Disconnected;
true
} else {
false
}
};
if need_stop {
debug!("Client {} connection lost: {:?}", self.info.addr, e);
self.stop();
}
false
}
Err(_) => {
let state = self.state.read();
State::Connected == *state
}
}
}
}
/// Adapter implementation that forwards everything to an underlying adapter
/// but keeps track of the block and transaction hashes that were received.
#[derive(Clone)]
struct TrackingAdapter {
adapter: Arc<NetAdapter>,
known: Arc<RwLock<Vec<Hash>>>,
}
impl TrackingAdapter {
fn new(adapter: Arc<NetAdapter>) -> TrackingAdapter {
TrackingAdapter {
adapter,
known: Arc::new(RwLock::new(vec![])),
}
}
fn has(&self, hash: Hash) -> bool {
let known = self.known.read();
// This linear scan may become too slow; an ordered set (by timestamp, for
// eviction) may end up being a better choice.
known.contains(&hash)
}
fn push(&self, hash: Hash) {
let mut known = self.known.write();
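// Bounded, most-recent-first tracking: drop the oldest entries beyond
// MAX_TRACK_SIZE, then push the new hash to the front.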
if known.len() > MAX_TRACK_SIZE {
known.truncate(MAX_TRACK_SIZE);
}
known.insert(0, hash);
}
}
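// Tracking flow in practice: when a block is received from (or sent to) this
// peer, its hash is push()ed, so a later send_block() for the same hash sees
// has() == true and suppresses the re-send.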
impl ChainAdapter for TrackingAdapter {
fn total_difficulty(&self) -> Difficulty {
self.adapter.total_difficulty()
}
fn total_height(&self) -> u64 {
self.adapter.total_height()
}
fn transaction_received(&self, tx: core::Transaction, stem: bool) {
// Do not track the tx hash for stem txs.
// Otherwise we fail to handle the subsequent fluff or embargo expiration
// correctly.
if !stem {
self.push(tx.hash());
}
self.adapter.transaction_received(tx, stem)
}
fn block_received(&self, b: core::Block, addr: SocketAddr) -> bool {
self.push(b.hash());
self.adapter.block_received(b, addr)
}
fn compact_block_received(&self, cb: core::CompactBlock, addr: SocketAddr) -> bool {
self.push(cb.hash());
self.adapter.compact_block_received(cb, addr)
}
fn header_received(&self, bh: core::BlockHeader, addr: SocketAddr) -> bool {
self.push(bh.hash());
self.adapter.header_received(bh, addr)
}
fn headers_received(&self, bh: Vec<core::BlockHeader>, addr: SocketAddr) -> bool {
self.adapter.headers_received(bh, addr)
}
fn locate_headers(&self, locator: Vec<Hash>) -> Vec<core::BlockHeader> {
self.adapter.locate_headers(locator)
}
fn get_block(&self, h: Hash) -> Option<core::Block> {
self.adapter.get_block(h)
}
fn txhashset_read(&self, h: Hash) -> Option<TxHashSetRead> {
self.adapter.txhashset_read(h)
}
fn txhashset_receive_ready(&self) -> bool {
self.adapter.txhashset_receive_ready()
}
fn txhashset_write(&self, h: Hash, txhashset_data: File, peer_addr: SocketAddr) -> bool {
self.adapter.txhashset_write(h, txhashset_data, peer_addr)
}
fn txhashset_download_update(
&self,
start_time: DateTime<Utc>,
downloaded_size: u64,
total_size: u64,
) -> bool {
self.adapter
.txhashset_download_update(start_time, downloaded_size, total_size)
}
}
impl NetAdapter for TrackingAdapter {
fn find_peer_addrs(&self, capab: Capabilities) -> Vec<SocketAddr> {
self.adapter.find_peer_addrs(capab)
}
fn peer_addrs_received(&self, addrs: Vec<SocketAddr>) {
self.adapter.peer_addrs_received(addrs)
}
fn peer_difficulty(&self, addr: SocketAddr, diff: Difficulty, height: u64) {
self.adapter.peer_difficulty(addr, diff, height)
}
fn is_banned(&self, addr: SocketAddr) -> bool {
self.adapter.is_banned(addr)
}
}