2019-10-02 11:40:20 +03:00
|
|
|
// Copyright 2019 The Grin Developers
|
2016-10-25 07:35:10 +03:00
|
|
|
//
|
|
|
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
|
|
|
// you may not use this file except in compliance with the License.
|
|
|
|
// You may obtain a copy of the License at
|
|
|
|
//
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0
|
|
|
|
//
|
|
|
|
// Unless required by applicable law or agreed to in writing, software
|
|
|
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
|
// See the License for the specific language governing permissions and
|
|
|
|
// limitations under the License.
|
|
|
|
|
2018-12-08 02:59:40 +03:00
|
|
|
use crate::util::{Mutex, RwLock};
|
2019-03-20 16:08:56 +03:00
|
|
|
use std::fmt;
|
2018-02-10 01:32:16 +03:00
|
|
|
use std::fs::File;
|
2019-05-14 19:17:38 +03:00
|
|
|
use std::io::Read;
|
2019-02-18 15:15:32 +03:00
|
|
|
use std::net::{Shutdown, TcpStream};
|
2019-04-23 02:54:36 +03:00
|
|
|
use std::path::PathBuf;
|
2019-08-01 19:46:06 +03:00
|
|
|
use std::sync::atomic::{AtomicBool, Ordering};
|
2018-10-20 03:13:07 +03:00
|
|
|
use std::sync::Arc;
|
2016-12-16 01:57:04 +03:00
|
|
|
|
2019-10-10 11:38:25 +03:00
|
|
|
use lru_cache::LruCache;
|
|
|
|
|
2019-04-08 23:13:28 +03:00
|
|
|
use crate::chain;
|
2018-12-08 02:59:40 +03:00
|
|
|
use crate::conn;
|
|
|
|
use crate::core::core::hash::{Hash, Hashed};
|
|
|
|
use crate::core::pow::Difficulty;
|
2019-05-03 17:56:25 +03:00
|
|
|
use crate::core::ser::Writeable;
|
2018-12-08 02:59:40 +03:00
|
|
|
use crate::core::{core, global};
|
|
|
|
use crate::handshake::Handshake;
|
2019-05-14 19:17:38 +03:00
|
|
|
use crate::msg::{
|
2019-10-07 18:22:05 +03:00
|
|
|
self, BanReason, GetPeerAddrs, KernelDataRequest, Locator, Msg, Ping, TxHashSetRequest, Type,
|
2019-05-14 19:17:38 +03:00
|
|
|
};
|
2018-12-08 02:59:40 +03:00
|
|
|
use crate::protocol::Protocol;
|
|
|
|
use crate::types::{
|
2019-02-18 15:15:32 +03:00
|
|
|
Capabilities, ChainAdapter, Error, NetAdapter, P2PConfig, PeerAddr, PeerInfo, ReasonForBan,
|
|
|
|
TxHashSetRead,
|
2018-08-17 05:30:05 +03:00
|
|
|
};
|
2018-12-08 02:59:40 +03:00
|
|
|
use chrono::prelude::{DateTime, Utc};
|
2016-10-25 07:35:10 +03:00
|
|
|
|
2017-10-26 20:48:51 +03:00
|
|
|
const MAX_TRACK_SIZE: usize = 30;
|
2018-11-08 01:15:12 +03:00
|
|
|
const MAX_PEER_MSG_PER_MIN: u64 = 500;
|
2017-10-26 20:48:51 +03:00
|
|
|
|
2017-02-28 01:17:53 +03:00
|
|
|
/// Connection state of a peer as tracked by the p2p layer.
///
/// Remind: don't mix up this 'State' with that 'State' in p2p/src/store.rs,
/// which has different 3 states: {Healthy, Banned, Defunct}.
/// For example: 'Disconnected' state here could still be 'Healthy' and could
/// reconnect in next loop.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum State {
	Connected,
	Banned,
}
|
|
|
|
|
2016-10-29 22:36:45 +03:00
|
|
|
pub struct Peer {
|
2017-02-08 00:52:17 +03:00
|
|
|
pub info: PeerInfo,
|
2017-02-27 07:08:40 +03:00
|
|
|
state: Arc<RwLock<State>>,
|
2017-10-26 20:48:51 +03:00
|
|
|
// set of all hashes known to this peer (so no need to send)
|
|
|
|
tracking_adapter: TrackingAdapter,
|
2019-05-15 18:51:35 +03:00
|
|
|
tracker: Arc<conn::Tracker>,
|
|
|
|
send_handle: Mutex<conn::ConnHandle>,
|
|
|
|
// we need a special lock for stop operation, can't reuse handle mutex for that
|
|
|
|
// because it may be locked by different reasons, so we should wait for that, close
|
|
|
|
// mutex can be taken only during shutdown, it happens once
|
|
|
|
stop_handle: Mutex<conn::StopHandle>,
|
2019-08-01 19:46:06 +03:00
|
|
|
// Whether or not we requested a txhashset from this peer
|
|
|
|
state_sync_requested: Arc<AtomicBool>,
|
[1.1.0] Merge master into 1.1.0 (#2720)
* cleanup legacy "3 dot" check (#2625)
* Allow to peers behind NAT to get up to preferred_max connections (#2543)
Allow to peers behind NAT to get up to preffered_max connections
If peer has only outbound connections it's mot likely behind NAT and we should not stop it from getting more outbound connections
* Reduce usage of unwrap in p2p crate (#2627)
Also change store crate a bit
* Simplify (and fix) output_pos cleanup during chain compaction (#2609)
* expose leaf pos iterator
use it for various things in txhashset when iterating over outputs
* fix
* cleanup
* rebuild output_pos index (and clear it out first) when compacting the chain
* fixup tests
* refactor to match on (output, proof) tuple
* add comments to compact() to explain what is going on.
* get rid of some boxing around the leaf_set iterator
* cleanup
* [docs] Add switch commitment documentation (#2526)
* remove references to no-longer existing switch commitment hash
(as switch commitments were removed in ca8447f3bd49e80578770da841e5fbbac2c23cde
and moved into the blinding factor of the Pedersen Commitment)
* some rewording (points vs curves) and fix of small formatting issues
* Add switch commitment documentation
* [docs] Documents in grin repo had translated in Korean. (#2604)
* Start to M/W intro translate in Korean
* translate in Korean
* add korean translation on intro
* table_of_content.md translate in Korean.
* table_of_content_KR.md finish translate in Korean, start to translate State_KR.md
* add state_KR.md & commit some translation in State_KR.md
* WIP stat_KR.md translation
* add build_KR.md && stratum_KR.md
* finish translate stratum_KR.md & table_of_content_KR.md
* rename intro.KR.md to intro_KR.md
* add intro_KR.md file path each language's intro.md
* add Korean translation file path to stratum.md & table_of_contents.md
* fix difference with grin/master
* Fix TxHashSet file filter for Windows. (#2641)
* Fix TxHashSet file filter for Windows.
* rustfmt
* Updating regexp
* Adding in test case
* Display the current download rate rather than the average when syncing the chain (#2633)
* When syncing the chain, calculate the displayed download speed using the current rate from the most recent iteration, rather than the average download speed from the entire syncing process.
* Replace the explicitly ignored variables in the pattern with an implicit ignore
* remove root = true from editorconfig (#2655)
* Add Medium post to intro (#2654)
Spoke to @yeastplume who agreed it makes sense to add the "Grin Transactions Explained, Step-by-Step" Medium post to intro.md
Open for suggestions on a better location.
* add a new configure item for log_max_files (#2601)
* add a new configure item for log_max_files
* rustfmt
* use a constant instead of multiple 32
* rustfmt
* Fix the build warning of deprecated trim_right_matches (#2662)
* [DOC] state.md, build.md and chain directory documents translate in Korean. (#2649)
* add md files for translation.
* start to translation fast-sync, code_structure. add file build_KR.md, states_KR.md
* add dandelion_KR.md && simulation_KR.md for Korean translation.
* add md files for translation.
* start to translation fast-sync, code_structure. add file build_KR.md, states_KR.md
* add dandelion_KR.md && simulation_KR.md for Korean translation.
* remove some useless md files for translation. this is rearrange set up translation order.
* add dot end of sentence & translate build.md in korean
* remove fast-sync_KR.md
* finish build_KR.md translation
* finish build_KR.md translation
* finish translation state_KR.md & add phrase in state.md to move other language md file
* translate blocks_and_headers.md && chain_sync.md in Korean
* add . in chain_sync.md , translation finished in doc/chain dir.
* fix some miss typos
* Api documentation fixes (#2646)
* Fix the API documentation for Chain Validate (v1/chain/validate). It was documented as a POST, but it is actually a GET request, which can be seen in its handler ChainValidationHandler
* Update the API V1 route list response to include the headers and merkleproof routes. Also clarify that for the chain/outputs route you must specify either byids or byheight to select outputs.
* refactor(ci): reorganize CI related code (#2658)
Break-down the CI related code into smaller more maintainable pieces.
* Specify grin or nanogrins in API docs where applicable (#2642)
* Set Content-Type in API client (#2680)
* Reduce number of unwraps in chain crate (#2679)
* fix: the restart of state sync doesn't work sometimes (#2687)
* let check_txhashset_needed return true on abnormal case (#2684)
* Reduce number of unwwaps in api crate (#2681)
* Reduce number of unwwaps in api crate
* Format use section
* Small QoL improvements for wallet developers (#2651)
* Small changes for wallet devs
* Move create_nonce into Keychain trait
* Replace match by map_err
* Add flag to Slate to skip fee check
* Fix secp dependency
* Remove check_fee flag in Slate
* Add Japanese edition of build.md (#2697)
* catch the panic to avoid peer thread quit early (#2686)
* catch the panic to avoid peer thread quit before taking the chance to ban
* move catch wrapper logic down into the util crate
* log the panic info
* keep txhashset.rs untouched
* remove a warning
* [DOC] dandelion.md, simulation.md ,fast-sync.md and pruning.md documents translate in Korean. (#2678)
* Show response code in API client error message (#2683)
It's hard to investigate what happens when an API client error is
printed out
* Add some better logging for get_outputs_by_id failure states (#2705)
* Switch commitment doc fixes (#2645)
Fix some typos and remove the use of parentheses in a
couple of places to make the reading flow a bit better.
* docs: update/add new README.md badges (#2708)
Replace existing badges with SVG counterparts and add a bunch of new ones.
* Update intro.md (#2702)
Add mention of censoring attack prevented by range proofs
* use sandbox folder for txhashset validation on state sync (#2685)
* use sandbox folder for txhashset validation on state sync
* rustfmt
* use temp directory as the sandbox instead actual db_root txhashset dir
* rustfmt
* move txhashset overwrite to the end of full validation
* fix travis-ci test
* rustfmt
* fix: hashset have 2 folders including txhashset and header
* rustfmt
*
(1)switch to rebuild_header_mmr instead of copy the sandbox header mmr
(2)lock txhashset when overwriting and opening and rebuild
* minor improve on sandbox_dir
* add Japanese edition of state.md (#2703)
* Attempt to fix broken TUI locale (#2713)
Can confirm that on the same machine 1.0.2 TUI looks great and is broken on
the current master. Bump of `cursive` version fixed it for me.
Fixes #2676
* clean the header folder in sandbox (#2716)
* forgot to clean the header folder in sandbox in #2685
* Reduce number of unwraps in servers crate (#2707)
It doesn't include stratum server which is sufficiently changed in 1.1
branch and adapters, which is big enough for a separate PR.
* rustfmt
* change version to beta
2019-04-01 13:47:48 +03:00
|
|
|
}
|
|
|
|
|
2019-03-20 16:08:56 +03:00
|
|
|
impl fmt::Debug for Peer {
|
|
|
|
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
|
|
|
write!(f, "Peer({:?})", &self.info)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2016-10-29 22:36:45 +03:00
|
|
|
impl Peer {
|
2017-10-26 20:48:51 +03:00
|
|
|
// Only accept and connect can be externally used to build a peer
|
2019-05-15 18:51:35 +03:00
|
|
|
fn new(info: PeerInfo, conn: TcpStream, adapter: Arc<dyn NetAdapter>) -> std::io::Result<Peer> {
|
2019-05-03 17:56:25 +03:00
|
|
|
let state = Arc::new(RwLock::new(State::Connected));
|
2019-08-01 19:46:06 +03:00
|
|
|
let state_sync_requested = Arc::new(AtomicBool::new(false));
|
2019-05-03 17:56:25 +03:00
|
|
|
let tracking_adapter = TrackingAdapter::new(adapter);
|
2019-08-01 19:46:06 +03:00
|
|
|
let handler = Protocol::new(
|
|
|
|
Arc::new(tracking_adapter.clone()),
|
|
|
|
info.clone(),
|
|
|
|
state_sync_requested.clone(),
|
|
|
|
);
|
2019-05-15 18:51:35 +03:00
|
|
|
let tracker = Arc::new(conn::Tracker::new());
|
2019-06-27 19:19:41 +03:00
|
|
|
let (sendh, stoph) = conn::listen(conn, info.version, tracker.clone(), handler)?;
|
2019-05-15 18:51:35 +03:00
|
|
|
let send_handle = Mutex::new(sendh);
|
|
|
|
let stop_handle = Mutex::new(stoph);
|
|
|
|
Ok(Peer {
|
2018-10-09 10:27:34 +03:00
|
|
|
info,
|
2019-05-03 17:56:25 +03:00
|
|
|
state,
|
|
|
|
tracking_adapter,
|
2019-05-15 18:51:35 +03:00
|
|
|
tracker,
|
|
|
|
send_handle,
|
|
|
|
stop_handle,
|
2019-08-01 19:46:06 +03:00
|
|
|
state_sync_requested,
|
2019-05-15 18:51:35 +03:00
|
|
|
})
|
2017-10-26 20:48:51 +03:00
|
|
|
}
|
|
|
|
|
2018-02-02 05:03:12 +03:00
|
|
|
pub fn accept(
|
2019-05-03 17:56:25 +03:00
|
|
|
mut conn: TcpStream,
|
2017-10-26 20:48:51 +03:00
|
|
|
capab: Capabilities,
|
|
|
|
total_difficulty: Difficulty,
|
2018-02-02 05:03:12 +03:00
|
|
|
hs: &Handshake,
|
2018-12-08 02:59:40 +03:00
|
|
|
adapter: Arc<dyn NetAdapter>,
|
2018-02-02 05:03:12 +03:00
|
|
|
) -> Result<Peer, Error> {
|
2018-12-31 14:24:30 +03:00
|
|
|
debug!("accept: handshaking from {:?}", conn.peer_addr());
|
2019-05-03 17:56:25 +03:00
|
|
|
let info = hs.accept(capab, total_difficulty, &mut conn);
|
2018-12-31 14:24:30 +03:00
|
|
|
match info {
|
2019-05-15 18:51:35 +03:00
|
|
|
Ok(info) => Ok(Peer::new(info, conn, adapter)?),
|
2018-12-31 14:24:30 +03:00
|
|
|
Err(e) => {
|
|
|
|
debug!(
|
|
|
|
"accept: handshaking from {:?} failed with error: {:?}",
|
|
|
|
conn.peer_addr(),
|
|
|
|
e
|
|
|
|
);
|
|
|
|
if let Err(e) = conn.shutdown(Shutdown::Both) {
|
|
|
|
debug!("Error shutting down conn: {:?}", e);
|
|
|
|
}
|
|
|
|
Err(e)
|
|
|
|
}
|
|
|
|
}
|
2016-10-31 04:23:52 +03:00
|
|
|
}
|
2016-10-29 22:36:45 +03:00
|
|
|
|
2018-02-02 05:03:12 +03:00
|
|
|
pub fn connect(
|
2019-05-03 17:56:25 +03:00
|
|
|
mut conn: TcpStream,
|
2017-10-26 20:48:51 +03:00
|
|
|
capab: Capabilities,
|
|
|
|
total_difficulty: Difficulty,
|
2019-02-18 15:15:32 +03:00
|
|
|
self_addr: PeerAddr,
|
2017-10-26 20:48:51 +03:00
|
|
|
hs: &Handshake,
|
2019-05-03 17:56:25 +03:00
|
|
|
adapter: Arc<dyn NetAdapter>,
|
2018-02-02 05:03:12 +03:00
|
|
|
) -> Result<Peer, Error> {
|
2019-01-02 04:54:17 +03:00
|
|
|
debug!("connect: handshaking with {:?}", conn.peer_addr());
|
2019-05-03 17:56:25 +03:00
|
|
|
let info = hs.initiate(capab, total_difficulty, self_addr, &mut conn);
|
2018-12-31 14:24:30 +03:00
|
|
|
match info {
|
2019-05-15 18:51:35 +03:00
|
|
|
Ok(info) => Ok(Peer::new(info, conn, adapter)?),
|
2018-12-31 14:24:30 +03:00
|
|
|
Err(e) => {
|
|
|
|
debug!(
|
|
|
|
"connect: handshaking with {:?} failed with error: {:?}",
|
2019-01-02 04:54:17 +03:00
|
|
|
conn.peer_addr(),
|
2018-12-31 14:24:30 +03:00
|
|
|
e
|
|
|
|
);
|
|
|
|
if let Err(e) = conn.shutdown(Shutdown::Both) {
|
|
|
|
debug!("Error shutting down conn: {:?}", e);
|
|
|
|
}
|
|
|
|
Err(e)
|
|
|
|
}
|
|
|
|
}
|
2016-10-31 04:23:52 +03:00
|
|
|
}
|
|
|
|
|
2019-02-18 15:15:32 +03:00
|
|
|
pub fn is_denied(config: &P2PConfig, peer_addr: PeerAddr) -> bool {
|
2018-01-31 00:44:13 +03:00
|
|
|
if let Some(ref denied) = config.peers_deny {
|
2019-02-18 15:15:32 +03:00
|
|
|
if denied.contains(&peer_addr) {
|
2018-03-04 03:19:54 +03:00
|
|
|
debug!(
|
2018-10-21 23:30:56 +03:00
|
|
|
"checking peer allowed/denied: {:?} explicitly denied",
|
|
|
|
peer_addr
|
2018-03-04 03:19:54 +03:00
|
|
|
);
|
2018-01-31 00:44:13 +03:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
if let Some(ref allowed) = config.peers_allow {
|
2019-02-18 15:15:32 +03:00
|
|
|
if allowed.contains(&peer_addr) {
|
2018-03-04 03:19:54 +03:00
|
|
|
debug!(
|
2018-10-21 23:30:56 +03:00
|
|
|
"checking peer allowed/denied: {:?} explicitly allowed",
|
|
|
|
peer_addr
|
2018-03-04 03:19:54 +03:00
|
|
|
);
|
2018-01-31 00:44:13 +03:00
|
|
|
return false;
|
|
|
|
} else {
|
2018-03-04 03:19:54 +03:00
|
|
|
debug!(
|
2018-10-21 23:30:56 +03:00
|
|
|
"checking peer allowed/denied: {:?} not explicitly allowed, denying",
|
|
|
|
peer_addr
|
2018-03-04 03:19:54 +03:00
|
|
|
);
|
2018-01-31 00:44:13 +03:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-03-04 03:19:54 +03:00
|
|
|
// default to allowing peer connection if we do not explicitly allow or deny
|
|
|
|
// the peer
|
2018-01-31 00:44:13 +03:00
|
|
|
false
|
|
|
|
}
|
|
|
|
|
2019-05-03 17:35:43 +03:00
|
|
|
/// Whether this peer is currently connected.
|
2017-02-28 01:17:53 +03:00
|
|
|
pub fn is_connected(&self) -> bool {
|
2019-05-03 17:35:43 +03:00
|
|
|
State::Connected == *self.state.read()
|
2017-02-28 01:17:53 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Whether this peer has been banned.
|
|
|
|
pub fn is_banned(&self) -> bool {
|
2018-10-20 03:13:07 +03:00
|
|
|
State::Banned == *self.state.read()
|
2017-02-28 01:17:53 +03:00
|
|
|
}
|
|
|
|
|
2018-10-16 19:14:16 +03:00
|
|
|
/// Whether this peer is stuck on sync.
|
|
|
|
pub fn is_stuck(&self) -> (bool, Difficulty) {
|
2018-10-20 03:13:07 +03:00
|
|
|
let peer_live_info = self.info.live_info.read();
|
2018-10-16 19:14:16 +03:00
|
|
|
let now = Utc::now().timestamp_millis();
|
|
|
|
// if last updated difficulty is 2 hours ago, we're sure this peer is a stuck node.
|
|
|
|
if now > peer_live_info.stuck_detector.timestamp_millis() + global::STUCK_PEER_KICK_TIME {
|
|
|
|
(true, peer_live_info.total_difficulty)
|
|
|
|
} else {
|
|
|
|
(false, peer_live_info.total_difficulty)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-11-07 04:51:22 +03:00
|
|
|
/// Whether the peer is considered abusive, mostly for spammy nodes
|
|
|
|
pub fn is_abusive(&self) -> bool {
|
2019-05-15 18:51:35 +03:00
|
|
|
let rec = self.tracker.received_bytes.read();
|
|
|
|
let sent = self.tracker.sent_bytes.read();
|
2019-05-03 17:56:25 +03:00
|
|
|
rec.count_per_min() > MAX_PEER_MSG_PER_MIN || sent.count_per_min() > MAX_PEER_MSG_PER_MIN
|
2018-11-07 04:51:22 +03:00
|
|
|
}
|
|
|
|
|
2018-10-17 20:01:42 +03:00
|
|
|
/// Number of bytes sent to the peer
|
2018-11-07 04:51:22 +03:00
|
|
|
pub fn last_min_sent_bytes(&self) -> Option<u64> {
|
2019-05-15 18:51:35 +03:00
|
|
|
let sent_bytes = self.tracker.sent_bytes.read();
|
2019-05-03 17:56:25 +03:00
|
|
|
Some(sent_bytes.bytes_per_min())
|
2018-10-17 20:01:42 +03:00
|
|
|
}
|
|
|
|
|
|
|
|
/// Number of bytes received from the peer
|
2018-11-07 04:51:22 +03:00
|
|
|
pub fn last_min_received_bytes(&self) -> Option<u64> {
|
2019-05-15 18:51:35 +03:00
|
|
|
let received_bytes = self.tracker.received_bytes.read();
|
2019-05-03 17:56:25 +03:00
|
|
|
Some(received_bytes.bytes_per_min())
|
2018-10-17 20:01:42 +03:00
|
|
|
}
|
|
|
|
|
2018-11-08 01:15:12 +03:00
|
|
|
pub fn last_min_message_counts(&self) -> Option<(u64, u64)> {
|
2019-05-15 18:51:35 +03:00
|
|
|
let received_bytes = self.tracker.received_bytes.read();
|
|
|
|
let sent_bytes = self.tracker.sent_bytes.read();
|
2019-05-03 17:56:25 +03:00
|
|
|
Some((sent_bytes.count_per_min(), received_bytes.count_per_min()))
|
2018-11-08 01:15:12 +03:00
|
|
|
}
|
|
|
|
|
2017-11-28 07:44:33 +03:00
|
|
|
/// Set this peer status to banned
|
|
|
|
pub fn set_banned(&self) {
|
2018-10-20 03:13:07 +03:00
|
|
|
*self.state.write() = State::Banned;
|
2017-11-28 07:44:33 +03:00
|
|
|
}
|
|
|
|
|
2019-05-03 17:56:25 +03:00
|
|
|
/// Send a msg with given msg_type to our peer via the connection.
|
|
|
|
fn send<T: Writeable>(&self, msg: T, msg_type: Type) -> Result<(), Error> {
|
2019-10-07 18:22:05 +03:00
|
|
|
let msg = Msg::new(msg_type, msg, self.info.version)?;
|
|
|
|
self.send_handle.lock().send(msg)
|
2019-05-03 17:56:25 +03:00
|
|
|
}
|
|
|
|
|
2018-03-04 03:19:54 +03:00
|
|
|
/// Send a ping to the remote peer, providing our local difficulty and
|
|
|
|
/// height
|
2017-12-14 00:52:21 +03:00
|
|
|
pub fn send_ping(&self, total_difficulty: Difficulty, height: u64) -> Result<(), Error> {
|
2018-03-04 03:19:54 +03:00
|
|
|
let ping_msg = Ping {
|
|
|
|
total_difficulty,
|
|
|
|
height,
|
|
|
|
};
|
2019-05-03 17:56:25 +03:00
|
|
|
self.send(ping_msg, msg::Type::Ping)
|
2016-12-11 06:11:49 +03:00
|
|
|
}
|
|
|
|
|
2018-05-29 05:45:31 +03:00
|
|
|
/// Send the ban reason before banning
|
[1.1.0] Merge master into 1.1.0 (#2720)
* cleanup legacy "3 dot" check (#2625)
* Allow to peers behind NAT to get up to preferred_max connections (#2543)
Allow to peers behind NAT to get up to preffered_max connections
If peer has only outbound connections it's mot likely behind NAT and we should not stop it from getting more outbound connections
* Reduce usage of unwrap in p2p crate (#2627)
Also change store crate a bit
* Simplify (and fix) output_pos cleanup during chain compaction (#2609)
* expose leaf pos iterator
use it for various things in txhashset when iterating over outputs
* fix
* cleanup
* rebuild output_pos index (and clear it out first) when compacting the chain
* fixup tests
* refactor to match on (output, proof) tuple
* add comments to compact() to explain what is going on.
* get rid of some boxing around the leaf_set iterator
* cleanup
* [docs] Add switch commitment documentation (#2526)
* remove references to no-longer existing switch commitment hash
(as switch commitments were removed in ca8447f3bd49e80578770da841e5fbbac2c23cde
and moved into the blinding factor of the Pedersen Commitment)
* some rewording (points vs curves) and fix of small formatting issues
* Add switch commitment documentation
* [docs] Documents in grin repo had translated in Korean. (#2604)
* Start to M/W intro translate in Korean
* translate in Korean
* add korean translation on intro
* table_of_content.md translate in Korean.
* table_of_content_KR.md finish translate in Korean, start to translate State_KR.md
* add state_KR.md & commit some translation in State_KR.md
* WIP stat_KR.md translation
* add build_KR.md && stratum_KR.md
* finish translate stratum_KR.md & table_of_content_KR.md
* rename intro.KR.md to intro_KR.md
* add intro_KR.md file path each language's intro.md
* add Korean translation file path to stratum.md & table_of_contents.md
* fix difference with grin/master
* Fix TxHashSet file filter for Windows. (#2641)
* Fix TxHashSet file filter for Windows.
* rustfmt
* Updating regexp
* Adding in test case
* Display the current download rate rather than the average when syncing the chain (#2633)
* When syncing the chain, calculate the displayed download speed using the current rate from the most recent iteration, rather than the average download speed from the entire syncing process.
* Replace the explicitly ignored variables in the pattern with an implicit ignore
* remove root = true from editorconfig (#2655)
* Add Medium post to intro (#2654)
Spoke to @yeastplume who agreed it makes sense to add the "Grin Transactions Explained, Step-by-Step" Medium post to intro.md
Open for suggestions on a better location.
* add a new configure item for log_max_files (#2601)
* add a new configure item for log_max_files
* rustfmt
* use a constant instead of multiple 32
* rustfmt
* Fix the build warning of deprecated trim_right_matches (#2662)
* [DOC] state.md, build.md and chain directory documents translate in Korean. (#2649)
* add md files for translation.
* start to translation fast-sync, code_structure. add file build_KR.md, states_KR.md
* add dandelion_KR.md && simulation_KR.md for Korean translation.
* add md files for translation.
* start to translation fast-sync, code_structure. add file build_KR.md, states_KR.md
* add dandelion_KR.md && simulation_KR.md for Korean translation.
* remove some useless md files for translation. this is rearrange set up translation order.
* add dot end of sentence & translate build.md in korean
* remove fast-sync_KR.md
* finish build_KR.md translation
* finish build_KR.md translation
* finish translation state_KR.md & add phrase in state.md to move other language md file
* translate blocks_and_headers.md && chain_sync.md in Korean
* add . in chain_sync.md , translation finished in doc/chain dir.
* fix some miss typos
* Api documentation fixes (#2646)
* Fix the API documentation for Chain Validate (v1/chain/validate). It was documented as a POST, but it is actually a GET request, which can be seen in its handler ChainValidationHandler
* Update the API V1 route list response to include the headers and merkleproof routes. Also clarify that for the chain/outputs route you must specify either byids or byheight to select outputs.
* refactor(ci): reorganize CI related code (#2658)
Break-down the CI related code into smaller more maintainable pieces.
* Specify grin or nanogrins in API docs where applicable (#2642)
* Set Content-Type in API client (#2680)
* Reduce number of unwraps in chain crate (#2679)
* fix: the restart of state sync doesn't work sometimes (#2687)
* let check_txhashset_needed return true on abnormal case (#2684)
* Reduce number of unwwaps in api crate (#2681)
* Reduce number of unwwaps in api crate
* Format use section
* Small QoL improvements for wallet developers (#2651)
* Small changes for wallet devs
* Move create_nonce into Keychain trait
* Replace match by map_err
* Add flag to Slate to skip fee check
* Fix secp dependency
* Remove check_fee flag in Slate
* Add Japanese edition of build.md (#2697)
* catch the panic to avoid peer thread quit early (#2686)
* catch the panic to avoid peer thread quit before taking the chance to ban
* move catch wrapper logic down into the util crate
* log the panic info
* keep txhashset.rs untouched
* remove a warning
* [DOC] dandelion.md, simulation.md ,fast-sync.md and pruning.md documents translate in Korean. (#2678)
* Show response code in API client error message (#2683)
It's hard to investigate what happens when an API client error is
printed out
* Add some better logging for get_outputs_by_id failure states (#2705)
* Switch commitment doc fixes (#2645)
Fix some typos and remove the use of parentheses in a
couple of places to make the reading flow a bit better.
* docs: update/add new README.md badges (#2708)
Replace existing badges with SVG counterparts and add a bunch of new ones.
* Update intro.md (#2702)
Add mention of censoring attack prevented by range proofs
* use sandbox folder for txhashset validation on state sync (#2685)
* use sandbox folder for txhashset validation on state sync
* rustfmt
* use temp directory as the sandbox instead actual db_root txhashset dir
* rustfmt
* move txhashset overwrite to the end of full validation
* fix travis-ci test
* rustfmt
* fix: hashset have 2 folders including txhashset and header
* rustfmt
*
(1)switch to rebuild_header_mmr instead of copy the sandbox header mmr
(2)lock txhashset when overwriting and opening and rebuild
* minor improve on sandbox_dir
* add Japanese edition of state.md (#2703)
* Attempt to fix broken TUI locale (#2713)
Can confirm that on the same machine 1.0.2 TUI looks great and is broken on
the current master. Bump of `cursive` version fixed it for me.
Fixes #2676
* clean the header folder in sandbox (#2716)
* forgot to clean the header folder in sandbox in #2685
* Reduce number of unwraps in servers crate (#2707)
It doesn't include stratum server which is sufficiently changed in 1.1
branch and adapters, which is big enough for a separate PR.
* rustfmt
* change version to beta
2019-04-01 13:47:48 +03:00
|
|
|
pub fn send_ban_reason(&self, ban_reason: ReasonForBan) -> Result<(), Error> {
|
2018-05-29 05:45:31 +03:00
|
|
|
let ban_reason_msg = BanReason { ban_reason };
|
2019-05-03 17:56:25 +03:00
|
|
|
self.send(ban_reason_msg, msg::Type::BanReason).map(|_| ())
|
2018-05-29 05:45:31 +03:00
|
|
|
}
|
|
|
|
|
2016-12-21 04:33:20 +03:00
|
|
|
/// Sends the provided block to the remote peer. The request may be dropped
|
|
|
|
/// if the remote peer is known to already have the block.
|
2018-10-09 10:27:34 +03:00
|
|
|
pub fn send_block(&self, b: &core::Block) -> Result<bool, Error> {
|
2019-01-12 20:28:03 +03:00
|
|
|
if !self.tracking_adapter.has_recv(b.hash()) {
|
2018-10-21 23:30:56 +03:00
|
|
|
trace!("Send block {} to {}", b.hash(), self.info.addr);
|
2019-05-03 17:56:25 +03:00
|
|
|
self.send(b, msg::Type::Block)?;
|
2018-10-09 10:27:34 +03:00
|
|
|
Ok(true)
|
2017-10-26 20:48:51 +03:00
|
|
|
} else {
|
2018-01-30 17:42:04 +03:00
|
|
|
debug!(
|
|
|
|
"Suppress block send {} to {} (already seen)",
|
|
|
|
b.hash(),
|
|
|
|
self.info.addr,
|
|
|
|
);
|
2018-10-09 10:27:34 +03:00
|
|
|
Ok(false)
|
2018-01-30 17:42:04 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-10-09 10:27:34 +03:00
|
|
|
pub fn send_compact_block(&self, b: &core::CompactBlock) -> Result<bool, Error> {
|
2019-01-12 20:28:03 +03:00
|
|
|
if !self.tracking_adapter.has_recv(b.hash()) {
|
2018-10-21 23:30:56 +03:00
|
|
|
trace!("Send compact block {} to {}", b.hash(), self.info.addr);
|
2019-05-03 17:56:25 +03:00
|
|
|
self.send(b, msg::Type::CompactBlock)?;
|
2018-10-09 10:27:34 +03:00
|
|
|
Ok(true)
|
2018-01-31 23:39:55 +03:00
|
|
|
} else {
|
|
|
|
debug!(
|
|
|
|
"Suppress compact block send {} to {} (already seen)",
|
|
|
|
b.hash(),
|
|
|
|
self.info.addr,
|
|
|
|
);
|
2018-10-09 10:27:34 +03:00
|
|
|
Ok(false)
|
2018-01-31 23:39:55 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-10-09 10:27:34 +03:00
|
|
|
pub fn send_header(&self, bh: &core::BlockHeader) -> Result<bool, Error> {
|
2019-01-12 20:28:03 +03:00
|
|
|
if !self.tracking_adapter.has_recv(bh.hash()) {
|
2018-10-21 23:30:56 +03:00
|
|
|
debug!("Send header {} to {}", bh.hash(), self.info.addr);
|
2019-05-03 17:56:25 +03:00
|
|
|
self.send(bh, msg::Type::Header)?;
|
2018-10-09 10:27:34 +03:00
|
|
|
Ok(true)
|
2018-01-30 17:42:04 +03:00
|
|
|
} else {
|
2018-10-09 10:27:34 +03:00
|
|
|
debug!(
|
2018-01-30 17:42:04 +03:00
|
|
|
"Suppress header send {} to {} (already seen)",
|
|
|
|
bh.hash(),
|
|
|
|
self.info.addr,
|
|
|
|
);
|
2018-10-09 10:27:34 +03:00
|
|
|
Ok(false)
|
2017-10-26 20:48:51 +03:00
|
|
|
}
|
2016-12-21 04:33:20 +03:00
|
|
|
}
|
|
|
|
|
2018-11-07 12:28:17 +03:00
|
|
|
pub fn send_tx_kernel_hash(&self, h: Hash) -> Result<bool, Error> {
|
2019-01-12 20:28:03 +03:00
|
|
|
if !self.tracking_adapter.has_recv(h) {
|
2018-11-07 12:28:17 +03:00
|
|
|
debug!("Send tx kernel hash {} to {}", h, self.info.addr);
|
2019-05-03 17:56:25 +03:00
|
|
|
self.send(h, msg::Type::TransactionKernel)?;
|
2018-11-07 12:28:17 +03:00
|
|
|
Ok(true)
|
|
|
|
} else {
|
|
|
|
debug!(
|
|
|
|
"Not sending tx kernel hash {} to {} (already seen)",
|
|
|
|
h, self.info.addr
|
|
|
|
);
|
|
|
|
Ok(false)
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2017-10-26 00:06:24 +03:00
|
|
|
/// Sends the provided transaction to the remote peer. The request may be
|
|
|
|
/// dropped if the remote peer is known to already have the transaction.
|
2018-11-07 12:28:17 +03:00
|
|
|
/// We support broadcast of lightweight tx kernel hash
|
|
|
|
/// so track known txs by kernel hash.
|
2018-10-09 10:27:34 +03:00
|
|
|
pub fn send_transaction(&self, tx: &core::Transaction) -> Result<bool, Error> {
|
2018-11-07 12:28:17 +03:00
|
|
|
let kernel = &tx.kernels()[0];
|
|
|
|
|
|
|
|
if self
|
|
|
|
.info
|
|
|
|
.capabilities
|
|
|
|
.contains(Capabilities::TX_KERNEL_HASH)
|
|
|
|
{
|
|
|
|
return self.send_tx_kernel_hash(kernel.hash());
|
|
|
|
}
|
|
|
|
|
2019-01-12 20:28:03 +03:00
|
|
|
if !self.tracking_adapter.has_recv(kernel.hash()) {
|
2018-11-07 12:28:17 +03:00
|
|
|
debug!("Send full tx {} to {}", tx.hash(), self.info.addr);
|
2019-05-03 17:56:25 +03:00
|
|
|
self.send(tx, msg::Type::Transaction)?;
|
2018-10-09 10:27:34 +03:00
|
|
|
Ok(true)
|
2017-10-26 20:48:51 +03:00
|
|
|
} else {
|
2018-03-04 03:19:54 +03:00
|
|
|
debug!(
|
|
|
|
"Not sending tx {} to {} (already seen)",
|
|
|
|
tx.hash(),
|
|
|
|
self.info.addr
|
|
|
|
);
|
2018-10-09 10:27:34 +03:00
|
|
|
Ok(false)
|
2017-10-26 20:48:51 +03:00
|
|
|
}
|
2017-10-26 00:06:24 +03:00
|
|
|
}
|
|
|
|
|
2018-05-30 23:57:13 +03:00
|
|
|
/// Sends the provided stem transaction to the remote peer.
|
|
|
|
/// Note: tracking adapter is ignored for stem transactions (while under
|
|
|
|
/// embargo).
|
2018-03-20 06:18:54 +03:00
|
|
|
pub fn send_stem_transaction(&self, tx: &core::Transaction) -> Result<(), Error> {
|
2018-10-21 23:30:56 +03:00
|
|
|
debug!("Send (stem) tx {} to {}", tx.hash(), self.info.addr);
|
2019-05-03 17:56:25 +03:00
|
|
|
self.send(tx, msg::Type::StemTransaction)
|
2018-03-20 06:18:54 +03:00
|
|
|
}
|
|
|
|
|
2018-02-02 05:03:12 +03:00
|
|
|
/// Sends a request for block headers from the provided block locator
|
2017-02-08 00:52:17 +03:00
|
|
|
pub fn send_header_request(&self, locator: Vec<Hash>) -> Result<(), Error> {
|
2019-05-03 17:56:25 +03:00
|
|
|
self.send(&Locator { hashes: locator }, msg::Type::GetHeaders)
|
2017-02-08 00:52:17 +03:00
|
|
|
}
|
|
|
|
|
2018-11-07 12:28:17 +03:00
|
|
|
pub fn send_tx_request(&self, h: Hash) -> Result<(), Error> {
|
|
|
|
debug!(
|
|
|
|
"Requesting tx (kernel hash) {} from peer {}.",
|
|
|
|
h, self.info.addr
|
|
|
|
);
|
2019-05-03 17:56:25 +03:00
|
|
|
self.send(&h, msg::Type::GetTransaction)
|
2018-11-07 12:28:17 +03:00
|
|
|
}
|
|
|
|
|
2019-10-10 11:38:25 +03:00
|
|
|
/// Sends a request for a specific block by hash.
|
|
|
|
/// Takes opts so we can track if this request was due to our node syncing or otherwise.
|
|
|
|
pub fn send_block_request(&self, h: Hash, opts: chain::Options) -> Result<(), Error> {
|
2018-10-21 23:30:56 +03:00
|
|
|
debug!("Requesting block {} from peer {}.", h, self.info.addr);
|
2019-10-10 11:38:25 +03:00
|
|
|
self.tracking_adapter.push_req(h, opts);
|
2019-05-03 17:56:25 +03:00
|
|
|
self.send(&h, msg::Type::GetBlock)
|
2017-02-08 00:52:17 +03:00
|
|
|
}
|
|
|
|
|
2018-02-02 05:03:12 +03:00
|
|
|
/// Sends a request for a specific compact block by hash
|
2018-01-31 23:39:55 +03:00
|
|
|
pub fn send_compact_block_request(&self, h: Hash) -> Result<(), Error> {
|
2018-10-21 23:30:56 +03:00
|
|
|
debug!("Requesting compact block {} from {}", h, self.info.addr);
|
2019-05-03 17:56:25 +03:00
|
|
|
self.send(&h, msg::Type::GetCompactBlock)
|
2018-01-31 23:39:55 +03:00
|
|
|
}
|
|
|
|
|
2017-02-19 05:42:34 +03:00
|
|
|
pub fn send_peer_request(&self, capab: Capabilities) -> Result<(), Error> {
|
2018-11-13 13:59:33 +03:00
|
|
|
trace!("Asking {} for more peers {:?}", self.info.addr, capab);
|
2019-05-03 17:56:25 +03:00
|
|
|
self.send(
|
2018-02-02 05:03:12 +03:00
|
|
|
&GetPeerAddrs {
|
|
|
|
capabilities: capab,
|
|
|
|
},
|
2018-03-04 03:19:54 +03:00
|
|
|
msg::Type::GetPeerAddrs,
|
|
|
|
)
|
2017-02-19 05:42:34 +03:00
|
|
|
}
|
|
|
|
|
2018-03-05 22:33:44 +03:00
|
|
|
pub fn send_txhashset_request(&self, height: u64, hash: Hash) -> Result<(), Error> {
|
2018-03-04 03:19:54 +03:00
|
|
|
debug!(
|
2018-10-21 23:30:56 +03:00
|
|
|
"Asking {} for txhashset archive at {} {}.",
|
|
|
|
self.info.addr, height, hash
|
2018-03-04 03:19:54 +03:00
|
|
|
);
|
2019-08-01 19:46:06 +03:00
|
|
|
self.state_sync_requested.store(true, Ordering::Relaxed);
|
2019-05-03 17:56:25 +03:00
|
|
|
self.send(
|
2018-03-05 22:33:44 +03:00
|
|
|
&TxHashSetRequest { hash, height },
|
|
|
|
msg::Type::TxHashSetRequest,
|
2018-03-04 03:19:54 +03:00
|
|
|
)
|
2018-02-10 01:32:16 +03:00
|
|
|
}
|
|
|
|
|
2019-05-14 19:17:38 +03:00
|
|
|
pub fn send_kernel_data_request(&self) -> Result<(), Error> {
|
|
|
|
debug!("Asking {} for kernel data.", self.info.addr);
|
2019-05-15 18:51:35 +03:00
|
|
|
self.send(&KernelDataRequest {}, msg::Type::KernelDataRequest)
|
2019-05-14 19:17:38 +03:00
|
|
|
}
|
|
|
|
|
2019-05-15 18:51:35 +03:00
|
|
|
/// Stops the peer
|
2016-10-31 04:23:52 +03:00
|
|
|
pub fn stop(&self) {
|
2019-05-30 03:03:12 +03:00
|
|
|
debug!("Stopping peer {:?}", self.info.addr);
|
2019-05-15 18:51:35 +03:00
|
|
|
match self.stop_handle.try_lock() {
|
|
|
|
Some(handle) => handle.stop(),
|
|
|
|
None => error!("can't get stop lock for peer"),
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-05-30 03:03:12 +03:00
|
|
|
/// Waits until the peer's thread exit
|
|
|
|
pub fn wait(&self) {
|
|
|
|
debug!("Waiting for peer {:?} to stop", self.info.addr);
|
2019-05-15 18:51:35 +03:00
|
|
|
match self.stop_handle.try_lock() {
|
2019-05-30 03:03:12 +03:00
|
|
|
Some(mut handle) => handle.wait(),
|
2019-05-15 18:51:35 +03:00
|
|
|
None => error!("can't get stop lock for peer"),
|
|
|
|
}
|
2018-02-02 05:03:12 +03:00
|
|
|
}
|
2018-11-07 03:42:36 +03:00
|
|
|
}
|
|
|
|
|
2017-10-26 20:48:51 +03:00
|
|
|
/// Adapter implementation that forwards everything to an underlying adapter
/// but keeps track of the block and transaction hashes that were requested or
/// received.
#[derive(Clone)]
struct TrackingAdapter {
	// The wrapped adapter every call is delegated to.
	adapter: Arc<dyn NetAdapter>,
	// LRU set of hashes (blocks / tx kernels) received from this peer;
	// the `()` value makes the cache behave as a bounded set.
	received: Arc<RwLock<LruCache<Hash, ()>>>,
	// LRU map of hashes we requested from this peer, keyed to the
	// chain::Options in effect when the request was made (e.g. sync).
	requested: Arc<RwLock<LruCache<Hash, chain::Options>>>,
}
|
|
|
|
|
|
|
|
impl TrackingAdapter {
|
2018-12-08 02:59:40 +03:00
|
|
|
fn new(adapter: Arc<dyn NetAdapter>) -> TrackingAdapter {
|
2017-10-26 20:48:51 +03:00
|
|
|
TrackingAdapter {
|
|
|
|
adapter: adapter,
|
2019-10-10 11:38:25 +03:00
|
|
|
received: Arc::new(RwLock::new(LruCache::new(MAX_TRACK_SIZE))),
|
|
|
|
requested: Arc::new(RwLock::new(LruCache::new(MAX_TRACK_SIZE))),
|
2017-10-26 20:48:51 +03:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-01-12 20:28:03 +03:00
|
|
|
fn has_recv(&self, hash: Hash) -> bool {
|
2019-10-10 11:38:25 +03:00
|
|
|
self.received.write().contains_key(&hash)
|
2017-10-26 20:48:51 +03:00
|
|
|
}
|
|
|
|
|
2019-01-12 20:28:03 +03:00
|
|
|
fn push_recv(&self, hash: Hash) {
|
2019-10-10 11:38:25 +03:00
|
|
|
self.received.write().insert(hash, ());
|
2017-10-26 20:48:51 +03:00
|
|
|
}
|
2019-01-12 20:28:03 +03:00
|
|
|
|
2019-10-10 11:38:25 +03:00
|
|
|
/// Track a block or transaction hash requested by us.
|
|
|
|
/// Track the opts alongside the hash so we know if this was due to us syncing or not.
|
|
|
|
fn push_req(&self, hash: Hash, opts: chain::Options) {
|
|
|
|
self.requested.write().insert(hash, opts);
|
2019-01-12 20:28:03 +03:00
|
|
|
}
|
|
|
|
|
2019-10-10 11:38:25 +03:00
|
|
|
fn req_opts(&self, hash: Hash) -> Option<chain::Options> {
|
|
|
|
self.requested.write().get_mut(&hash).cloned()
|
2019-01-12 20:28:03 +03:00
|
|
|
}
|
2017-10-26 20:48:51 +03:00
|
|
|
}
|
|
|
|
|
2017-12-12 19:40:26 +03:00
|
|
|
impl ChainAdapter for TrackingAdapter {
|
2019-04-08 23:13:28 +03:00
|
|
|
fn total_difficulty(&self) -> Result<Difficulty, chain::Error> {
|
2017-10-26 20:48:51 +03:00
|
|
|
self.adapter.total_difficulty()
|
|
|
|
}
|
|
|
|
|
2019-04-08 23:13:28 +03:00
|
|
|
fn total_height(&self) -> Result<u64, chain::Error> {
|
2017-12-14 00:52:21 +03:00
|
|
|
self.adapter.total_height()
|
|
|
|
}
|
|
|
|
|
2018-11-07 12:28:17 +03:00
|
|
|
fn get_transaction(&self, kernel_hash: Hash) -> Option<core::Transaction> {
|
|
|
|
self.adapter.get_transaction(kernel_hash)
|
|
|
|
}
|
|
|
|
|
2019-04-18 16:11:06 +03:00
|
|
|
fn tx_kernel_received(
|
|
|
|
&self,
|
|
|
|
kernel_hash: Hash,
|
|
|
|
peer_info: &PeerInfo,
|
|
|
|
) -> Result<bool, chain::Error> {
|
2019-01-12 20:28:03 +03:00
|
|
|
self.push_recv(kernel_hash);
|
2019-04-18 16:11:06 +03:00
|
|
|
self.adapter.tx_kernel_received(kernel_hash, peer_info)
|
2018-11-07 12:28:17 +03:00
|
|
|
}
|
|
|
|
|
2019-04-08 23:13:28 +03:00
|
|
|
fn transaction_received(
|
|
|
|
&self,
|
|
|
|
tx: core::Transaction,
|
|
|
|
stem: bool,
|
|
|
|
) -> Result<bool, chain::Error> {
|
2018-05-30 23:57:13 +03:00
|
|
|
// Do not track the tx hash for stem txs.
|
|
|
|
// Otherwise we fail to handle the subsequent fluff or embargo expiration
|
|
|
|
// correctly.
|
|
|
|
if !stem {
|
2018-11-07 12:28:17 +03:00
|
|
|
let kernel = &tx.kernels()[0];
|
2019-01-12 20:28:03 +03:00
|
|
|
self.push_recv(kernel.hash());
|
2018-05-30 23:57:13 +03:00
|
|
|
}
|
2018-03-20 06:18:54 +03:00
|
|
|
self.adapter.transaction_received(tx, stem)
|
2017-10-26 20:48:51 +03:00
|
|
|
}
|
|
|
|
|
2019-04-08 23:13:28 +03:00
|
|
|
fn block_received(
|
|
|
|
&self,
|
|
|
|
b: core::Block,
|
2019-04-18 16:11:06 +03:00
|
|
|
peer_info: &PeerInfo,
|
2019-10-10 11:38:25 +03:00
|
|
|
opts: chain::Options,
|
2019-04-08 23:13:28 +03:00
|
|
|
) -> Result<bool, chain::Error> {
|
2019-01-12 20:28:03 +03:00
|
|
|
let bh = b.hash();
|
|
|
|
self.push_recv(bh);
|
2019-10-10 11:38:25 +03:00
|
|
|
|
|
|
|
// If we are currently tracking a request for this block then
|
|
|
|
// use the opts specified when we made the request.
|
|
|
|
// If we requested this block as part of sync then we want to
|
|
|
|
// let our adapter know this when we receive it.
|
|
|
|
let req_opts = self.req_opts(bh).unwrap_or(opts);
|
|
|
|
self.adapter.block_received(b, peer_info, req_opts)
|
2017-10-26 20:48:51 +03:00
|
|
|
}
|
|
|
|
|
2019-04-08 23:13:28 +03:00
|
|
|
fn compact_block_received(
|
|
|
|
&self,
|
|
|
|
cb: core::CompactBlock,
|
2019-04-18 16:11:06 +03:00
|
|
|
peer_info: &PeerInfo,
|
2019-04-08 23:13:28 +03:00
|
|
|
) -> Result<bool, chain::Error> {
|
2019-01-12 20:28:03 +03:00
|
|
|
self.push_recv(cb.hash());
|
2019-04-18 16:11:06 +03:00
|
|
|
self.adapter.compact_block_received(cb, peer_info)
|
2018-01-31 23:39:55 +03:00
|
|
|
}
|
|
|
|
|
2019-04-18 16:11:06 +03:00
|
|
|
fn header_received(
|
|
|
|
&self,
|
|
|
|
bh: core::BlockHeader,
|
|
|
|
peer_info: &PeerInfo,
|
|
|
|
) -> Result<bool, chain::Error> {
|
2019-01-12 20:28:03 +03:00
|
|
|
self.push_recv(bh.hash());
|
2019-04-18 16:11:06 +03:00
|
|
|
self.adapter.header_received(bh, peer_info)
|
2018-01-30 17:42:04 +03:00
|
|
|
}
|
|
|
|
|
2019-04-08 23:13:28 +03:00
|
|
|
fn headers_received(
|
|
|
|
&self,
|
|
|
|
bh: &[core::BlockHeader],
|
2019-04-18 16:11:06 +03:00
|
|
|
peer_info: &PeerInfo,
|
2019-04-08 23:13:28 +03:00
|
|
|
) -> Result<bool, chain::Error> {
|
2019-04-18 16:11:06 +03:00
|
|
|
self.adapter.headers_received(bh, peer_info)
|
2017-10-26 20:48:51 +03:00
|
|
|
}
|
|
|
|
|
2019-04-08 23:13:28 +03:00
|
|
|
fn locate_headers(&self, locator: &[Hash]) -> Result<Vec<core::BlockHeader>, chain::Error> {
|
2017-10-26 20:48:51 +03:00
|
|
|
self.adapter.locate_headers(locator)
|
|
|
|
}
|
|
|
|
|
|
|
|
fn get_block(&self, h: Hash) -> Option<core::Block> {
|
|
|
|
self.adapter.get_block(h)
|
|
|
|
}
|
2018-02-10 01:32:16 +03:00
|
|
|
|
2019-05-14 19:17:38 +03:00
|
|
|
fn kernel_data_read(&self) -> Result<File, chain::Error> {
|
|
|
|
self.adapter.kernel_data_read()
|
|
|
|
}
|
|
|
|
|
2019-08-26 23:17:47 +03:00
|
|
|
fn kernel_data_write(&self, reader: &mut dyn Read) -> Result<bool, chain::Error> {
|
2019-05-14 19:17:38 +03:00
|
|
|
self.adapter.kernel_data_write(reader)
|
|
|
|
}
|
|
|
|
|
2018-03-05 22:33:44 +03:00
|
|
|
fn txhashset_read(&self, h: Hash) -> Option<TxHashSetRead> {
|
|
|
|
self.adapter.txhashset_read(h)
|
2018-02-10 01:32:16 +03:00
|
|
|
}
|
|
|
|
|
2019-07-23 11:46:29 +03:00
|
|
|
fn txhashset_archive_header(&self) -> Result<core::BlockHeader, chain::Error> {
|
|
|
|
self.adapter.txhashset_archive_header()
|
|
|
|
}
|
|
|
|
|
2018-07-12 19:06:52 +03:00
|
|
|
fn txhashset_receive_ready(&self) -> bool {
|
|
|
|
self.adapter.txhashset_receive_ready()
|
|
|
|
}
|
|
|
|
|
2019-04-08 23:13:28 +03:00
|
|
|
fn txhashset_write(
|
|
|
|
&self,
|
|
|
|
h: Hash,
|
|
|
|
txhashset_data: File,
|
2019-04-18 16:11:06 +03:00
|
|
|
peer_info: &PeerInfo,
|
2019-04-08 23:13:28 +03:00
|
|
|
) -> Result<bool, chain::Error> {
|
2019-04-18 16:11:06 +03:00
|
|
|
self.adapter.txhashset_write(h, txhashset_data, peer_info)
|
2018-02-10 01:32:16 +03:00
|
|
|
}
|
2018-10-13 01:53:50 +03:00
|
|
|
|
|
|
|
fn txhashset_download_update(
|
|
|
|
&self,
|
|
|
|
start_time: DateTime<Utc>,
|
|
|
|
downloaded_size: u64,
|
|
|
|
total_size: u64,
|
|
|
|
) -> bool {
|
|
|
|
self.adapter
|
|
|
|
.txhashset_download_update(start_time, downloaded_size, total_size)
|
|
|
|
}
|
2019-04-23 02:54:36 +03:00
|
|
|
|
|
|
|
fn get_tmp_dir(&self) -> PathBuf {
|
|
|
|
self.adapter.get_tmp_dir()
|
|
|
|
}
|
|
|
|
|
|
|
|
fn get_tmpfile_pathname(&self, tmpfile_name: String) -> PathBuf {
|
|
|
|
self.adapter.get_tmpfile_pathname(tmpfile_name)
|
|
|
|
}
|
2017-12-12 19:40:26 +03:00
|
|
|
}
|
2017-10-26 20:48:51 +03:00
|
|
|
|
2017-12-12 19:40:26 +03:00
|
|
|
impl NetAdapter for TrackingAdapter {
|
2019-02-18 15:15:32 +03:00
|
|
|
fn find_peer_addrs(&self, capab: Capabilities) -> Vec<PeerAddr> {
|
2017-10-26 20:48:51 +03:00
|
|
|
self.adapter.find_peer_addrs(capab)
|
|
|
|
}
|
|
|
|
|
2019-02-18 15:15:32 +03:00
|
|
|
fn peer_addrs_received(&self, addrs: Vec<PeerAddr>) {
|
2017-10-26 20:48:51 +03:00
|
|
|
self.adapter.peer_addrs_received(addrs)
|
|
|
|
}
|
|
|
|
|
2019-02-18 15:15:32 +03:00
|
|
|
fn peer_difficulty(&self, addr: PeerAddr, diff: Difficulty, height: u64) {
|
2017-12-14 00:52:21 +03:00
|
|
|
self.adapter.peer_difficulty(addr, diff, height)
|
2017-11-21 17:24:29 +03:00
|
|
|
}
|
2018-03-27 19:09:41 +03:00
|
|
|
|
2019-02-18 15:15:32 +03:00
|
|
|
fn is_banned(&self, addr: PeerAddr) -> bool {
|
2018-03-27 19:09:41 +03:00
|
|
|
self.adapter.is_banned(addr)
|
|
|
|
}
|
2017-10-26 20:48:51 +03:00
|
|
|
}
|