// Copyright 2020 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::util::RwLock;
use std::collections::HashMap;
use std::fs::File;
use std::path::PathBuf;
use std::sync::Arc;

use rand::prelude::*;

use crate::chain;
use crate::core::core;
use crate::core::core::hash::{Hash, Hashed};
use crate::core::global;
use crate::core::pow::Difficulty;
use crate::peer::Peer;
use crate::store::{PeerData, PeerStore, State};
use crate::types::{
	Capabilities, ChainAdapter, Error, NetAdapter, P2PConfig, PeerAddr, PeerInfo, ReasonForBan,
	TxHashSetRead, MAX_PEER_ADDRS,
};
use chrono::prelude::*;
use chrono::Duration;
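
// How long to wait when taking a read or write lock on the peers map before
// giving up. Most lock acquisitions in this module go through
// try_read_for/try_write_for with this timeout, so a single wedged connection
// cannot stall the rest of the server indefinitely; on timeout the caller
// logs an error and backs off.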
const LOCK_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(2);
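
/// Thread-safe view of the server's peers: the map of live connections keyed
/// by address, the backing store of known peer data, and the p2p config.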
pub struct Peers {
	pub adapter: Arc<dyn ChainAdapter>,
	store: PeerStore,
	peers: RwLock<HashMap<PeerAddr, Arc<Peer>>>,
	config: P2PConfig,
}

impl Peers {
	pub fn new(store: PeerStore, adapter: Arc<dyn ChainAdapter>, config: P2PConfig) -> Peers {
		Peers {
			adapter,
			store,
			config,
			peers: RwLock::new(HashMap::new()),
		}
	}

	/// Adds the peer to our internal peer mapping and persists it to the
	/// peer store.
	pub fn add_connected(&self, peer: Arc<Peer>) -> Result<(), Error> {
		let mut peers = self.peers.try_write_for(LOCK_TIMEOUT).ok_or_else(|| {
			error!("add_connected: failed to get peers lock");
			Error::Timeout
		})?;
		let peer_data = PeerData {
			addr: peer.info.addr,
			capabilities: peer.info.capabilities,
			user_agent: peer.info.user_agent.clone(),
			flags: State::Healthy,
			last_banned: 0,
			ban_reason: ReasonForBan::None,
			last_connected: Utc::now().timestamp(),
		};
		debug!("Saving newly connected peer {}.", peer_data.addr);
		self.save_peer(&peer_data)?;
		peers.insert(peer_data.addr, peer);

		Ok(())
	}

	/// Add a peer as banned to block future connections, usually due to a
	/// failed handshake.
	pub fn add_banned(&self, addr: PeerAddr, ban_reason: ReasonForBan) -> Result<(), Error> {
		let peer_data = PeerData {
			addr,
			capabilities: Capabilities::UNKNOWN,
			user_agent: "".to_string(),
			flags: State::Banned,
			last_banned: Utc::now().timestamp(),
			ban_reason,
			last_connected: Utc::now().timestamp(),
		};
		debug!("Banning peer {}.", addr);
		self.save_peer(&peer_data)
	}

	/// Check if this peer address is already known (are we already connected to it)?
	/// We try to get the read lock but if we experience contention
	/// and this attempt fails then return an error allowing the caller
	/// to decide how best to handle this.
	pub fn is_known(&self, addr: PeerAddr) -> Result<bool, Error> {
		let peers = self.peers.try_read_for(LOCK_TIMEOUT).ok_or_else(|| {
			error!("is_known: failed to get peers lock");
			Error::Internal
		})?;
		Ok(peers.contains_key(&addr))
	}

	/// Iterator over our current peers.
	/// This allows us to hide try_read_for() behind a cleaner interface.
	/// PeersIter lets us chain various adaptors for convenience.
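	///
	/// For example (a hypothetical caller, chaining adaptors):
	///
	/// ```ignore
	/// // Pick a random connected outbound peer, if any.
	/// let peer = peers.iter().outbound().connected().choose_random();
	/// ```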
	pub fn iter(&self) -> PeersIter<impl Iterator<Item = Arc<Peer>>> {
		let peers = match self.peers.try_read_for(LOCK_TIMEOUT) {
			Some(peers) => peers.values().cloned().collect(),
			None => {
				error!("iter: failed to get peers lock");
				vec![]
			}
		};
		PeersIter {
			iter: peers.into_iter(),
		}
	}

	/// Get a peer we're connected to by address.
	pub fn get_connected_peer(&self, addr: PeerAddr) -> Option<Arc<Peer>> {
		self.iter().connected().by_addr(addr)
	}

	/// Maximum total difficulty of all our connected peers.
	pub fn max_peer_difficulty(&self) -> Difficulty {
		self.iter()
			.connected()
			.max_difficulty()
			.unwrap_or(Difficulty::zero())
	}

	pub fn is_banned(&self, peer_addr: PeerAddr) -> bool {
		if let Ok(peer) = self.store.get_peer(peer_addr) {
			return peer.flags == State::Banned;
		}
		false
	}

	/// Ban a peer, disconnecting it if we're currently connected.
	pub fn ban_peer(&self, peer_addr: PeerAddr, ban_reason: ReasonForBan) -> Result<(), Error> {
		self.update_state(peer_addr, State::Banned)?;

		match self.get_connected_peer(peer_addr) {
			Some(peer) => {
				debug!("Banning peer {}", peer_addr);
				// setting peer status will get it removed at the next clean_peers
				peer.send_ban_reason(ban_reason)?;
				peer.set_banned();
				peer.stop();
				let mut peers = self.peers.try_write_for(LOCK_TIMEOUT).ok_or_else(|| {
					error!("ban_peer: failed to get peers lock");
					Error::PeerException
				})?;
				peers.remove(&peer.info.addr);
				Ok(())
			}
			None => Err(Error::PeerNotFound),
		}
	}

	/// Unban a peer: check that it exists and is currently banned, then mark
	/// it healthy again.
	pub fn unban_peer(&self, peer_addr: PeerAddr) -> Result<(), Error> {
		debug!("unban_peer: peer {}", peer_addr);
		// check the peer exists
		self.get_peer(peer_addr)?;
		if self.is_banned(peer_addr) {
			self.update_state(peer_addr, State::Healthy)
		} else {
			Err(Error::PeerNotBanned)
		}
	}
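
	/// Broadcast a message to all connected peers via the provided closure,
	/// which should return Ok(true) if the message was actually sent to the
	/// given peer. Returns the number of peers the message was sent to.
	/// Peers that error while sending are stopped and removed from the map.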
	fn broadcast<F>(&self, obj_name: &str, inner: F) -> u32
	where
		F: Fn(&Peer) -> Result<bool, Error>,
	{
		let mut count = 0;

		for p in self.iter().connected() {
			match inner(&p) {
				Ok(true) => count += 1,
				Ok(false) => (),
				Err(e) => {
					debug!(
						"Error sending {:?} to peer {:?}: {:?}",
						obj_name, &p.info.addr, e
					);

					let mut peers = match self.peers.try_write_for(LOCK_TIMEOUT) {
						Some(peers) => peers,
						None => {
							error!("broadcast: failed to get peers lock");
							break;
						}
					};
					p.stop();
					peers.remove(&p.info.addr);
				}
			}
		}
		count
	}

	/// Broadcast a compact block to all our connected peers.
	/// This is only used when initially broadcasting a newly mined block.
	pub fn broadcast_compact_block(&self, b: &core::CompactBlock) {
		let count = self.broadcast("compact block", |p| p.send_compact_block(b));
		debug!(
			"broadcast_compact_block: {}, {} at {}, to {} peers, done.",
			b.hash(),
			b.header.pow.total_difficulty,
			b.header.height,
			count,
		);
	}

	/// Broadcast a block header to all our connected peers.
	/// A peer implementation may drop the broadcast request
	/// if it knows the remote peer already has the header.
	pub fn broadcast_header(&self, bh: &core::BlockHeader) {
		let count = self.broadcast("header", |p| p.send_header(bh));
		debug!(
			"broadcast_header: {}, {} at {}, to {} peers, done.",
			bh.hash(),
			bh.pow.total_difficulty,
			bh.height,
			count,
		);
	}

	/// Broadcasts the provided transaction to all our connected peers.
	/// A peer implementation may drop the broadcast request
	/// if it knows the remote peer already has the transaction.
	pub fn broadcast_transaction(&self, tx: &core::Transaction) {
		let count = self.broadcast("transaction", |p| p.send_transaction(tx));
		debug!(
			"broadcast_transaction: {} to {} peers, done.",
			tx.hash(),
			count,
		);
	}

	/// Ping all our connected peers. Expects a pong back, otherwise the peer
	/// is disconnected. This acts as a liveness test.
	pub fn check_all(&self, total_difficulty: Difficulty, height: u64) {
		for p in self.iter().connected() {
			if let Err(e) = p.send_ping(total_difficulty, height) {
				debug!("Error pinging peer {:?}: {:?}", &p.info.addr, e);
				let mut peers = match self.peers.try_write_for(LOCK_TIMEOUT) {
					Some(peers) => peers,
					None => {
						error!("check_all: failed to get peers lock");
						break;
					}
				};
				p.stop();
				peers.remove(&p.info.addr);
			}
		}
	}

	/// Iterator over all peers we know about (stored in our db).
	pub fn peer_data_iter(&self) -> Result<impl Iterator<Item = PeerData>, Error> {
		self.store.peers_iter().map_err(From::from)
	}

	/// Convenience for reading all peer data from the db.
	pub fn all_peer_data(&self) -> Vec<PeerData> {
		self.peer_data_iter()
			.map(|peers| peers.collect())
			.unwrap_or(vec![])
	}

	/// Find peers in store (not necessarily connected) and return their data.
	pub fn find_peers(&self, state: State, cap: Capabilities, count: usize) -> Vec<PeerData> {
		match self.store.find_peers(state, cap, count) {
			Ok(peers) => peers,
			Err(e) => {
				error!("failed to find peers: {:?}", e);
				vec![]
			}
		}
	}

	/// Get peer in store by address.
	pub fn get_peer(&self, peer_addr: PeerAddr) -> Result<PeerData, Error> {
		self.store.get_peer(peer_addr).map_err(From::from)
	}

	/// Whether we've already seen a peer with the provided address.
	pub fn exists_peer(&self, peer_addr: PeerAddr) -> Result<bool, Error> {
		self.store.exists_peer(peer_addr).map_err(From::from)
	}

	/// Saves updated information about a peer.
	pub fn save_peer(&self, p: &PeerData) -> Result<(), Error> {
		self.store.save_peer(p).map_err(From::from)
	}

	/// Updates the state of a peer in store.
	pub fn update_state(&self, peer_addr: PeerAddr, new_state: State) -> Result<(), Error> {
		self.store
			.update_state(peer_addr, new_state)
			.map_err(From::from)
	}

	/// Iterate over the peer list and prune all peers we have
	/// lost connection to or have been deemed problematic.
	/// Also avoid connected peer count getting too high.
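	///
	/// For example (a hypothetical caller, with illustrative limits):
	///
	/// ```ignore
	/// // Allow at most 32 inbound and 8 outbound connections, never
	/// // dropping explicitly preferred peers.
	/// peers.clean_peers(32, 8, &preferred);
	/// ```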
	pub fn clean_peers(
		&self,
		max_inbound_count: usize,
		max_outbound_count: usize,
		preferred_peers: &[PeerAddr],
	) {
		let mut rm = vec![];

		// build a list of peers to be cleaned up
		{
			for peer in self.iter() {
				let peer: &Peer = peer.as_ref();
				if peer.is_banned() {
					debug!("clean_peers {:?}, peer banned", peer.info.addr);
					rm.push(peer.info.addr);
				} else if !peer.is_connected() {
					debug!("clean_peers {:?}, not connected", peer.info.addr);
					rm.push(peer.info.addr);
				} else if peer.is_abusive() {
					if let Some(counts) = peer.last_min_message_counts() {
						debug!(
							"clean_peers {:?}, abusive ({} sent, {} recv)",
							peer.info.addr, counts.0, counts.1,
						);
					}
					let _ = self.update_state(peer.info.addr, State::Banned);
					rm.push(peer.info.addr);
				} else {
					let (stuck, diff) = peer.is_stuck();
					match self.adapter.total_difficulty() {
						Ok(total_difficulty) => {
							if stuck && diff < total_difficulty {
								debug!("clean_peers {:?}, stuck peer", peer.info.addr);
								let _ = self.update_state(peer.info.addr, State::Defunct);
								rm.push(peer.info.addr);
							}
						}
						Err(e) => error!("failed to get total difficulty: {:?}", e),
					}
				}
			}
		}

		// closure to build an iterator of our outbound peers
		let outbound_peers = || self.iter().outbound().connected().into_iter();

		// check here to make sure we don't have too many outgoing connections.
		// Preferred peers are treated preferentially here.
		// Also choose outbound peers with lowest total difficulty to drop.
		let excess_outgoing_count = outbound_peers().count().saturating_sub(max_outbound_count);
		if excess_outgoing_count > 0 {
			let mut peer_infos: Vec<_> = outbound_peers()
				.map(|x| x.info.clone())
				.filter(|x| !preferred_peers.contains(&x.addr))
				.collect();
			peer_infos.sort_unstable_by_key(|x| x.total_difficulty());
			let mut addrs = peer_infos
				.into_iter()
				.map(|x| x.addr)
				.take(excess_outgoing_count)
				.collect();
			rm.append(&mut addrs);
		}

		// closure to build an iterator of our inbound peers
		let inbound_peers = || self.iter().inbound().connected().into_iter();

		// check here to make sure we don't have too many incoming connections
		let excess_incoming_count = inbound_peers().count().saturating_sub(max_inbound_count);
		if excess_incoming_count > 0 {
			let mut addrs: Vec<_> = inbound_peers()
				.filter(|x| !preferred_peers.contains(&x.info.addr))
				.take(excess_incoming_count)
				.map(|x| x.info.addr)
				.collect();
			rm.append(&mut addrs);
		}

		// now clean up peer map based on the list to remove
		{
			let mut peers = match self.peers.try_write_for(LOCK_TIMEOUT) {
				Some(peers) => peers,
				None => {
					error!("clean_peers: failed to get peers lock");
					return;
				}
			};
			for addr in rm {
				let _ = peers.get(&addr).map(|peer| peer.stop());
				peers.remove(&addr);
			}
		}
	}

	/// Stop all peer connections, then wait for each peer thread to exit.
	pub fn stop(&self) {
		let mut peers = self.peers.write();
		for peer in peers.values() {
			peer.stop();
		}
		for (_, peer) in peers.drain() {
			peer.wait();
		}
	}

	/// Whether we have enough outbound connected peers.
	pub fn enough_outbound_peers(&self) -> bool {
		self.iter().outbound().connected().count()
			>= self.config.peer_min_preferred_outbound_count() as usize
	}

	/// Removes those peers that seem to have expired.
	pub fn remove_expired(&self) {
		let now = Utc::now();

		// Delete defunct peers from storage
		let _ = self.store.delete_peers(|peer| {
			let diff = now - Utc.timestamp(peer.last_connected, 0);

			let should_remove = peer.flags == State::Defunct
				&& diff > Duration::seconds(global::PEER_EXPIRATION_REMOVE_TIME);

			if should_remove {
				debug!(
					"removing peer {:?}: last connected {} days {} hours {} minutes ago.",
					peer.addr,
					diff.num_days(),
					diff.num_hours(),
					diff.num_minutes()
				);
			}

			should_remove
		});
	}
}
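
// Implementation of ChainAdapter for Peers: most methods delegate straight to
// the inner adapter, while the *_received hooks additionally ban the sending
// peer whenever the adapter reports the received data as intrinsically bad.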
impl ChainAdapter for Peers {
	fn total_difficulty(&self) -> Result<Difficulty, chain::Error> {
		self.adapter.total_difficulty()
	}

	fn total_height(&self) -> Result<u64, chain::Error> {
		self.adapter.total_height()
	}

	fn get_transaction(&self, kernel_hash: Hash) -> Option<core::Transaction> {
		self.adapter.get_transaction(kernel_hash)
	}

	fn tx_kernel_received(
		&self,
		kernel_hash: Hash,
		peer_info: &PeerInfo,
	) -> Result<bool, chain::Error> {
		self.adapter.tx_kernel_received(kernel_hash, peer_info)
	}

	fn transaction_received(
		&self,
		tx: core::Transaction,
		stem: bool,
	) -> Result<bool, chain::Error> {
		self.adapter.transaction_received(tx, stem)
	}

	fn block_received(
		&self,
		b: core::Block,
		peer_info: &PeerInfo,
		opts: chain::Options,
	) -> Result<bool, chain::Error> {
		let hash = b.hash();
		if !self.adapter.block_received(b, peer_info, opts)? {
			// if the peer sent us a block that's intrinsically bad
			// they are either mistaken or malevolent, both of which require a ban
			debug!(
				"Received a bad block {} from {}, the peer will be banned",
				hash, peer_info.addr,
			);
			self.ban_peer(peer_info.addr, ReasonForBan::BadBlock)
				.map_err(|e| {
					let err: chain::Error =
						chain::ErrorKind::Other(format!("ban peer error: {:?}", e)).into();
					err
				})?;
			Ok(false)
		} else {
			Ok(true)
		}
	}

	fn compact_block_received(
		&self,
		cb: core::CompactBlock,
		peer_info: &PeerInfo,
	) -> Result<bool, chain::Error> {
		let hash = cb.hash();
		if !self.adapter.compact_block_received(cb, peer_info)? {
			// if the peer sent us a block that's intrinsically bad
			// they are either mistaken or malevolent, both of which require a ban
			debug!(
				"Received a bad compact block {} from {}, the peer will be banned",
				hash, peer_info.addr
			);
			self.ban_peer(peer_info.addr, ReasonForBan::BadCompactBlock)
				.map_err(|e| {
					let err: chain::Error =
						chain::ErrorKind::Other(format!("ban peer error: {:?}", e)).into();
					err
				})?;
			Ok(false)
		} else {
			Ok(true)
		}
	}

	fn header_received(
		&self,
		bh: core::BlockHeader,
		peer_info: &PeerInfo,
	) -> Result<bool, chain::Error> {
		if !self.adapter.header_received(bh, peer_info)? {
			// if the peer sent us a block header that's intrinsically bad
			// they are either mistaken or malevolent, both of which require a ban
			self.ban_peer(peer_info.addr, ReasonForBan::BadBlockHeader)
				.map_err(|e| {
					let err: chain::Error =
						chain::ErrorKind::Other(format!("ban peer error: {:?}", e)).into();
					err
				})?;
			Ok(false)
		} else {
			Ok(true)
		}
	}

	fn headers_received(
		&self,
		headers: &[core::BlockHeader],
		peer_info: &PeerInfo,
	) -> Result<bool, chain::Error> {
		if !self.adapter.headers_received(headers, peer_info)? {
			// if the peer sent us block headers that are intrinsically bad
			// they are either mistaken or malevolent, both of which require a ban
			self.ban_peer(peer_info.addr, ReasonForBan::BadBlockHeader)
				.map_err(|e| {
					let err: chain::Error =
						chain::ErrorKind::Other(format!("ban peer error: {:?}", e)).into();
					err
				})?;
			Ok(false)
		} else {
			Ok(true)
		}
	}

	fn locate_headers(&self, hs: &[Hash]) -> Result<Vec<core::BlockHeader>, chain::Error> {
		self.adapter.locate_headers(hs)
	}

	fn get_block(&self, h: Hash, peer_info: &PeerInfo) -> Option<core::Block> {
		self.adapter.get_block(h, peer_info)
	}

	fn txhashset_read(&self, h: Hash) -> Option<TxHashSetRead> {
		self.adapter.txhashset_read(h)
	}

	fn txhashset_archive_header(&self) -> Result<core::BlockHeader, chain::Error> {
		self.adapter.txhashset_archive_header()
	}

	fn txhashset_receive_ready(&self) -> bool {
		self.adapter.txhashset_receive_ready()
	}

	fn txhashset_write(
		&self,
		h: Hash,
		txhashset_data: File,
		peer_info: &PeerInfo,
	) -> Result<bool, chain::Error> {
		if self.adapter.txhashset_write(h, txhashset_data, peer_info)? {
			debug!(
				"Received a bad txhashset data from {}, the peer will be banned",
				peer_info.addr
			);
			self.ban_peer(peer_info.addr, ReasonForBan::BadTxHashSet)
				.map_err(|e| {
					let err: chain::Error =
						chain::ErrorKind::Other(format!("ban peer error: {:?}", e)).into();
					err
				})?;
			Ok(true)
		} else {
			Ok(false)
		}
	}

	fn txhashset_download_update(
		&self,
		start_time: DateTime<Utc>,
		downloaded_size: u64,
		total_size: u64,
	) -> bool {
		self.adapter
			.txhashset_download_update(start_time, downloaded_size, total_size)
	}

	fn get_tmp_dir(&self) -> PathBuf {
		self.adapter.get_tmp_dir()
	}

	fn get_tmpfile_pathname(&self, tmpfile_name: String) -> PathBuf {
		self.adapter.get_tmpfile_pathname(tmpfile_name)
	}
}

impl NetAdapter for Peers {
	/// Find good peers we know with the provided capability and return their
	/// addresses.
	fn find_peer_addrs(&self, capab: Capabilities) -> Vec<PeerAddr> {
		let peers = self.find_peers(State::Healthy, capab, MAX_PEER_ADDRS as usize);
		trace!("find_peer_addrs: {} healthy peers picked", peers.len());
		map_vec!(peers, |p| p.addr)
	}

	/// A list of peers has been received from one of our peers.
	fn peer_addrs_received(&self, peer_addrs: Vec<PeerAddr>) {
		trace!("Received {} peer addrs, saving.", peer_addrs.len());
		for pa in peer_addrs {
			if let Ok(e) = self.exists_peer(pa) {
				if e {
					continue;
				}
			}
			let peer = PeerData {
				addr: pa,
				capabilities: Capabilities::UNKNOWN,
				user_agent: "".to_string(),
				flags: State::Healthy,
				last_banned: 0,
				ban_reason: ReasonForBan::None,
				last_connected: Utc::now().timestamp(),
			};
			if let Err(e) = self.save_peer(&peer) {
				error!("Could not save received peer address: {:?}", e);
			}
		}
	}

	fn peer_difficulty(&self, addr: PeerAddr, diff: Difficulty, height: u64) {
		if let Some(peer) = self.get_connected_peer(addr) {
			peer.info.update(height, diff);
		}
	}

	fn is_banned(&self, addr: PeerAddr) -> bool {
		if let Ok(peer) = self.get_peer(addr) {
			peer.flags == State::Banned
		} else {
			false
		}
	}
}
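
/// Adaptor around an iterator of peers that lets filters and selectors be
/// chained fluently. For example (a hypothetical caller):
///
/// ```ignore
/// // How many connected peers advertise the PEER_LIST capability?
/// let n = peers
/// 	.iter()
/// 	.with_capabilities(Capabilities::PEER_LIST)
/// 	.connected()
/// 	.count();
/// ```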
pub struct PeersIter<I> {
	iter: I,
}

impl<I: Iterator> IntoIterator for PeersIter<I> {
	type Item = I::Item;
	type IntoIter = I;

	fn into_iter(self) -> Self::IntoIter {
		self.iter.into_iter()
	}
}

impl<I: Iterator<Item = Arc<Peer>>> PeersIter<I> {
	/// Filter peers that are currently connected.
	/// Note: This adaptor takes a read lock internally.
	/// So if we are chaining adaptors then defer this toward the end of the chain.
	pub fn connected(self) -> PeersIter<impl Iterator<Item = Arc<Peer>>> {
		PeersIter {
			iter: self.iter.filter(|p| p.is_connected()),
		}
	}

	/// Filter inbound peers.
	pub fn inbound(self) -> PeersIter<impl Iterator<Item = Arc<Peer>>> {
		PeersIter {
			iter: self.iter.filter(|p| p.info.is_inbound()),
		}
	}

	/// Filter outbound peers.
	pub fn outbound(self) -> PeersIter<impl Iterator<Item = Arc<Peer>>> {
		PeersIter {
			iter: self.iter.filter(|p| p.info.is_outbound()),
		}
	}

	/// Filter peers with the provided difficulty comparison fn.
	///
	/// ```ignore
	/// with_difficulty(|x| x > diff)
	/// ```
	///
	/// Note: This adaptor takes a read lock internally for each peer.
	/// So if we are chaining adaptors then put this toward later in the chain.
	pub fn with_difficulty<F>(self, f: F) -> PeersIter<impl Iterator<Item = Arc<Peer>>>
	where
		F: Fn(Difficulty) -> bool,
	{
		PeersIter {
			iter: self.iter.filter(move |p| f(p.info.total_difficulty())),
		}
	}

	/// Filter peers that support the provided capabilities.
	pub fn with_capabilities(
		self,
		cap: Capabilities,
	) -> PeersIter<impl Iterator<Item = Arc<Peer>>> {
		PeersIter {
			iter: self.iter.filter(move |p| p.info.capabilities.contains(cap)),
		}
	}

	/// Find a peer in the current (filtered) peers by address.
	pub fn by_addr(&mut self, addr: PeerAddr) -> Option<Arc<Peer>> {
		self.iter.find(|p| p.info.addr == addr)
	}

	/// Choose a random peer from the current (filtered) peers.
	pub fn choose_random(self) -> Option<Arc<Peer>> {
		let mut rng = rand::thread_rng();
		self.iter.choose(&mut rng)
	}

	/// Find the max difficulty of the current (filtered) peers.
	pub fn max_difficulty(self) -> Option<Difficulty> {
		self.iter.map(|p| p.info.total_difficulty()).max()
	}

	/// Count the current (filtered) peers.
	pub fn count(self) -> usize {
		self.iter.count()
	}
}