// Copyright 2018 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashMap;
use std::fs::File;
use std::net::SocketAddr;
use std::sync::{Arc, RwLock};

use rand::{thread_rng, Rng};

use chrono::prelude::Utc;

use core::core;
use core::core::hash::{Hash, Hashed};
use core::pow::Difficulty;
use util::LOGGER;

use peer::Peer;
use store::{PeerData, PeerStore, State};
use types::{
	Capabilities, ChainAdapter, Direction, Error, NetAdapter, P2PConfig, ReasonForBan,
	TxHashSetRead, MAX_PEER_ADDRS,
};

pub struct Peers {
	pub adapter: Arc<ChainAdapter>,
	store: PeerStore,
	peers: RwLock<HashMap<SocketAddr, Arc<RwLock<Peer>>>>,
	dandelion_relay: RwLock<HashMap<i64, Arc<RwLock<Peer>>>>,
	config: P2PConfig,
}

unsafe impl Send for Peers {}
unsafe impl Sync for Peers {}

impl Peers {
	pub fn new(store: PeerStore, adapter: Arc<ChainAdapter>, config: P2PConfig) -> Peers {
		Peers {
			adapter,
			store,
			config,
			peers: RwLock::new(HashMap::new()),
			dandelion_relay: RwLock::new(HashMap::new()),
		}
	}

	/// Adds the peer to our internal peer mapping. Note that the peer is
	/// still returned so the server can run it.
	pub fn add_connected(&self, p: Peer) -> Result<Arc<RwLock<Peer>>, Error> {
		debug!(LOGGER, "Saving newly connected peer {}.", p.info.addr);
		let peer_data = PeerData {
			addr: p.info.addr,
			capabilities: p.info.capabilities,
			user_agent: p.info.user_agent.clone(),
			flags: State::Healthy,
			last_banned: 0,
			ban_reason: ReasonForBan::None,
		};
		self.save_peer(&peer_data)?;

		let addr = p.info.addr.clone();
		let apeer = Arc::new(RwLock::new(p));
		{
			let mut peers = self.peers.write().unwrap();
			peers.insert(addr, apeer.clone());
		}
		Ok(apeer)
	}

	/// Update the Dandelion relay: picks a random outgoing connected peer
	/// and makes it the new relay, replacing any previous one.
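	///
	/// A minimal usage sketch (not compiled as a doc test), assuming a
	/// `Peers` instance `peers` driven from a periodic maintenance loop:
	///
	/// ```ignore
	/// // rotate the relay every so often so stem paths don't go stale
	/// peers.update_dandelion_relay();
	/// ```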
	pub fn update_dandelion_relay(&self) {
		let peers = self.outgoing_connected_peers();

		match thread_rng().choose(&peers) {
			Some(peer) => {
				// Clear the map and add the new relay
				let dandelion_relay = &self.dandelion_relay;
				dandelion_relay.write().unwrap().clear();
				dandelion_relay
					.write()
					.unwrap()
					.insert(Utc::now().timestamp(), peer.clone());
				debug!(
					LOGGER,
					"Successfully updated Dandelion relay to: {}",
					peer.try_read().unwrap().info.addr
				);
			}
			None => debug!(LOGGER, "Could not update dandelion relay"),
		};
	}

	/// Get the current Dandelion relay.
	pub fn get_dandelion_relay(&self) -> HashMap<i64, Arc<RwLock<Peer>>> {
		self.dandelion_relay.read().unwrap().clone()
	}

	/// Whether we already have a connection to the peer at this address.
	pub fn is_known(&self, addr: &SocketAddr) -> bool {
		self.peers.read().unwrap().contains_key(addr)
	}

	/// Get a vec of peers we are currently connected to, in random order.
	pub fn connected_peers(&self) -> Vec<Arc<RwLock<Peer>>> {
		let mut res = self
			.peers
			.read()
			.unwrap()
			.values()
			.filter(|p| p.read().unwrap().is_connected())
			.cloned()
			.collect::<Vec<_>>();
		thread_rng().shuffle(&mut res);
		res
	}

	/// Get a vec of connected peers with an outbound connection direction.
	pub fn outgoing_connected_peers(&self) -> Vec<Arc<RwLock<Peer>>> {
		let peers = self.connected_peers();
		let res = peers
			.into_iter()
			.filter(|x| match x.try_read() {
				Ok(peer) => peer.info.direction == Direction::Outbound,
				Err(_) => false,
			}).collect::<Vec<_>>();
		res
	}

	/// Get a peer we're connected to by address.
	pub fn get_connected_peer(&self, addr: &SocketAddr) -> Option<Arc<RwLock<Peer>>> {
		self.peers.read().unwrap().get(addr).map(|p| p.clone())
	}

	/// Number of peers we're currently connected to.
	pub fn peer_count(&self) -> u32 {
		self.peers
			.read()
			.unwrap()
			.values()
			.filter(|p| p.read().unwrap().is_connected())
			.count() as u32
	}

	/// Return vec of connected peers that currently advertise more work
	/// (total_difficulty) than we do.
	pub fn more_work_peers(&self) -> Vec<Arc<RwLock<Peer>>> {
		let peers = self.connected_peers();
		if peers.is_empty() {
			return vec![];
		}

		let total_difficulty = self.total_difficulty();

		let mut max_peers = peers
			.into_iter()
			.filter(|x| match x.try_read() {
				Ok(peer) => peer.info.total_difficulty > total_difficulty,
				Err(_) => false,
			}).collect::<Vec<_>>();

		thread_rng().shuffle(&mut max_peers);
		max_peers
	}

	/// Return vec of connected peers that currently advertise more work
	/// (total_difficulty) than we do and are also full archival nodes.
	pub fn more_work_archival_peers(&self) -> Vec<Arc<RwLock<Peer>>> {
		let peers = self.connected_peers();
		if peers.is_empty() {
			return vec![];
		}

		let total_difficulty = self.total_difficulty();

		let mut max_peers = peers
			.into_iter()
			.filter(|x| match x.try_read() {
				Ok(peer) => {
					peer.info.total_difficulty > total_difficulty
						&& peer.info.capabilities.contains(Capabilities::FULL_HIST)
				}
				Err(_) => false,
			}).collect::<Vec<_>>();

		thread_rng().shuffle(&mut max_peers);
		max_peers
	}

	/// Returns a single random peer with more work than us.
	pub fn more_work_peer(&self) -> Option<Arc<RwLock<Peer>>> {
		self.more_work_peers().pop()
	}

	/// Returns a single random archival peer with more work than us.
	pub fn more_work_archival_peer(&self) -> Option<Arc<RwLock<Peer>>> {
		self.more_work_archival_peers().pop()
	}

	/// Return vec of connected peers that currently have the most worked
	/// branch, showing the highest total difficulty.
	pub fn most_work_peers(&self) -> Vec<Arc<RwLock<Peer>>> {
		let peers = self.connected_peers();
		if peers.is_empty() {
			return vec![];
		}

		let max_total_difficulty = peers
			.iter()
			.map(|x| match x.try_read() {
				Ok(peer) => peer.info.total_difficulty.clone(),
				Err(_) => Difficulty::zero(),
			}).max()
			.unwrap();

		let mut max_peers = peers
			.into_iter()
			.filter(|x| match x.try_read() {
				Ok(peer) => peer.info.total_difficulty == max_total_difficulty,
				Err(_) => false,
			}).collect::<Vec<_>>();

		thread_rng().shuffle(&mut max_peers);
		max_peers
	}

	/// Returns a single random peer with the most worked branch, showing
	/// the highest total difficulty.
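	///
	/// A minimal sketch (not compiled as a doc test), assuming a configured
	/// `Peers` instance `peers`; a sync loop might pick its download target
	/// like this:
	///
	/// ```ignore
	/// if let Some(peer) = peers.most_work_peer() {
	///     let addr = peer.read().unwrap().info.addr;
	///     debug!(LOGGER, "syncing from most-work peer {}", addr);
	/// }
	/// ```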
	pub fn most_work_peer(&self) -> Option<Arc<RwLock<Peer>>> {
		self.most_work_peers().pop()
	}

	/// Whether the peer at this address is flagged as banned in the store.
	pub fn is_banned(&self, peer_addr: SocketAddr) -> bool {
		if let Ok(peer_data) = self.store.get_peer(peer_addr) {
			if peer_data.flags == State::Banned {
				return true;
			}
		}
		false
	}

	/// Ban a peer, disconnecting it if we're currently connected.
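	///
	/// A minimal sketch (not compiled as a doc test), assuming a configured
	/// `Peers` instance `peers` and the address `addr` of a misbehaving peer:
	///
	/// ```ignore
	/// peers.ban_peer(&addr, ReasonForBan::BadBlock);
	/// assert!(peers.is_banned(addr));
	/// peers.unban_peer(&addr); // reverses the ban in the peer store
	/// ```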
	pub fn ban_peer(&self, peer_addr: &SocketAddr, ban_reason: ReasonForBan) {
		if let Err(e) = self.update_state(*peer_addr, State::Banned) {
			error!(LOGGER, "Couldn't ban {}: {:?}", peer_addr, e);
		}

		if let Some(peer) = self.get_connected_peer(peer_addr) {
			debug!(LOGGER, "Banning peer {}", peer_addr);
			// setting peer status will get it removed at the next clean_peers
			let peer = peer.write().unwrap();
			peer.send_ban_reason(ban_reason);
			peer.set_banned();
			peer.stop();
		}
	}

	/// Unban a peer: checks that it exists and is currently banned, then
	/// marks it healthy again.
	pub fn unban_peer(&self, peer_addr: &SocketAddr) {
		match self.get_peer(*peer_addr) {
			Ok(_) => {
				if self.is_banned(*peer_addr) {
					if let Err(e) = self.update_state(*peer_addr, State::Healthy) {
						error!(LOGGER, "Couldn't unban {}: {:?}", peer_addr, e);
					}
				} else {
					error!(LOGGER, "Couldn't unban {}: peer is not banned", peer_addr);
				}
			}
			Err(e) => error!(LOGGER, "Couldn't unban {}: {:?}", peer_addr, e),
		};
	}

	/// Sends the given object to a limited number of our connected peers via
	/// the provided send closure, returning the number of successful sends.
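	///
	/// A minimal usage sketch (not compiled as a doc test), assuming a
	/// transaction reference `tx` in scope; this is how the public
	/// `broadcast_*` wrappers below drive it:
	///
	/// ```ignore
	/// let count = self.broadcast("transaction", |p| p.send_transaction(tx));
	/// debug!(LOGGER, "sent transaction to {} peers", count);
	/// ```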
	fn broadcast<F>(&self, obj_name: &str, f: F) -> u8
	where
		F: Fn(&Peer) -> Result<(), Error>,
	{
		let peers = self.connected_peers();
		let preferred_peers = 8;
		let mut count = 0;
		for p in peers.iter().take(preferred_peers) {
			let p = p.read().unwrap();
			if p.is_connected() {
				if let Err(e) = f(&p) {
					debug!(LOGGER, "Error sending {} to peer: {:?}", obj_name, e);
				} else {
					count += 1;
				}
			}
		}
		count
	}

	/// Broadcasts the provided compact block to PEER_PREFERRED_COUNT of our
	/// peers. We may be connected to PEER_MAX_COUNT peers so we only
	/// want to broadcast to a random subset of peers.
	/// A peer implementation may drop the broadcast request
	/// if it knows the remote peer already has the block.
	pub fn broadcast_compact_block(&self, b: &core::CompactBlock) {
		let count = self.broadcast("compact block", |p| p.send_compact_block(b));
		debug!(
			LOGGER,
			"broadcast_compact_block: {}, {} at {}, to {} peers, done.",
			b.hash(),
			b.header.pow.total_difficulty,
			b.header.height,
			count,
		);
	}

	/// Broadcasts the provided header to PEER_PREFERRED_COUNT of our peers.
	/// We may be connected to PEER_MAX_COUNT peers so we only
	/// want to broadcast to a random subset of peers.
	/// A peer implementation may drop the broadcast request
	/// if it knows the remote peer already has the header.
	pub fn broadcast_header(&self, bh: &core::BlockHeader) {
		let count = self.broadcast("header", |p| p.send_header(bh));
		trace!(
			LOGGER,
			"broadcast_header: {}, {} at {}, to {} peers, done.",
			bh.hash(),
			bh.pow.total_difficulty,
			bh.height,
			count,
		);
	}

	/// Broadcasts the provided stem transaction to our Dandelion relay.
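	///
	/// A minimal sketch (not compiled as a doc test) of the caller-side
	/// fallback, assuming a `Peers` instance `peers` and a transaction
	/// reference `tx`:
	///
	/// ```ignore
	/// if let Err(e) = peers.broadcast_stem_transaction(tx) {
	///     // no relay available: the stem phase is over, fluff instead
	///     debug!(LOGGER, "stem failed ({:?}), fluffing", e);
	///     peers.broadcast_transaction(tx);
	/// }
	/// ```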
	pub fn broadcast_stem_transaction(&self, tx: &core::Transaction) -> Result<(), Error> {
		let dandelion_relay = self.get_dandelion_relay();
		if dandelion_relay.is_empty() {
			debug!(LOGGER, "No dandelion relay, updating.");
			self.update_dandelion_relay();
		}
		// Re-read the relay map, since update_dandelion_relay may have just
		// set it. If there is still no relay, return an error and let the
		// caller handle it as they see fit. The caller will "fluff" at this
		// point as the stem phase is finished.
		let dandelion_relay = self.get_dandelion_relay();
		if dandelion_relay.is_empty() {
			return Err(Error::NoDandelionRelay);
		}
		for relay in dandelion_relay.values() {
			let relay = relay.read().unwrap();
			if relay.is_connected() {
				if let Err(e) = relay.send_stem_transaction(tx) {
					debug!(
						LOGGER,
						"Error sending stem transaction to peer relay: {:?}", e
					);
				}
			}
		}
		Ok(())
	}

	/// Broadcasts the provided transaction to PEER_PREFERRED_COUNT of our
	/// peers. We may be connected to PEER_MAX_COUNT peers so we only
	/// want to broadcast to a random subset of peers.
	/// A peer implementation may drop the broadcast request
	/// if it knows the remote peer already has the transaction.
	pub fn broadcast_transaction(&self, tx: &core::Transaction) {
		let count = self.broadcast("transaction", |p| p.send_transaction(tx));
		trace!(
			LOGGER,
			"broadcast_transaction: {}, to {} peers, done.",
			tx.hash(),
			count,
		);
	}

	/// Ping all our connected peers. A pong is always expected back,
	/// otherwise the peer is disconnected. This acts as a liveness test.
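	///
	/// A minimal sketch (not compiled as a doc test), assuming a `Peers`
	/// instance `peers` driven from a periodic maintenance loop with the
	/// current chain head (a hypothetical `head`) in scope:
	///
	/// ```ignore
	/// peers.check_all(head.total_difficulty.clone(), head.height);
	/// ```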
	pub fn check_all(&self, total_difficulty: Difficulty, height: u64) {
		let peers_map = self.peers.read().unwrap();
		for p in peers_map.values() {
			let p = p.read().unwrap();
			if p.is_connected() {
				let _ = p.send_ping(total_difficulty, height);
			}
		}
	}

	/// All peer information we have in storage.
	pub fn all_peers(&self) -> Vec<PeerData> {
		self.store.all_peers()
	}

	/// Find peers in store (not necessarily connected) and return their data.
	pub fn find_peers(&self, state: State, cap: Capabilities, count: usize) -> Vec<PeerData> {
		self.store.find_peers(state, cap, count)
	}

	/// Get peer in store by address.
	pub fn get_peer(&self, peer_addr: SocketAddr) -> Result<PeerData, Error> {
		self.store.get_peer(peer_addr).map_err(From::from)
	}

	/// Whether we've already seen a peer with the provided address.
	pub fn exists_peer(&self, peer_addr: SocketAddr) -> Result<bool, Error> {
		self.store.exists_peer(peer_addr).map_err(From::from)
	}

	/// Saves updated information about a peer.
	pub fn save_peer(&self, p: &PeerData) -> Result<(), Error> {
		self.store.save_peer(p).map_err(From::from)
	}

	/// Updates the state of a peer in store.
	pub fn update_state(&self, peer_addr: SocketAddr, new_state: State) -> Result<(), Error> {
		self.store
			.update_state(peer_addr, new_state)
			.map_err(From::from)
	}

	/// Iterate over the peer list and prune all peers we have
	/// lost connection to or have been deemed problematic.
	/// Also avoid connected peer count getting too high.
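	///
	/// A minimal sketch (not compiled as a doc test), assuming a `Peers`
	/// instance `peers` and a hypothetical `PEER_MAX_COUNT` limit; run it
	/// periodically alongside `check_all`:
	///
	/// ```ignore
	/// peers.clean_peers(PEER_MAX_COUNT as usize);
	/// ```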
	pub fn clean_peers(&self, max_count: usize) {
		let mut rm = vec![];

		// build a list of peers to be cleaned up
		for peer in self.peers.read().unwrap().values() {
			let peer_inner = peer.read().unwrap();
			if peer_inner.is_banned() {
				debug!(
					LOGGER,
					"clean_peers {:?}, peer banned", peer_inner.info.addr
				);
				rm.push(peer.clone());
			} else if !peer_inner.is_connected() {
				debug!(
					LOGGER,
					"clean_peers {:?}, not connected", peer_inner.info.addr
				);
				rm.push(peer.clone());
			}
		}

		// now clean up peer map based on the list to remove
		{
			let mut peers = self.peers.write().unwrap();
			for p in rm {
				let p = p.read().unwrap();
				peers.remove(&p.info.addr);
			}
		}

		// ensure we do not have too many connected peers
		let excess_count = {
			let peer_count = self.peer_count() as usize;
			if peer_count > max_count {
				peer_count - max_count
			} else {
				0
			}
		};

		// map peers to addrs in a block to bound how long we keep the read lock for
		let addrs = {
			self.connected_peers()
				.iter()
				.map(|x| {
					let p = x.read().unwrap();
					p.info.addr.clone()
				}).collect::<Vec<_>>()
		};

		// now remove them taking a short-lived write lock each time
		// maybe better to take write lock once and remove them all?
		for x in addrs.iter().take(excess_count) {
			let mut peers = self.peers.write().unwrap();
			peers.remove(x);
		}
	}

	/// Stop all connected peers and drain the peer map.
	pub fn stop(&self) {
		let mut peers = self.peers.write().unwrap();
		for (_, peer) in peers.drain() {
			let peer = peer.read().unwrap();
			peer.stop();
		}
	}

	/// Whether we have at least the minimum preferred number of connected
	/// peers.
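	///
	/// A minimal sketch (not compiled as a doc test): a seed/connect loop
	/// can use this to decide whether to dial more peers (the
	/// `connect_to_more_seeds` helper here is hypothetical):
	///
	/// ```ignore
	/// if !peers.enough_peers() {
	///     connect_to_more_seeds();
	/// }
	/// ```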
	pub fn enough_peers(&self) -> bool {
		self.connected_peers().len() >= self.config.peer_min_preferred_count() as usize
	}
}

impl ChainAdapter for Peers {
	fn total_difficulty(&self) -> Difficulty {
		self.adapter.total_difficulty()
	}

	fn total_height(&self) -> u64 {
		self.adapter.total_height()
	}

	fn transaction_received(&self, tx: core::Transaction, stem: bool) {
		self.adapter.transaction_received(tx, stem)
	}

	fn block_received(&self, b: core::Block, peer_addr: SocketAddr) -> bool {
		let hash = b.hash();
		if !self.adapter.block_received(b, peer_addr) {
			// if the peer sent us a block that's intrinsically bad
			// they are either mistaken or malevolent, both of which require a ban
			debug!(
				LOGGER,
				"Received a bad block {} from {}, the peer will be banned", hash, peer_addr
			);
			self.ban_peer(&peer_addr, ReasonForBan::BadBlock);
			false
		} else {
			true
		}
	}

	fn compact_block_received(&self, cb: core::CompactBlock, peer_addr: SocketAddr) -> bool {
		let hash = cb.hash();
		if !self.adapter.compact_block_received(cb, peer_addr) {
			// if the peer sent us a compact block that's intrinsically bad
			// they are either mistaken or malevolent, both of which require a ban
			debug!(
				LOGGER,
				"Received a bad compact block {} from {}, the peer will be banned",
				hash,
				&peer_addr
			);
			self.ban_peer(&peer_addr, ReasonForBan::BadCompactBlock);
			false
		} else {
			true
		}
	}

	fn header_received(&self, bh: core::BlockHeader, peer_addr: SocketAddr) -> bool {
		if !self.adapter.header_received(bh, peer_addr) {
			// if the peer sent us a block header that's intrinsically bad
			// they are either mistaken or malevolent, both of which require a ban
			self.ban_peer(&peer_addr, ReasonForBan::BadBlockHeader);
			false
		} else {
			true
		}
	}

	fn headers_received(&self, headers: Vec<core::BlockHeader>, peer_addr: SocketAddr) -> bool {
		if !self.adapter.headers_received(headers, peer_addr) {
			// if the peer sent us block headers that are intrinsically bad
			// they are either mistaken or malevolent, both of which require a ban
			self.ban_peer(&peer_addr, ReasonForBan::BadBlockHeader);
			false
		} else {
			true
		}
	}

	fn locate_headers(&self, hs: Vec<Hash>) -> Vec<core::BlockHeader> {
		self.adapter.locate_headers(hs)
	}

	fn get_block(&self, h: Hash) -> Option<core::Block> {
		self.adapter.get_block(h)
	}

	fn txhashset_read(&self, h: Hash) -> Option<TxHashSetRead> {
		self.adapter.txhashset_read(h)
	}

	fn txhashset_receive_ready(&self) -> bool {
		self.adapter.txhashset_receive_ready()
	}

	fn txhashset_write(&self, h: Hash, txhashset_data: File, peer_addr: SocketAddr) -> bool {
		if !self.adapter.txhashset_write(h, txhashset_data, peer_addr) {
			debug!(
				LOGGER,
				"Received bad txhashset data from {}, the peer will be banned", &peer_addr
			);
			self.ban_peer(&peer_addr, ReasonForBan::BadTxHashSet);
			false
		} else {
			true
		}
	}
}

impl NetAdapter for Peers {
	/// Find good peers we know with the provided capability and return their
	/// addresses.
	fn find_peer_addrs(&self, capab: Capabilities) -> Vec<SocketAddr> {
		let peers = self.find_peers(State::Healthy, capab, MAX_PEER_ADDRS as usize);
		trace!(
			LOGGER,
			"find_peer_addrs: {} healthy peers picked",
			peers.len()
		);
		map_vec!(peers, |p| p.addr)
	}

	/// A list of peers has been received from one of our peers.
	fn peer_addrs_received(&self, peer_addrs: Vec<SocketAddr>) {
		trace!(LOGGER, "Received {} peer addrs, saving.", peer_addrs.len());
		for pa in peer_addrs {
			if let Ok(e) = self.exists_peer(pa) {
				if e {
					continue;
				}
			}
			let peer = PeerData {
				addr: pa,
				capabilities: Capabilities::UNKNOWN,
				user_agent: "".to_string(),
				flags: State::Healthy,
				last_banned: 0,
				ban_reason: ReasonForBan::None,
			};
			if let Err(e) = self.save_peer(&peer) {
				error!(LOGGER, "Could not save received peer address: {:?}", e);
			}
		}
	}

	/// Update the total difficulty and height we have recorded for a peer,
	/// as reported in its latest ping/pong.
	fn peer_difficulty(&self, addr: SocketAddr, diff: Difficulty, height: u64) {
		if diff != self.total_difficulty() || height != self.total_height() {
			trace!(
				LOGGER,
				"ping/pong: {}: {} @ {} vs us: {} @ {}",
				addr,
				diff,
				height,
				self.total_difficulty(),
				self.total_height()
			);
		}

		if diff.to_num() > 0 {
			if let Some(peer) = self.get_connected_peer(&addr) {
				let mut peer = peer.write().unwrap();
				peer.info.total_difficulty = diff;
				peer.info.height = height;
			}
		}
	}

	/// Whether the connected peer at this address is currently flagged as
	/// banned.
	fn is_banned(&self, addr: SocketAddr) -> bool {
		if let Some(peer) = self.get_connected_peer(&addr) {
			let peer = peer.write().unwrap();
			peer.is_banned()
		} else {
			false
		}
	}
}