Mirror of https://github.com/mimblewimble/grin.git, synced 2025-01-21 03:21:08 +03:00
Refactor and optimise peers.rs (#1389)
* Use the internal hashmap for the count and contains methods instead of relying on the connected_peers method, which is expensive (it builds a vector from the hashmap)
* Reduce the number of `clone()` calls
* Refactor broadcast_xxx into a single generic helper
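The broadcast_xxx refactor is the main structural change: three near-identical send loops collapse into one generic helper that takes a closure. Below is a minimal standalone sketch of that pattern; this Peer, Error, and send_block are simplified stand-ins for the real p2p types, not the actual implementation.

    // Sketch only: Peer, Error and the send method are stand-ins.
    struct Peer {
        id: u32,
        connected: bool,
    }

    #[derive(Debug)]
    struct Error;

    impl Peer {
        fn is_connected(&self) -> bool {
            self.connected
        }
        fn send_block(&self, b: &str) -> Result<(), Error> {
            println!("peer {} <- block {}", self.id, b);
            Ok(())
        }
    }

    struct Peers {
        peers: Vec<Peer>,
    }

    impl Peers {
        // One generic helper replaces the three copy-pasted
        // broadcast_block/_compact_block/_header loops.
        fn broadcast<F>(&self, obj_name: &str, f: F) -> u8
        where
            F: Fn(&Peer) -> Result<(), Error>,
        {
            let preferred_peers = 8;
            let mut count = 0;
            for p in self.peers.iter().take(preferred_peers) {
                if p.is_connected() {
                    if let Err(e) = f(p) {
                        eprintln!("Error sending {} to peer: {:?}", obj_name, e);
                    } else {
                        count += 1;
                    }
                }
            }
            count
        }

        // Each public broadcast becomes a one-liner passing a closure.
        fn broadcast_block(&self, b: &str) {
            let count = self.broadcast("block", |p| p.send_block(b));
            println!("broadcast_block: sent to {} peers", count);
        }
    }

    fn main() {
        let peers = Peers {
            peers: vec![
                Peer { id: 1, connected: true },
                Peer { id: 2, connected: false },
            ],
        };
        peers.broadcast_block("deadbeef");
    }

Because the helper is generic over `F: Fn(&Peer) -> Result<(), Error>`, the per-message logic lives in one place and only the send call varies.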
parent 8c820a8929
commit 631201358f
2 changed files with 60 additions and 94 deletions
p2p/src/peers.rs (150 changed lines)

@@ -54,7 +54,7 @@ impl Peers {
     /// Adds the peer to our internal peer mapping. Note that the peer is still
     /// returned so the server can run it.
-    pub fn add_connected(&self, p: Peer) -> Arc<RwLock<Peer>> {
+    pub fn add_connected(&self, p: Peer) -> Result<Arc<RwLock<Peer>>, Error> {
         debug!(LOGGER, "Saving newly connected peer {}.", p.info.addr);
         let peer_data = PeerData {
             addr: p.info.addr,
@@ -64,9 +64,7 @@
             last_banned: 0,
             ban_reason: ReasonForBan::None,
         };
-        if let Err(e) = self.save_peer(&peer_data) {
-            error!(LOGGER, "Could not save connected peer: {:?}", e);
-        }
+        self.save_peer(&peer_data)?;

         let addr = p.info.addr.clone();
         let apeer = Arc::new(RwLock::new(p));
@@ -74,7 +72,7 @@
             let mut peers = self.peers.write().unwrap();
             peers.insert(addr, apeer.clone());
         }
-        apeer.clone()
+        Ok(apeer)
     }

     // Update the dandelion relay
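The `?` on save_peer changes behavior, not just style: before, a failed save was logged and the half-registered peer kept anyway; now add_connected returns Result and the error propagates to its callers (the two Server hunks at the end of this diff gain a matching `?`). A compilable sketch of the propagation pattern, with simplified stand-in types:

    // Stand-ins: the real add_connected returns Arc<RwLock<Peer>>.
    #[derive(Debug)]
    struct Error;

    struct Peers;

    impl Peers {
        fn save_peer(&self, ok: bool) -> Result<(), Error> {
            if ok { Ok(()) } else { Err(Error) }
        }

        // Before: `if let Err(e) = self.save_peer(..) { error!(..) }`
        // swallowed the failure; now it aborts the whole add.
        fn add_connected(&self, ok: bool) -> Result<u32, Error> {
            self.save_peer(ok)?;
            Ok(42) // stand-in for the shared peer handle
        }
    }

    // A caller in the same position as the Server methods below.
    fn handle_new_peer(peers: &Peers, ok: bool) -> Result<(), Error> {
        let added = peers.add_connected(ok)?; // propagate, don't log-and-continue
        println!("added peer handle {}", added);
        Ok(())
    }

    fn main() {
        let peers = Peers;
        assert!(handle_new_peer(&peers, true).is_ok());
        assert!(handle_new_peer(&peers, false).is_err());
    }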
@@ -106,7 +104,7 @@
     }

     pub fn is_known(&self, addr: &SocketAddr) -> bool {
-        self.get_connected_peer(addr).is_some()
+        self.peers.read().unwrap().contains_key(addr)
     }

     /// Get vec of peers we are currently connected to.
@@ -125,13 +123,11 @@
     pub fn outgoing_connected_peers(&self) -> Vec<Arc<RwLock<Peer>>> {
         let peers = self.connected_peers();
         let res = peers
-            .iter()
+            .into_iter()
             .filter(|x| match x.try_read() {
                 Ok(peer) => peer.info.direction == Direction::Outbound,
                 Err(_) => false,
-            })
-            .cloned()
-            .collect::<Vec<_>>();
+            }).collect::<Vec<_>>();
         res
     }

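The `.into_iter()` switch lets the filter consume the Vec that connected_peers() already allocated, so the trailing `.cloned()` (one atomic refcount bump per surviving Arc) disappears. A small before/after sketch, with Arc<u32> standing in for Arc<RwLock<Peer>>:

    use std::sync::Arc;

    // Old style: iterate by reference, then clone every survivor.
    fn filter_by_ref(peers: &[Arc<u32>]) -> Vec<Arc<u32>> {
        peers.iter().filter(|p| ***p > 1).cloned().collect()
    }

    // New style: consume the Vec; surviving Arcs are moved, not cloned.
    fn filter_by_value(peers: Vec<Arc<u32>>) -> Vec<Arc<u32>> {
        peers.into_iter().filter(|p| **p > 1).collect()
    }

    fn main() {
        let peers: Vec<Arc<u32>> = vec![Arc::new(1), Arc::new(2), Arc::new(3)];
        assert_eq!(filter_by_ref(&peers).len(), 2);
        assert_eq!(filter_by_value(peers).len(), 2);
    }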
@@ -142,7 +138,7 @@

     /// Number of peers we're currently connected to.
     pub fn peer_count(&self) -> u32 {
-        self.connected_peers().len() as u32
+        self.peers.read().unwrap().len() as u32
     }

     // Return vec of connected peers that currently advertise more work
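is_known and peer_count used to go through connected_peers(), which clones every Arc into a fresh Vec just to answer a membership or length query; reading the shared map directly is allocation-free. A simplified sketch (plain HashMap value, no Arc/RwLock layers):

    use std::collections::HashMap;
    use std::net::SocketAddr;

    struct Peers {
        // stand-in for the real HashMap<SocketAddr, Arc<RwLock<Peer>>>
        peers: HashMap<SocketAddr, String>,
    }

    impl Peers {
        fn is_known(&self, addr: &SocketAddr) -> bool {
            // before: built a Vec of all peers, then searched it
            self.peers.contains_key(addr)
        }

        fn peer_count(&self) -> u32 {
            self.peers.len() as u32
        }
    }

    fn main() {
        let addr: SocketAddr = "127.0.0.1:3414".parse().unwrap();
        let mut map = HashMap::new();
        map.insert(addr, "peer".to_string());
        let peers = Peers { peers: map };
        assert!(peers.is_known(&addr));
        assert_eq!(peers.peer_count(), 1);
    }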
@@ -156,13 +152,11 @@
         let total_difficulty = self.total_difficulty();

         let mut max_peers = peers
-            .iter()
+            .into_iter()
             .filter(|x| match x.try_read() {
                 Ok(peer) => peer.info.total_difficulty > total_difficulty,
                 Err(_) => false,
-            })
-            .cloned()
-            .collect::<Vec<_>>();
+            }).collect::<Vec<_>>();

         thread_rng().shuffle(&mut max_peers);
         max_peers
@@ -179,16 +173,14 @@
         let total_difficulty = self.total_difficulty();

         let mut max_peers = peers
-            .iter()
+            .into_iter()
             .filter(|x| match x.try_read() {
                 Ok(peer) => {
                     peer.info.total_difficulty > total_difficulty
                         && peer.info.capabilities.contains(Capabilities::FULL_HIST)
                 }
                 Err(_) => false,
-            })
-            .cloned()
-            .collect::<Vec<_>>();
+            }).collect::<Vec<_>>();

         thread_rng().shuffle(&mut max_peers);
         max_peers
@@ -196,18 +188,12 @@

     /// Returns single random peer with more work than us.
     pub fn more_work_peer(&self) -> Option<Arc<RwLock<Peer>>> {
-        match self.more_work_peers().first() {
-            Some(x) => Some(x.clone()),
-            None => None,
-        }
+        self.more_work_peers().pop()
     }

     /// Returns single random archival peer with more work than us.
     pub fn more_work_archival_peer(&self) -> Option<Arc<RwLock<Peer>>> {
-        match self.more_work_archival_peers().first() {
-            Some(x) => Some(x.clone()),
-            None => None,
-        }
+        self.more_work_archival_peers().pop()
     }

     /// Return vec of connected peers that currently have the most worked
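Because more_work_peers() and friends return an already-shuffled Vec, `pop()` removes and returns the last element by value, which is just as random as `first()` was and needs no clone. A sketch with a stand-in element type:

    // Stand-in for more_work_peers(): an already-shuffled candidate list.
    fn shuffled_candidates() -> Vec<String> {
        vec!["peer_a".to_string(), "peer_b".to_string()]
    }

    fn pick_one() -> Option<String> {
        // before:
        //     match shuffled_candidates().first() {
        //         Some(x) => Some(x.clone()),
        //         None => None,
        //     }
        // after: take ownership of the last (random) element directly
        shuffled_candidates().pop()
    }

    fn main() {
        println!("{:?}", pick_one());
    }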
@@ -223,18 +209,15 @@
             .map(|x| match x.try_read() {
                 Ok(peer) => peer.info.total_difficulty.clone(),
                 Err(_) => Difficulty::zero(),
-            })
-            .max()
+            }).max()
             .unwrap();

         let mut max_peers = peers
-            .iter()
+            .into_iter()
             .filter(|x| match x.try_read() {
                 Ok(peer) => peer.info.total_difficulty == max_total_difficulty,
                 Err(_) => false,
-            })
-            .cloned()
-            .collect::<Vec<_>>();
+            }).collect::<Vec<_>>();

         thread_rng().shuffle(&mut max_peers);
         max_peers
@@ -243,10 +226,7 @@
     /// Returns single random peer with the most worked branch, showing the
     /// highest total difficulty.
     pub fn most_work_peer(&self) -> Option<Arc<RwLock<Peer>>> {
-        match self.most_work_peers().first() {
-            Some(x) => Some(x.clone()),
-            None => None,
-        }
+        self.most_work_peers().pop()
     }

     pub fn is_banned(&self, peer_addr: SocketAddr) -> bool {
@@ -260,11 +240,11 @@

     /// Ban a peer, disconnecting it if we're currently connected
     pub fn ban_peer(&self, peer_addr: &SocketAddr, ban_reason: ReasonForBan) {
-        if let Err(e) = self.update_state(peer_addr.clone(), State::Banned) {
+        if let Err(e) = self.update_state(*peer_addr, State::Banned) {
             error!(LOGGER, "Couldn't ban {}: {:?}", peer_addr, e);
         }

-        if let Err(e) = self.update_last_banned(peer_addr.clone(), Utc::now().timestamp()) {
+        if let Err(e) = self.update_last_banned(*peer_addr, Utc::now().timestamp()) {
             error!(
                 LOGGER,
                 "Couldn't update last_banned time {}: {:?}", peer_addr, e
@@ -283,10 +263,10 @@

     /// Unban a peer, checks if it exists and banned then unban
     pub fn unban_peer(&self, peer_addr: &SocketAddr) {
-        match self.get_peer(peer_addr.clone()) {
+        match self.get_peer(*peer_addr) {
             Ok(_) => {
-                if self.is_banned(peer_addr.clone()) {
-                    if let Err(e) = self.update_state(peer_addr.clone(), State::Healthy) {
+                if self.is_banned(*peer_addr) {
+                    if let Err(e) = self.update_state(*peer_addr, State::Healthy) {
                         error!(LOGGER, "Couldn't unban {}: {:?}", peer_addr, e)
                     }
                 } else {
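SocketAddr is Copy, so `*peer_addr` hands callees a bitwise copy; the old `.clone()` compiled fine but implied a cost that isn't there. A tiny illustration (update_state here is a hypothetical stand-in for the real method):

    use std::net::SocketAddr;

    // Hypothetical stand-in for Peers::update_state.
    fn update_state(addr: SocketAddr) {
        println!("banning {}", addr);
    }

    fn ban_peer(peer_addr: &SocketAddr) {
        // SocketAddr is Copy: dereferencing copies it, no allocation.
        update_state(*peer_addr);
    }

    fn main() {
        let addr: SocketAddr = "10.0.0.1:3414".parse().unwrap();
        ban_peer(&addr);
    }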
@@ -297,25 +277,33 @@
         };
     }

-    /// Broadcasts the provided block to PEER_PREFERRED_COUNT of our peers.
-    /// We may be connected to PEER_MAX_COUNT peers so we only
-    /// want to broadcast to a random subset of peers.
-    /// A peer implementation may drop the broadcast request
-    /// if it knows the remote peer already has the block.
-    pub fn broadcast_block(&self, b: &core::Block) {
+    fn broadcast<F>(&self, obj_name: &str, f: F) -> u8
+    where
+        F: Fn(&Peer) -> Result<(), Error>,
+    {
         let peers = self.connected_peers();
         let preferred_peers = 8;
         let mut count = 0;
         for p in peers.iter().take(preferred_peers) {
             let p = p.read().unwrap();
             if p.is_connected() {
-                if let Err(e) = p.send_block(b) {
-                    debug!(LOGGER, "Error sending block to peer: {:?}", e);
+                if let Err(e) = f(&p) {
+                    debug!(LOGGER, "Error sending {} to peer: {:?}", obj_name, e);
                 } else {
                     count += 1;
                 }
             }
         }
+        count
+    }
+
+    /// Broadcasts the provided block to PEER_PREFERRED_COUNT of our peers.
+    /// We may be connected to PEER_MAX_COUNT peers so we only
+    /// want to broadcast to a random subset of peers.
+    /// A peer implementation may drop the broadcast request
+    /// if it knows the remote peer already has the block.
+    pub fn broadcast_block(&self, b: &core::Block) {
+        let count = self.broadcast("block", |p| p.send_block(b));
         debug!(
             LOGGER,
             "broadcast_block: {} @ {} [{}] was sent to {} peers.",
@@ -326,20 +314,13 @@
         );
     }

+    /// Broadcasts the provided compact block to PEER_PREFERRED_COUNT of our peers.
+    /// We may be connected to PEER_MAX_COUNT peers so we only
+    /// want to broadcast to a random subset of peers.
+    /// A peer implementation may drop the broadcast request
+    /// if it knows the remote peer already has the block.
     pub fn broadcast_compact_block(&self, b: &core::CompactBlock) {
-        let peers = self.connected_peers();
-        let preferred_peers = 8;
-        let mut count = 0;
-        for p in peers.iter().take(preferred_peers) {
-            let p = p.read().unwrap();
-            if p.is_connected() {
-                if let Err(e) = p.send_compact_block(b) {
-                    debug!(LOGGER, "Error sending compact block to peer: {:?}", e);
-                } else {
-                    count += 1;
-                }
-            }
-        }
+        let count = self.broadcast("compact block", |p| p.send_compact_block(b));
         debug!(
             LOGGER,
             "broadcast_compact_block: {}, {} at {}, to {} peers, done.",
@@ -350,25 +331,13 @@
         );
     }

-    /// Broadcasts the provided block to PEER_PREFERRED_COUNT of our peers.
+    /// Broadcasts the provided header to PEER_PREFERRED_COUNT of our peers.
     /// We may be connected to PEER_MAX_COUNT peers so we only
     /// want to broadcast to a random subset of peers.
     /// A peer implementation may drop the broadcast request
-    /// if it knows the remote peer already has the block.
+    /// if it knows the remote peer already has the header.
     pub fn broadcast_header(&self, bh: &core::BlockHeader) {
-        let peers = self.connected_peers();
-        let preferred_peers = 8;
-        let mut count = 0;
-        for p in peers.iter().take(preferred_peers) {
-            let p = p.read().unwrap();
-            if p.is_connected() {
-                if let Err(e) = p.send_header(bh) {
-                    debug!(LOGGER, "Error sending header to peer: {:?}", e);
-                } else {
-                    count += 1;
-                }
-            }
-        }
+        let count = self.broadcast("header", |p| p.send_header(bh));
         trace!(
             LOGGER,
             "broadcast_header: {}, {} at {}, to {} peers, done.",
@@ -411,15 +380,13 @@
     /// A peer implementation may drop the broadcast request
     /// if it knows the remote peer already has the transaction.
     pub fn broadcast_transaction(&self, tx: &core::Transaction) {
-        let peers = self.connected_peers();
-        for p in peers.iter().take(8) {
-            let p = p.read().unwrap();
-            if p.is_connected() {
-                if let Err(e) = p.send_transaction(tx) {
-                    debug!(LOGGER, "Error sending transaction to peer: {:?}", e);
-                }
-            }
-        }
+        let count = self.broadcast("transaction", |p| p.send_transaction(tx));
+        trace!(
+            LOGGER,
+            "broadcast_transaction: {}, to {} peers, done.",
+            tx.hash(),
+            count,
+        );
     }

     /// Ping all our connected peers. Always automatically expects a pong back
@@ -429,7 +396,7 @@
         for p in peers_map.values() {
             let p = p.read().unwrap();
             if p.is_connected() {
-                let _ = p.send_ping(total_difficulty.clone(), height);
+                let _ = p.send_ping(total_difficulty, height);
             }
         }
     }
@@ -494,7 +461,7 @@
         // now clean up peer map based on the list to remove
         {
             let mut peers = self.peers.write().unwrap();
-            for p in rm.clone() {
+            for p in rm {
                 let p = p.read().unwrap();
                 peers.remove(&p.info.addr);
             }
@@ -502,7 +469,7 @@

         // ensure we do not have too many connected peers
         let excess_count = {
-            let peer_count = self.peer_count().clone() as usize;
+            let peer_count = self.peer_count() as usize;
             if peer_count > max_count {
                 peer_count - max_count
             } else {
@@ -517,8 +484,7 @@
                 .map(|x| {
                     let p = x.read().unwrap();
                     p.info.addr.clone()
-                })
-                .collect::<Vec<_>>()
+                }).collect::<Vec<_>>()
         };

         // now remove them taking a short-lived write lock each time

@@ -168,7 +168,7 @@ impl Server {
             &self.handshake,
             self.peers.clone(),
         )?;
-        let added = self.peers.add_connected(peer);
+        let added = self.peers.add_connected(peer)?;
         {
             let mut peer = added.write().unwrap();
             peer.start(stream);
@@ -193,7 +193,7 @@ impl Server {
             &self.handshake,
             self.peers.clone(),
         )?;
-        let added = self.peers.add_connected(peer);
+        let added = self.peers.add_connected(peer)?;
         let mut peer = added.write().unwrap();
         peer.start(stream);
         Ok(())