Mirror of https://github.com/mimblewimble/grin.git
[5.0.x] inefficient locking on recv of peers lists can result in failure to get peers lock (#3566) (#3570)
* fix for: inefficient locking of peers lists can result in failure to get peers lock
* don't hold the peers Vec lock while writing to the peers lmdb

Co-authored-by: Blade Doyle <bladedoyle@gmail.com>
parent 4de2d92433
commit a3c9b478e2
2 changed files with 43 additions and 19 deletions
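The heart of the fix is a standard Rust locking pattern: take the `peers` write lock only for the in-memory insert, and drop it before the slow lmdb write. Below is a minimal, self-contained sketch of the before/after shape of `add_connected`, using `std::sync::RwLock` and stand-in types; grin's actual code uses a timed `try_write_for(LOCK_TIMEOUT)` acquisition, `PeerAddr`, `Arc<Peer>`, and an lmdb-backed store.

```rust
use std::collections::HashMap;
use std::sync::RwLock;
use std::thread;
use std::time::Duration;

// Stand-ins for grin's types: the real map is PeerAddr -> Arc<Peer>,
// and the slow write goes to the peers lmdb via save_peer().
type Addr = String;
type PeerData = String;

struct Peers {
    peers: RwLock<HashMap<Addr, PeerData>>,
}

impl Peers {
    fn slow_db_write(&self, _pd: &PeerData) {
        // Simulates the lmdb write.
        thread::sleep(Duration::from_millis(50));
    }

    // Before the fix: the write guard is held across the slow store
    // write, so every other thread needing `peers` stalls behind the
    // DB, and timed acquisitions (like grin's try_write_for) start
    // failing under load.
    fn add_connected_before(&self, addr: Addr, pd: PeerData) {
        let mut peers = self.peers.write().unwrap();
        peers.insert(addr, pd.clone());
        self.slow_db_write(&pd); // guard still alive here
    }

    // After the fix: the guard lives in an inner scope and is dropped
    // before the DB write, so the lock covers only the map insert.
    fn add_connected_after(&self, addr: Addr, pd: PeerData) {
        {
            let mut peers = self.peers.write().unwrap();
            peers.insert(addr, pd.clone());
        } // write guard dropped here
        self.slow_db_write(&pd);
    }
}

fn main() {
    let p = Peers {
        peers: RwLock::new(HashMap::new()),
    };
    p.add_connected_before("10.0.0.1:3414".into(), "peer-a".into());
    p.add_connected_after("10.0.0.2:3414".into(), "peer-b".into());
}
```

The inner braces do the real work: the write guard is dropped at the closing brace, so the database write happens with no lock held and other threads can read or mutate the peer map in the meantime.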
```diff
--- a/p2p/src/peers.rs
+++ b/p2p/src/peers.rs
@@ -59,23 +59,29 @@ impl Peers {
 	/// Adds the peer to our internal peer mapping. Note that the peer is still
 	/// returned so the server can run it.
 	pub fn add_connected(&self, peer: Arc<Peer>) -> Result<(), Error> {
-		let mut peers = self.peers.try_write_for(LOCK_TIMEOUT).ok_or_else(|| {
-			error!("add_connected: failed to get peers lock");
-			Error::Timeout
-		})?;
-		let peer_data = PeerData {
-			addr: peer.info.addr,
-			capabilities: peer.info.capabilities,
-			user_agent: peer.info.user_agent.clone(),
-			flags: State::Healthy,
-			last_banned: 0,
-			ban_reason: ReasonForBan::None,
-			last_connected: Utc::now().timestamp(),
-		};
+		let peer_data: PeerData;
+		{
+			// Scope for peers vector lock - don't hold the peers lock while adding to lmdb
+			let mut peers = self.peers.try_write_for(LOCK_TIMEOUT).ok_or_else(|| {
+				error!("add_connected: failed to get peers lock");
+				Error::Timeout
+			})?;
+			peer_data = PeerData {
+				addr: peer.info.addr,
+				capabilities: peer.info.capabilities,
+				user_agent: peer.info.user_agent.clone(),
+				flags: State::Healthy,
+				last_banned: 0,
+				ban_reason: ReasonForBan::None,
+				last_connected: Utc::now().timestamp(),
+			};
+			debug!("Adding newly connected peer {}.", peer_data.addr);
+			peers.insert(peer_data.addr, peer);
+		}
 		debug!("Saving newly connected peer {}.", peer_data.addr);
-		self.save_peer(&peer_data)?;
-		peers.insert(peer_data.addr, peer);
+		if let Err(e) = self.save_peer(&peer_data) {
+			error!("Could not save connected peer address: {:?}", e);
+		}
 		Ok(())
 	}
```
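Two details in this hunk are easy to miss: `peer_data` is declared ahead of the inner block so it outlives the write guard and can still be handed to `save_peer` after the lock is released, and the store write's error is now logged rather than propagated with `?`, so a failed lmdb save no longer unwinds `add_connected` after the peer is already in the connected map.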
```diff
@@ -136,8 +142,10 @@ impl Peers {
 	}
 	/// Ban a peer, disconnecting it if we're currently connected
 	pub fn ban_peer(&self, peer_addr: PeerAddr, ban_reason: ReasonForBan) -> Result<(), Error> {
+		// Update the peer in peers db
 		self.update_state(peer_addr, State::Banned)?;
 
+		// Update the peer in the peers Vec
 		match self.get_connected_peer(peer_addr) {
 			Some(peer) => {
 				debug!("Banning peer {}", peer_addr);
@@ -295,6 +303,11 @@ impl Peers {
 		self.store.save_peer(p).map_err(From::from)
 	}
 
+	/// Saves updated information about multiple peers in batch
+	pub fn save_peers(&self, p: Vec<PeerData>) -> Result<(), Error> {
+		self.store.save_peers(p).map_err(From::from)
+	}
+
 	/// Updates the state of a peer in store
 	pub fn update_state(&self, peer_addr: PeerAddr, new_state: State) -> Result<(), Error> {
 		self.store
@@ -667,6 +680,7 @@ impl NetAdapter for Peers {
 	/// A list of peers has been received from one of our peers.
 	fn peer_addrs_received(&self, peer_addrs: Vec<PeerAddr>) {
 		trace!("Received {} peer addrs, saving.", peer_addrs.len());
+		let mut to_save: Vec<PeerData> = Vec::new();
 		for pa in peer_addrs {
 			if let Ok(e) = self.exists_peer(pa) {
 				if e {
@@ -682,9 +696,10 @@ impl NetAdapter for Peers {
 					ban_reason: ReasonForBan::None,
 					last_connected: Utc::now().timestamp(),
 				};
-				if let Err(e) = self.save_peer(&peer) {
-					error!("Could not save received peer address: {:?}", e);
-				}
+				to_save.push(peer);
 			}
 		}
+		if let Err(e) = self.save_peers(to_save) {
+			error!("Could not save received peer addresses: {:?}", e);
+		}
 	}
```
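The other half of the change is batching: `peer_addrs_received` now accumulates `PeerData` in `to_save` and writes the whole vector through `save_peers` in one store transaction, rather than opening one per received address. A rough sketch of why that matters, with a hypothetical `Batch` type standing in for the lmdb batch grin gets from `self.db.batch()`:

```rust
// Hypothetical store types for illustration only; grin's real batch
// comes from self.db.batch() and writes to lmdb.
struct Batch {
    pending: Vec<(String, Vec<u8>)>,
}

impl Batch {
    fn put_ser(&mut self, key: String, value: &[u8]) {
        // Cheap: just queues the write in memory.
        self.pending.push((key, value.to_vec()));
    }

    fn commit(self) {
        // Expensive: one transaction for everything queued.
        println!("committed {} entries in one transaction", self.pending.len());
    }
}

// Per-item commits: N addresses cost N transactions.
fn save_each(addrs: &[String]) {
    for a in addrs {
        let mut batch = Batch { pending: Vec::new() };
        batch.put_ser(a.clone(), a.as_bytes());
        batch.commit();
    }
}

// Collect-then-batch, as in the new save_peers(): N addresses, one commit.
fn save_all(addrs: &[String]) {
    let mut batch = Batch { pending: Vec::new() };
    for a in addrs {
        batch.put_ser(a.clone(), a.as_bytes());
    }
    batch.commit();
}

fn main() {
    let addrs: Vec<String> = (0..3).map(|i| format!("10.0.0.{}:3414", i)).collect();
    save_each(&addrs); // three commits
    save_all(&addrs); // one commit
}
```

With a peers message carrying many addresses, the per-address version pays the transaction overhead N times; the batched version pays it once.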
```diff
--- a/p2p/src/store.rs
+++ b/p2p/src/store.rs
@@ -127,6 +127,15 @@ impl PeerStore {
 		batch.commit()
 	}
 
+	pub fn save_peers(&self, p: Vec<PeerData>) -> Result<(), Error> {
+		let batch = self.db.batch()?;
+		for pd in p {
+			debug!("save_peers: {:?} marked {:?}", pd.addr, pd.flags);
+			batch.put_ser(&peer_key(pd.addr)[..], &pd)?;
+		}
+		batch.commit()
+	}
+
 	pub fn get_peer(&self, peer_addr: PeerAddr) -> Result<PeerData, Error> {
 		option_to_not_found(self.db.get_ser(&peer_key(peer_addr)[..]), || {
 			format!("Peer at address: {}", peer_addr)
```