Mirror of https://github.com/mimblewimble/grin.git
fix: Fix race condition with dandelion_relay peer map and make more semantic (#2548)
* fix: Fix race condition with dandelion_relay peer map and make more semantic
* Fix bug where we don't actually re-read the dandelion_relay variable after refreshing it
parent 5d904250d5
commit 41ed10940d
2 changed files with 26 additions and 31 deletions
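The second bullet is the subtler of the two fixes: the relay was read once into a local snapshot, refreshed on a miss, and then the stale snapshot was checked again instead of the lock's current contents. A minimal, self-contained sketch of that bug class, with a static `RwLock<Option<...>>` slot and toy names (`get_relay`, `update_relay`) standing in for grin's `Peers` methods:

```rust
use std::sync::RwLock;

// Stand-in for the dandelion_relay slot: (timestamp, peer name).
static RELAY: RwLock<Option<(i64, &str)>> = RwLock::new(None);

// Like Peers::get_dandelion_relay: returns a point-in-time snapshot.
fn get_relay() -> Option<(i64, &'static str)> {
    RELAY.read().unwrap().clone()
}

// Like Peers::update_dandelion_relay: picks a relay and stores it.
fn update_relay() {
    RELAY.write().unwrap().replace((0, "peer-a"));
}

fn main() {
    // Buggy shape from the old code: `relay` is a snapshot taken before
    // update_relay() runs, so the refresh is invisible to it.
    let relay = get_relay();
    if relay.is_none() {
        update_relay();
    }
    assert!(relay.is_none()); // still checking the stale snapshot

    // Fixed shape (the diff's or_else): refresh, then re-read the lock.
    *RELAY.write().unwrap() = None; // reset for the demo
    let relay = get_relay().or_else(|| {
        update_relay();
        get_relay()
    });
    assert!(relay.is_some());
}
```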
p2p/src/peers.rs

@@ -38,7 +38,7 @@ pub struct Peers {
 	pub adapter: Arc<dyn ChainAdapter>,
 	store: PeerStore,
 	peers: RwLock<HashMap<SocketAddr, Arc<Peer>>>,
-	dandelion_relay: RwLock<HashMap<i64, Arc<Peer>>>,
+	dandelion_relay: RwLock<Option<(i64, Arc<Peer>)>>,
 	config: P2PConfig,
 }
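The type change is the "make more semantic" half of the commit: a `HashMap` that is only ever supposed to hold one entry hides the single-relay invariant, while `Option<(i64, Arc<Peer>)>` states it outright. A small illustration, with plain `&str` standing in for `Arc<Peer>`:

```rust
use std::collections::HashMap;

fn main() {
    // Old shape: nothing enforces "at most one relay", and readers must
    // iterate a map to find it.
    let mut old_style: HashMap<i64, &str> = HashMap::new();
    old_style.insert(1, "peer-a");
    old_style.insert(2, "peer-b"); // a second "single" relay sneaks in
    assert_eq!(old_style.len(), 2);

    // New shape: at most one relay, by construction.
    let mut new_style: Option<(i64, &str)> = None;
    new_style.replace((1, "peer-a"));
    new_style.replace((2, "peer-b")); // replaces, never accumulates
    assert_eq!(new_style, Some((2, "peer-b")));
}
```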
@@ -49,7 +49,7 @@ impl Peers {
 			store,
 			config,
 			peers: RwLock::new(HashMap::new()),
-			dandelion_relay: RwLock::new(HashMap::new()),
+			dandelion_relay: RwLock::new(None),
 		}
 	}
@@ -115,10 +115,9 @@ impl Peers {
 	fn set_dandelion_relay(&self, peer: &Arc<Peer>) {
 		// Clear the map and add new relay
 		let dandelion_relay = &self.dandelion_relay;
-		dandelion_relay.write().clear();
 		dandelion_relay
 			.write()
-			.insert(Utc::now().timestamp(), peer.clone());
+			.replace((Utc::now().timestamp(), peer.clone()));
 		debug!(
 			"Successfully updated Dandelion relay to: {}",
 			peer.info.addr
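The race itself lived in the two separate write-lock acquisitions of the old body: a thread scheduled between `clear()` and `insert()` could observe an empty relay set. `Option::replace` swaps the value under a single acquisition, so readers see either the old relay or the new one. A sketch of the difference using std's `RwLock` (grin's lock has the same shape minus the `unwrap()`):

```rust
use std::sync::RwLock;

fn main() {
    let slot: RwLock<Option<(i64, &str)>> = RwLock::new(Some((1, "peer-a")));

    // Old pattern, two separate lock acquisitions: a reader scheduled
    // between these two statements finds no relay at all.
    *slot.write().unwrap() = None; // like dandelion_relay.write().clear();
    // <-- the race window
    slot.write().unwrap().replace((2, "peer-b")); // like .write().insert(..)

    // New pattern, a single acquisition: readers see either the previous
    // relay or the new one, never an empty slot.
    slot.write().unwrap().replace((3, "peer-c"));
    assert_eq!(*slot.read().unwrap(), Some((3, "peer-c")));
}
```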
@@ -126,7 +125,7 @@ impl Peers {
 	}

 	// Get the dandelion relay
-	pub fn get_dandelion_relay(&self) -> HashMap<i64, Arc<Peer>> {
+	pub fn get_dandelion_relay(&self) -> Option<(i64, Arc<Peer>)> {
 		self.dandelion_relay.read().clone()
 	}
@@ -370,24 +369,22 @@ impl Peers {

 	/// Relays the provided stem transaction to our single stem peer.
 	pub fn relay_stem_transaction(&self, tx: &core::Transaction) -> Result<(), Error> {
-		let dandelion_relay = self.get_dandelion_relay();
-		if dandelion_relay.is_empty() {
+		self.get_dandelion_relay()
+			.or_else(|| {
 				debug!("No dandelion relay, updating.");
 				self.update_dandelion_relay();
-		}
+				self.get_dandelion_relay()
+			})
 			// If still return an error, let the caller handle this as they see fit.
 			// The caller will "fluff" at this point as the stem phase is finished.
-		if dandelion_relay.is_empty() {
-			return Err(Error::NoDandelionRelay);
-		}
-		for relay in dandelion_relay.values() {
+			.ok_or(Error::NoDandelionRelay)
+			.map(|(_, relay)| {
 				if relay.is_connected() {
 					if let Err(e) = relay.send_stem_transaction(tx) {
 						debug!("Error sending stem transaction to peer relay: {:?}", e);
 					}
 				}
-		}
-		Ok(())
+			})
 	}

 	/// Broadcasts the provided transaction to PEER_PREFERRED_COUNT of our
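The rewritten method is one `Option`-to-`Result` pipeline: `or_else` refreshes and re-reads on a miss (the stale-read fix), `ok_or` turns a persistent miss into `Error::NoDandelionRelay` for the caller to fluff on, and `map` performs the send. A standalone sketch of the same chain, with toy `Peer` and `Error` types and the refresh step stubbed out:

```rust
#[derive(Debug)]
enum Error {
    NoDandelionRelay,
}

#[derive(Clone)]
struct Peer {
    connected: bool,
}

// Point-in-time snapshot, like Peers::get_dandelion_relay.
fn get_relay(slot: &Option<(i64, Peer)>) -> Option<(i64, Peer)> {
    slot.clone()
}

// Mirrors the shape of the rewritten relay_stem_transaction.
fn relay_stem(slot: &Option<(i64, Peer)>) -> Result<(), Error> {
    get_relay(slot)
        .or_else(|| {
            // grin calls update_dandelion_relay() here before the re-read;
            // this sketch has no updater, so the retry simply misses again.
            get_relay(slot)
        })
        // A persistent miss becomes an error; the caller fluffs instead.
        .ok_or(Error::NoDandelionRelay)
        .map(|(_, relay)| {
            if relay.connected {
                // relay.send_stem_transaction(tx) would go here
            }
        })
}

fn main() {
    assert!(relay_stem(&None).is_err());
    assert!(relay_stem(&Some((0, Peer { connected: true }))).is_ok());
}
```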
servers/src/grin/seed.rs

@@ -249,17 +249,15 @@ fn monitor_peers(
 fn update_dandelion_relay(peers: Arc<p2p::Peers>, dandelion_config: DandelionConfig) {
 	// Dandelion Relay Updater
 	let dandelion_relay = peers.get_dandelion_relay();
-	if dandelion_relay.is_empty() {
-		debug!("monitor_peers: no dandelion relay updating");
-		peers.update_dandelion_relay();
-	} else {
-		for last_added in dandelion_relay.keys() {
+	if let Some((last_added, _)) = dandelion_relay {
 		let dandelion_interval = Utc::now().timestamp() - last_added;
 		if dandelion_interval >= dandelion_config.relay_secs.unwrap() as i64 {
 			debug!("monitor_peers: updating expired dandelion relay");
 			peers.update_dandelion_relay();
 		}
-		}
+	} else {
+		debug!("monitor_peers: no dandelion relay updating");
+		peers.update_dandelion_relay();
 	}
 }
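On the monitoring side, iterating `dandelion_relay.keys()` to find the single timestamp collapses into one `if let Some((last_added, _))` destructure, with the `else` arm covering the missing-relay case. A sketch of that expiry check, assuming a fixed `RELAY_SECS` in place of `dandelion_config.relay_secs`:

```rust
use std::time::{SystemTime, UNIX_EPOCH};

const RELAY_SECS: i64 = 600; // stand-in for dandelion_config.relay_secs

fn now_secs() -> i64 {
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("clock before unix epoch")
        .as_secs() as i64
}

// Returns true when the relay needs (re-)selecting, mirroring the
// rewritten update_dandelion_relay monitor.
fn needs_update(relay: Option<(i64, &str)>) -> bool {
    if let Some((last_added, _)) = relay {
        // A relay exists: refresh only once the relay window has elapsed.
        now_secs() - last_added >= RELAY_SECS
    } else {
        // No relay at all: always select one.
        true
    }
}

fn main() {
    assert!(needs_update(None)); // missing -> select
    assert!(needs_update(Some((0, "peer-a")))); // long expired -> refresh
    assert!(!needs_update(Some((now_secs(), "peer-a")))); // fresh -> keep
}
```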