diff --git a/api/src/handlers/chain_api.rs b/api/src/handlers/chain_api.rs index 33b434d16..598bcf8cb 100644 --- a/api/src/handlers/chain_api.rs +++ b/api/src/handlers/chain_api.rs @@ -144,7 +144,7 @@ impl OutputHandler { "Failure to get output for commitment {} with error {}", commit, e ); - return Err(e.into()); + return Err(e); } }; } @@ -220,7 +220,7 @@ impl OutputHandler { "Failure to get output for commitment {} with error {}", x, e ); - return Err(e.into()); + return Err(e); } }; } diff --git a/api/src/handlers/utils.rs b/api/src/handlers/utils.rs index ba0e78574..7b6848ebb 100644 --- a/api/src/handlers/utils.rs +++ b/api/src/handlers/utils.rs @@ -94,7 +94,7 @@ pub fn get_output_v2( let output_printable = OutputPrintable::from_output( &output, - chain.clone(), + chain, header.as_ref(), include_proof, include_merkle_proof, diff --git a/api/src/types.rs b/api/src/types.rs index 37848f478..aa7b23890 100644 --- a/api/src/types.rs +++ b/api/src/types.rs @@ -344,9 +344,7 @@ impl OutputPrintable { let p_vec = util::from_hex(&proof_str) .map_err(|_| ser::Error::HexError("invalid output range_proof".to_string()))?; let mut p_bytes = [0; util::secp::constants::MAX_PROOF_SIZE]; - for i in 0..p_bytes.len() { - p_bytes[i] = p_vec[i]; - } + p_bytes.clone_from_slice(&p_vec[..util::secp::constants::MAX_PROOF_SIZE]); Ok(pedersen::RangeProof { proof: p_bytes, plen: p_bytes.len(), diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 3c71fa7e5..087c0b342 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -585,8 +585,8 @@ impl Chain { let previous_header = batch.get_previous_header(&b.header)?; pipe::rewind_and_apply_fork(&previous_header, ext, batch)?; - let ref mut extension = ext.extension; - let ref mut header_extension = ext.header_extension; + let extension = &mut ext.extension; + let header_extension = &mut ext.header_extension; // Retrieve the header root before we apply the new block let prev_root = header_extension.root()?; @@ -1451,7 +1451,7 @@ fn setup_head( let res = txhashset::extending(header_pmmr, txhashset, &mut batch, |ext, batch| { pipe::rewind_and_apply_fork(&header, ext, batch)?; - let ref mut extension = ext.extension; + let extension = &mut ext.extension; extension.validate_roots(&header)?; diff --git a/chain/src/pipe.rs b/chain/src/pipe.rs index 56e0b901c..66651f51c 100644 --- a/chain/src/pipe.rs +++ b/chain/src/pipe.rs @@ -118,9 +118,9 @@ pub fn process_block(b: &Block, ctx: &mut BlockContext<'_>) -> Result, batch: &store::Batch<'_>, ) -> Result<(), Error> { - let ref extension = ext.extension; - let ref header_extension = ext.header_extension; + let extension = &ext.extension; + let header_extension = &ext.header_extension; extension .utxo_view(header_extension) .verify_coinbase_maturity(&block.inputs(), block.header.height, batch) @@ -541,8 +541,8 @@ pub fn rewind_and_apply_fork( ext: &mut txhashset::ExtensionPair<'_>, batch: &store::Batch<'_>, ) -> Result<(), Error> { - let ref mut extension = ext.extension; - let ref mut header_extension = ext.header_extension; + let extension = &mut ext.extension; + let header_extension = &mut ext.header_extension; // Prepare the header MMR. 
rewind_and_apply_header_fork(header, header_extension, batch)?; @@ -592,8 +592,8 @@ fn validate_utxo( ext: &mut txhashset::ExtensionPair<'_>, batch: &store::Batch<'_>, ) -> Result<(), Error> { - let ref mut extension = ext.extension; - let ref mut header_extension = ext.header_extension; + let extension = &ext.extension; + let header_extension = &ext.header_extension; extension .utxo_view(header_extension) .validate_block(block, batch) diff --git a/chain/src/store.rs b/chain/src/store.rs index 780323f60..028f21e6e 100644 --- a/chain/src/store.rs +++ b/chain/src/store.rs @@ -377,7 +377,7 @@ impl<'a> Batch<'a> { { Ok(Bitmap::deserialize(&bytes)) } else { - Err(Error::NotFoundErr("legacy block input bitmap".to_string()).into()) + Err(Error::NotFoundErr("legacy block input bitmap".to_string())) } } diff --git a/chain/src/txhashset/txhashset.rs b/chain/src/txhashset/txhashset.rs index e0b9472a0..af7b099fc 100644 --- a/chain/src/txhashset/txhashset.rs +++ b/chain/src/txhashset/txhashset.rs @@ -224,7 +224,7 @@ impl TxHashSet { let output_pmmr: ReadonlyPMMR<'_, Output, _> = ReadonlyPMMR::at(&self.output_pmmr_h.backend, self.output_pmmr_h.last_pos); if let Some(out) = output_pmmr.get_data(pos) { - if OutputIdentifier::from(out) == *output_id { + if out == *output_id { Ok(Some(CommitPos { pos, height })) } else { Ok(None) @@ -1103,7 +1103,7 @@ impl<'a> Extension<'a> { if head_header.height <= header.height { // Nothing to rewind but we do want to truncate the MMRs at header for consistency. - self.rewind_mmrs_to_pos(header.output_mmr_size, header.kernel_mmr_size, &vec![])?; + self.rewind_mmrs_to_pos(header.output_mmr_size, header.kernel_mmr_size, &[])?; self.apply_to_bitmap_accumulator(&[header.output_mmr_size])?; } else { let mut affected_pos = vec![]; @@ -1156,7 +1156,7 @@ impl<'a> Extension<'a> { // Update our BitmapAccumulator based on affected outputs. // We want to "unspend" every rewound spent output. // Treat last_pos as an affected output to ensure we rebuild far enough back. - let mut affected_pos = spent_pos.clone(); + let mut affected_pos = spent_pos; affected_pos.push(self.output_pmmr.last_pos); // Remove any entries from the output_pos created by the block being rewound. @@ -1181,7 +1181,7 @@ impl<'a> Extension<'a> { // reused output commitment. For example an output at pos 1, spent, reused at pos 2. // The output_pos index should be updated to reflect the old pos 1 when unspent. 
if let Ok(spent) = spent { - for (x, y) in block.inputs().into_iter().zip(spent) { + for (x, y) in block.inputs().iter().zip(spent) { batch.save_output_pos_height(&x.commitment(), y.pos, y.height)?; } } @@ -1197,7 +1197,7 @@ impl<'a> Extension<'a> { kernel_pos: u64, spent_pos: &[u64], ) -> Result<(), Error> { - let bitmap: Bitmap = spent_pos.into_iter().map(|x| *x as u32).collect(); + let bitmap: Bitmap = spent_pos.iter().map(|x| *x as u32).collect(); self.output_pmmr .rewind(output_pos, &bitmap) .map_err(&ErrorKind::TxHashSetErr)?; diff --git a/chain/src/types.rs b/chain/src/types.rs index 0cc9d3273..9e3155ddd 100644 --- a/chain/src/types.rs +++ b/chain/src/types.rs @@ -188,10 +188,7 @@ impl SyncState { /// Get sync error pub fn sync_error(&self) -> Option<String> { - self.sync_error .read() .as_ref() .and_then(|e| Some(e.to_string())) + self.sync_error.read().as_ref().map(|e| e.to_string()) } /// Clear sync error diff --git a/config/src/comments.rs b/config/src/comments.rs index 06c8bd88c..41d823521 100644 --- a/config/src/comments.rs +++ b/config/src/comments.rs @@ -486,10 +486,10 @@ fn comments() -> HashMap<String, String> { } fn get_key(line: &str) -> String { - if line.contains("[") && line.contains("]") { + if line.contains('[') && line.contains(']') { return line.to_owned(); - } else if line.contains("=") { - return line.split("=").collect::<Vec<&str>>()[0].trim().to_owned(); + } else if line.contains('=') { + return line.split('=').collect::<Vec<&str>>()[0].trim().to_owned(); } else { return "NOT_FOUND".to_owned(); } @@ -497,7 +497,7 @@ fn get_key(line: &str) -> String { pub fn insert_comments(orig: String) -> String { let comments = comments(); - let lines: Vec<&str> = orig.split("\n").collect(); + let lines: Vec<&str> = orig.split('\n').collect(); let mut out_lines = vec![]; for l in lines { let key = get_key(l); @@ -511,5 +511,5 @@ pub fn insert_comments(orig: String) -> String { for l in out_lines { ret_val.push_str(&l); } - ret_val.to_owned() + ret_val } diff --git a/config/src/config.rs b/config/src/config.rs index e63f92652..bbc969a23 100644 --- a/config/src/config.rs +++ b/config/src/config.rs @@ -34,14 +34,14 @@ use crate::util::logger::LoggingConfig; /// The default file name to use when trying to derive /// the node config file location -pub const SERVER_CONFIG_FILE_NAME: &'static str = "grin-server.toml"; -const SERVER_LOG_FILE_NAME: &'static str = "grin-server.log"; -const GRIN_HOME: &'static str = ".grin"; -const GRIN_CHAIN_DIR: &'static str = "chain_data"; +pub const SERVER_CONFIG_FILE_NAME: &str = "grin-server.toml"; +const SERVER_LOG_FILE_NAME: &str = "grin-server.log"; +const GRIN_HOME: &str = ".grin"; +const GRIN_CHAIN_DIR: &str = "chain_data"; /// Node Rest API and V2 Owner API secret -pub const API_SECRET_FILE_NAME: &'static str = ".api_secret"; +pub const API_SECRET_FILE_NAME: &str = ".api_secret"; /// Foreign API secret -pub const FOREIGN_API_SECRET_FILE_NAME: &'static str = ".foreign_api_secret"; +pub const FOREIGN_API_SECRET_FILE_NAME: &str = ".foreign_api_secret"; fn get_grin_path(chain_type: &global::ChainTypes) -> Result<PathBuf, ConfigError> { // Check if grin dir exists @@ -103,7 +103,7 @@ fn check_api_secret_files( secret_file_name: &str, ) -> Result<(), ConfigError> { let grin_path = get_grin_path(chain_type)?; - let mut api_secret_path = grin_path.clone(); + let mut api_secret_path = grin_path; api_secret_path.push(secret_file_name); if !api_secret_path.exists() { init_api_secret(&api_secret_path) @@ -236,15 +236,8 @@ impl GlobalConfig { } Err(e) => { return Err(ConfigError::ParseError( - String::from(
self.config_file_path - .as_mut() - .unwrap() - .to_str() - .unwrap() - .clone(), - ), - String::from(format!("{}", e)), + self.config_file_path.unwrap().to_str().unwrap().to_string(), + format!("{}", e), )); } } @@ -292,10 +285,7 @@ impl GlobalConfig { match encoded { Ok(enc) => return Ok(enc), Err(e) => { - return Err(ConfigError::SerializationError(String::from(format!( - "{}", - e - )))); + return Err(ConfigError::SerializationError(format!("{}", e))); } } } diff --git a/config/src/types.rs b/config/src/types.rs index 617feb894..4387e49f7 100644 --- a/config/src/types.rs +++ b/config/src/types.rs @@ -62,7 +62,7 @@ impl From for ConfigError { fn from(error: io::Error) -> ConfigError { ConfigError::FileIOError( String::from(""), - String::from(format!("Error loading config file: {}", error)), + format!("Error loading config file: {}", error), ) } } diff --git a/core/src/core/block.rs b/core/src/core/block.rs index adfee7898..f916c156f 100644 --- a/core/src/core/block.rs +++ b/core/src/core/block.rs @@ -363,8 +363,8 @@ impl BlockHeader { proof: Proof, ) -> Result { // Convert hex pre pow string - let mut header_bytes = from_hex(&pre_pow) - .map_err(|e| Error::Serialization(ser::Error::HexError(e.to_string())))?; + let mut header_bytes = + from_hex(&pre_pow).map_err(|e| Error::Serialization(ser::Error::HexError(e)))?; // Serialize and append serialized nonce and proof serialize_default(&mut header_bytes, &nonce)?; serialize_default(&mut header_bytes, &proof)?; diff --git a/core/src/libtx/secp_ser.rs b/core/src/libtx/secp_ser.rs index 704f24e0b..e0989fd60 100644 --- a/core/src/libtx/secp_ser.rs +++ b/core/src/libtx/secp_ser.rs @@ -44,10 +44,9 @@ pub mod pubkey_serde { let static_secp = static_secp_instance(); let static_secp = static_secp.lock(); String::deserialize(deserializer) - .and_then(|string| from_hex(&string).map_err(|err| Error::custom(err.to_string()))) + .and_then(|string| from_hex(&string).map_err(Error::custom)) .and_then(|bytes: Vec| { - PublicKey::from_slice(&static_secp, &bytes) - .map_err(|err| Error::custom(err.to_string())) + PublicKey::from_slice(&static_secp, &bytes).map_err(Error::custom) }) } } @@ -82,13 +81,13 @@ pub mod option_sig_serde { let static_secp = static_secp.lock(); Option::::deserialize(deserializer).and_then(|res| match res { Some(string) => from_hex(&string) - .map_err(|err| Error::custom(err.to_string())) + .map_err(Error::custom) .and_then(|bytes: Vec| { let mut b = [0u8; 64]; b.copy_from_slice(&bytes[0..64]); secp::Signature::from_compact(&static_secp, &b) .map(Some) - .map_err(|err| Error::custom(err.to_string())) + .map_err(Error::custom) }), None => Ok(None), }) @@ -124,13 +123,13 @@ pub mod option_seckey_serde { let static_secp = static_secp.lock(); Option::::deserialize(deserializer).and_then(|res| match res { Some(string) => from_hex(&string) - .map_err(|err| Error::custom(err.to_string())) + .map_err(Error::custom) .and_then(|bytes: Vec| { let mut b = [0u8; 32]; b.copy_from_slice(&bytes[0..32]); secp::key::SecretKey::from_slice(&static_secp, &b) .map(Some) - .map_err(|err| Error::custom(err.to_string())) + .map_err(Error::custom) }), None => Ok(None), }) @@ -161,12 +160,11 @@ pub mod sig_serde { let static_secp = static_secp_instance(); let static_secp = static_secp.lock(); String::deserialize(deserializer) - .and_then(|string| from_hex(&string).map_err(|err| Error::custom(err.to_string()))) + .and_then(|string| from_hex(&string).map_err(Error::custom)) .and_then(|bytes: Vec| { let mut b = [0u8; 64]; b.copy_from_slice(&bytes[0..64]); - 
secp::Signature::from_compact(&static_secp, &b) - .map_err(|err| Error::custom(err.to_string())) + secp::Signature::from_compact(&static_secp, &b).map_err(Error::custom) }) } } @@ -196,7 +194,7 @@ pub mod option_commitment_serde { { Option::::deserialize(deserializer).and_then(|res| match res { Some(string) => from_hex(&string) - .map_err(|err| Error::custom(err.to_string())) + .map_err(Error::custom) .and_then(|bytes: Vec| Ok(Some(Commitment::from_vec(bytes.to_vec())))), None => Ok(None), }) @@ -208,9 +206,8 @@ where D: Deserializer<'de>, { use serde::de::Error; - String::deserialize(deserializer).and_then(|string| { - BlindingFactor::from_hex(&string).map_err(|err| Error::custom(err.to_string())) - }) + String::deserialize(deserializer) + .and_then(|string| BlindingFactor::from_hex(&string).map_err(Error::custom)) } /// Creates a RangeProof from a hex string @@ -221,7 +218,7 @@ where use serde::de::{Error, IntoDeserializer}; let val = String::deserialize(deserializer) - .and_then(|string| from_hex(&string).map_err(|err| Error::custom(err.to_string())))?; + .and_then(|string| from_hex(&string).map_err(Error::custom))?; RangeProof::deserialize(val.into_deserializer()) } @@ -232,7 +229,7 @@ where { use serde::de::Error; String::deserialize(deserializer) - .and_then(|string| from_hex(&string).map_err(|err| Error::custom(err.to_string()))) + .and_then(|string| from_hex(&string).map_err(Error::custom)) .and_then(|bytes: Vec| Ok(Commitment::from_vec(bytes.to_vec()))) } @@ -390,9 +387,9 @@ mod test { SerTest { opt_skey: Some(sk.clone()), pub_key: PublicKey::from_secret_key(&secp, &sk).unwrap(), - opt_sig: Some(sig.clone()), + opt_sig: Some(sig), opt_commit: Some(commit), - sig: sig.clone(), + sig: sig, num: 30, opt_num: Some(33), } diff --git a/p2p/src/types.rs b/p2p/src/types.rs index b455984fc..8e24d624d 100644 --- a/p2p/src/types.rs +++ b/p2p/src/types.rs @@ -183,8 +183,8 @@ impl<'de> Visitor<'de> for PeerAddrs { Err(_) => { let socket_addrs = entry .to_socket_addrs() - .expect(format!("Unable to resolve DNS: {}", entry).as_str()); - peers.append(&mut socket_addrs.map(|addr| PeerAddr(addr)).collect()); + .unwrap_or_else(|_| panic!("Unable to resolve DNS: {}", entry)); + peers.append(&mut socket_addrs.map(PeerAddr).collect()); } } } diff --git a/servers/src/common/adapters.rs b/servers/src/common/adapters.rs index 8af204788..8d1701069 100644 --- a/servers/src/common/adapters.rs +++ b/servers/src/common/adapters.rs @@ -197,7 +197,7 @@ impl p2p::ChainAdapter for NetToChainAdapter { ); // If we have missing kernels then we know we cannot hydrate this compact block. 
- if missing_short_ids.len() > 0 { + if !missing_short_ids.is_empty() { self.request_block(&cb.header, peer_info, chain::Options::NONE); return Ok(true); } @@ -224,15 +224,13 @@ impl p2p::ChainAdapter for NetToChainAdapter { { debug!("successfully hydrated block from tx pool!"); self.process_block(block, peer_info, chain::Options::NONE) + } else if self.sync_state.status() == SyncStatus::NoSync { + debug!("adapter: block invalid after hydration, requesting full block"); + self.request_block(&cb.header, peer_info, chain::Options::NONE); + Ok(true) } else { - if self.sync_state.status() == SyncStatus::NoSync { - debug!("adapter: block invalid after hydration, requesting full block"); - self.request_block(&cb.header, peer_info, chain::Options::NONE); - Ok(true) - } else { - debug!("block invalid after hydration, ignoring it, cause still syncing"); - Ok(true) - } + debug!("block invalid after hydration, ignoring it, cause still syncing"); + Ok(true) } } else { debug!("failed to retrieve previous block header (still syncing?)"); @@ -294,7 +292,7 @@ impl p2p::ChainAdapter for NetToChainAdapter { peer_info.addr ); - if bhs.len() == 0 { + if bhs.is_empty() { return Ok(false); } @@ -304,7 +302,7 @@ impl p2p::ChainAdapter for NetToChainAdapter { Err(e) => { debug!("Block headers refused by chain: {:?}", e); if e.is_bad_data() { - return Ok(false); + Ok(false) } else { Err(e) } @@ -361,8 +359,7 @@ impl p2p::ChainAdapter for NetToChainAdapter { } fn kernel_data_write(&self, reader: &mut dyn Read) -> Result { - let res = self.chain().kernel_data_write(reader)?; - error!("***** kernel_data_write: {:?}", res); + self.chain().kernel_data_write(reader)?; Ok(true) } @@ -623,7 +620,7 @@ impl NetToChainAdapter { // uses a different thread to avoid blocking the caller thread (likely a peer) let mut rng = thread_rng(); if 0 == rng.gen_range(0, global::COMPACTION_CHECK) { - let chain = self.chain().clone(); + let chain = self.chain(); let _ = thread::Builder::new() .name("compactor".to_string()) .spawn(move || { @@ -904,25 +901,25 @@ impl pool::BlockChain for PoolToChainAdapter { fn chain_head(&self) -> Result { self.chain() .head_header() - .map_err(|_| pool::PoolError::Other(format!("failed to get head_header"))) + .map_err(|_| pool::PoolError::Other("failed to get head_header".to_string())) } fn get_block_header(&self, hash: &Hash) -> Result { self.chain() .get_block_header(hash) - .map_err(|_| pool::PoolError::Other(format!("failed to get block_header"))) + .map_err(|_| pool::PoolError::Other("failed to get block_header".to_string())) } fn get_block_sums(&self, hash: &Hash) -> Result { self.chain() .get_block_sums(hash) - .map_err(|_| pool::PoolError::Other(format!("failed to get block_sums"))) + .map_err(|_| pool::PoolError::Other("failed to get block_sums".to_string())) } fn validate_tx(&self, tx: &Transaction) -> Result<(), pool::PoolError> { self.chain() .validate_tx(tx) - .map_err(|_| pool::PoolError::Other(format!("failed to validate tx"))) + .map_err(|_| pool::PoolError::Other("failed to validate tx".to_string())) } fn verify_coinbase_maturity(&self, tx: &Transaction) -> Result<(), pool::PoolError> { diff --git a/servers/src/common/stats.rs b/servers/src/common/stats.rs index bae8bd2ef..926101f0b 100644 --- a/servers/src/common/stats.rs +++ b/servers/src/common/stats.rs @@ -223,13 +223,13 @@ impl PeerStats { /// Convert from a peer directly pub fn from_peer(peer: &p2p::Peer) -> PeerStats { // State - let mut state = "Disconnected"; - if peer.is_connected() { - state = "Connected"; - } - if 
peer.is_banned() { - state = "Banned"; - } + let state = if peer.is_banned() { + "Banned" + } else if peer.is_connected() { + "Connected" + } else { + "Disconnected" + }; let addr = peer.info.addr.to_string(); let direction = match peer.info.direction { p2p::types::Direction::Inbound => "Inbound", diff --git a/servers/src/grin/dandelion_monitor.rs b/servers/src/grin/dandelion_monitor.rs index d4442a460..90cd5a9fd 100644 --- a/servers/src/grin/dandelion_monitor.rs +++ b/servers/src/grin/dandelion_monitor.rs @@ -48,7 +48,7 @@ pub fn monitor_transactions( let run_interval = Duration::from_secs(10); let mut last_run = Instant::now() .checked_sub(Duration::from_secs(20)) - .unwrap_or_else(|| Instant::now()); + .unwrap_or_else(Instant::now); loop { // Halt Dandelion monitor if we have been notified that we are stopping. if stop_state.is_stopped() { diff --git a/servers/src/grin/seed.rs b/servers/src/grin/seed.rs index 2b419fcca..3dc1fcc0f 100644 --- a/servers/src/grin/seed.rs +++ b/servers/src/grin/seed.rs @@ -33,7 +33,7 @@ use crate::p2p::ChainAdapter; use crate::util::StopState; // DNS Seeds with contact email associated -const MAINNET_DNS_SEEDS: &'static [&'static str] = &[ +const MAINNET_DNS_SEEDS: &[&str] = &[ "mainnet.seed.grin.icu", // gary.peverell@protonmail.com "mainnet.seed.713.mw", // jasper@713.mw "mainnet.seed.grin.lesceller.com", // q.lesceller@gmail.com @@ -41,7 +41,7 @@ const MAINNET_DNS_SEEDS: &'static [&'static str] = &[ "grinseed.yeastplume.org", // yeastplume@protonmail.com "mainnet-seed.grinnode.live", // info@grinnode.live ]; -const FLOONET_DNS_SEEDS: &'static [&'static str] = &[ +const FLOONET_DNS_SEEDS: &[&str] = &[ "floonet.seed.grin.icu", // gary.peverell@protonmail.com "floonet.seed.713.mw", // jasper@713.mw "floonet.seed.grin.lesceller.com", // q.lesceller@gmail.com @@ -124,8 +124,8 @@ pub fn connect_and_monitor( if Utc::now() - prev_ping > Duration::seconds(10) { let total_diff = peers.total_difficulty(); let total_height = peers.total_height(); - if total_diff.is_ok() && total_height.is_ok() { - peers.check_all(total_diff.unwrap(), total_height.unwrap()); + if let (Ok(total_diff), Ok(total_height)) = (total_diff, total_height) { + peers.check_all(total_diff, total_height); prev_ping = Utc::now(); } else { error!("failed to get peers difficulty and/or height"); @@ -224,7 +224,7 @@ fn monitor_peers( // take a random defunct peer and mark it healthy: over a long period any // peer will see another as defunct eventually, gives us a chance to retry - if defuncts.len() > 0 { + if !defuncts.is_empty() { defuncts.shuffle(&mut thread_rng()); let _ = peers.update_state(defuncts[0].addr, p2p::State::Healthy); } @@ -276,7 +276,7 @@ fn connect_to_seeds_and_preferred_peers( None => trace!("No preferred peers"), }; - if peer_addrs.len() == 0 { + if peer_addrs.is_empty() { warn!("No seeds were retrieved."); } @@ -322,10 +322,8 @@ fn listen_for_addrs( last_connect_time.format("%H:%M:%S%.3f").to_string(), ); continue; - } else { - if let Some(history) = connecting_history.get_mut(&addr) { - *history = now; - } + } else if let Some(history) = connecting_history.get_mut(&addr) { + *history = now; } } connecting_history.insert(addr, now); @@ -354,7 +352,7 @@ fn listen_for_addrs( let old: Vec<_> = connecting_history .iter() .filter(|&(_, t)| *t + Duration::seconds(connect_min_interval) < now) - .map(|(s, _)| s.clone()) + .map(|(s, _)| *s) .collect(); for addr in old { connecting_history.remove(&addr); @@ -392,7 +390,7 @@ fn resolve_dns_to_addrs(dns_records: &Vec) -> Vec { match 
dns.to_socket_addrs() { Ok(addrs) => addresses.append( &mut addrs - .map(|addr| PeerAddr(addr)) + .map(PeerAddr) .filter(|addr| !addresses.contains(addr)) .collect(), ), diff --git a/servers/src/grin/server.rs b/servers/src/grin/server.rs index 7819c49c7..adfd6f6dc 100644 --- a/servers/src/grin/server.rs +++ b/servers/src/grin/server.rs @@ -105,7 +105,7 @@ impl Server { let mut stratum_stats = serv.state_info.stratum_stats.write(); stratum_stats.is_enabled = true; } - serv.start_stratum_server(c.clone()); + serv.start_stratum_server(c); } } } @@ -125,7 +125,7 @@ impl Server { // This uses fs2 and should be safe cross-platform unless somebody abuses the file itself. fn one_grin_at_a_time(config: &ServerConfig) -> Result, Error> { let path = Path::new(&config.db_root); - fs::create_dir_all(path.clone())?; + fs::create_dir_all(&path)?; let path = path.join("grin.lock"); let lock_file = fs::OpenOptions::new() .read(true) @@ -283,7 +283,7 @@ impl Server { let key = match config.tls_certificate_key.clone() { Some(k) => k, None => { - let msg = format!("Private key for certificate is not set"); + let msg = "Private key for certificate is not set".to_string(); return Err(Error::ArgumentError(msg)); } }; @@ -298,16 +298,16 @@ impl Server { tx_pool.clone(), p2p_server.peers.clone(), sync_state.clone(), - api_secret.clone(), - foreign_api_secret.clone(), - tls_conf.clone(), + api_secret, + foreign_api_secret, + tls_conf, )?; info!("Starting dandelion monitor: {}", &config.api_http_addr); let dandelion_thread = dandelion_monitor::monitor_transactions( config.dandelion_config.clone(), tx_pool.clone(), - pool_net_adapter.clone(), + pool_net_adapter, verifier_cache.clone(), stop_state.clone(), )?; @@ -357,7 +357,7 @@ impl Server { let sync_state = self.sync_state.clone(); let mut stratum_server = stratumserver::StratumServer::new( - config.clone(), + config, self.chain.clone(), self.tx_pool.clone(), self.verifier_cache.clone(), @@ -395,7 +395,7 @@ impl Server { }; let mut miner = Miner::new( - config.clone(), + config, self.chain.clone(), self.tx_pool.clone(), self.verifier_cache.clone(), @@ -520,7 +520,7 @@ impl Server { .filter(|metadata| metadata.is_file()) .fold(0, |acc, m| acc + m.len()); - let disk_usage_gb = format!("{:.*}", 3, (disk_usage_bytes as f64 / 1_000_000_000 as f64)); + let disk_usage_gb = format!("{:.*}", 3, (disk_usage_bytes as f64 / 1_000_000_000_f64)); Ok(ServerStats { peer_count: self.peer_count(), diff --git a/servers/src/grin/sync/body_sync.rs b/servers/src/grin/sync/body_sync.rs index f2645d92d..d77e634a0 100644 --- a/servers/src/grin/sync/body_sync.rs +++ b/servers/src/grin/sync/body_sync.rs @@ -104,12 +104,12 @@ impl BodySync { .filter(|x| { // only ask for blocks that we have not yet processed // either successfully stored or in our orphan list - !self.chain.get_block(x).is_ok() && !self.chain.is_orphan(x) + self.chain.get_block(x).is_err() && !self.chain.is_orphan(x) }) .take(block_count) .collect::>(); - if hashes_to_get.len() > 0 { + if !hashes_to_get.is_empty() { let body_head = self.chain.head()?; let header_head = self.chain.header_head()?; diff --git a/servers/src/grin/sync/header_sync.rs b/servers/src/grin/sync/header_sync.rs index 360e6cb90..592a3b3ff 100644 --- a/servers/src/grin/sync/header_sync.rs +++ b/servers/src/grin/sync/header_sync.rs @@ -136,31 +136,29 @@ impl HeaderSync { if all_headers_received { // reset the stalling start time if syncing goes well self.stalling_ts = None; - } else { - if let Some(ref stalling_ts) = self.stalling_ts { - if let Some(ref 
peer) = self.syncing_peer { - match self.sync_state.status() { - SyncStatus::HeaderSync { .. } | SyncStatus::BodySync { .. } => { - // Ban this fraud peer which claims a higher work but can't send us the real headers - if now > *stalling_ts + Duration::seconds(120) - && header_head.total_difficulty < peer.info.total_difficulty() + } else if let Some(ref stalling_ts) = self.stalling_ts { + if let Some(ref peer) = self.syncing_peer { + match self.sync_state.status() { + SyncStatus::HeaderSync { .. } | SyncStatus::BodySync { .. } => { + // Ban this fraud peer which claims a higher work but can't send us the real headers + if now > *stalling_ts + Duration::seconds(120) + && header_head.total_difficulty < peer.info.total_difficulty() + { + if let Err(e) = self + .peers + .ban_peer(peer.info.addr, ReasonForBan::FraudHeight) { - if let Err(e) = self - .peers - .ban_peer(peer.info.addr, ReasonForBan::FraudHeight) - { - error!("failed to ban peer {}: {:?}", peer.info.addr, e); - } - info!( + error!("failed to ban peer {}: {:?}", peer.info.addr, e); + } + info!( "sync: ban a fraud peer: {}, claimed height: {}, total difficulty: {}", peer.info.addr, peer.info.height(), peer.info.total_difficulty(), ); - } } - _ => (), } + _ => (), } } } @@ -198,7 +196,7 @@ impl HeaderSync { ); let _ = peer.send_header_request(locator); - return Some(peer.clone()); + return Some(peer); } return None; } @@ -212,7 +210,7 @@ impl HeaderSync { // for security, clear history_locator[] in any case of header chain rollback, // the easiest way is to check whether the sync head and the header head are identical. - if self.history_locator.len() > 0 && tip.hash() != self.chain.header_head()?.hash() { + if !self.history_locator.is_empty() && tip.hash() != self.chain.header_head()?.hash() { self.history_locator.retain(|&x| x.0 == 0); } @@ -224,7 +222,7 @@ impl HeaderSync { locator.push(l); } else { // start at last known hash and go backward - let last_loc = locator.last().unwrap().clone(); + let last_loc = locator.last().unwrap(); let mut header_cursor = self.chain.get_block_header(&last_loc.1); while let Ok(header) = header_cursor { if header.height == h { @@ -246,13 +244,13 @@ impl HeaderSync { } // Whether we have a value close enough to the provided height in the locator -fn close_enough(locator: &Vec<(u64, Hash)>, height: u64) -> Option<(u64, Hash)> { - if locator.len() == 0 { +fn close_enough(locator: &[(u64, Hash)], height: u64) -> Option<(u64, Hash)> { + if locator.is_empty() { return None; } // bounds, lower that last is last if locator.last().unwrap().0 >= height { - return locator.last().map(|l| l.clone()); + return locator.last().copied(); } // higher than first is first if within an acceptable gap if locator[0].0 < height && height.saturating_sub(127) < locator[0].0 { @@ -261,9 +259,9 @@ fn close_enough(locator: &Vec<(u64, Hash)>, height: u64) -> Option<(u64, Hash)> for hh in locator.windows(2) { if height <= hh[0].0 && height > hh[1].0 { if hh[0].0 - height < height - hh[1].0 { - return Some(hh[0].clone()); + return Some(hh[0]); } else { - return Some(hh[1].clone()); + return Some(hh[1]); } } } diff --git a/servers/src/grin/sync/state_sync.rs b/servers/src/grin/sync/state_sync.rs index 6ea354204..e5ab4d30d 100644 --- a/servers/src/grin/sync/state_sync.rs +++ b/servers/src/grin/sync/state_sync.rs @@ -195,7 +195,7 @@ impl StateSync { error!("state_sync: send_txhashset_request err! 
{:?}", e); return Err(e); } - return Ok(peer.clone()); + return Ok(peer); } Err(p2p::Error::PeerException) } diff --git a/servers/src/grin/sync/syncer.rs b/servers/src/grin/sync/syncer.rs index b25494cd9..a5e66fb95 100644 --- a/servers/src/grin/sync/syncer.rs +++ b/servers/src/grin/sync/syncer.rs @@ -261,7 +261,7 @@ impl SyncRunner { }; let peer_diff = peer_info.total_difficulty(); - if peer_diff > local_diff.clone() + threshold.clone() { + if peer_diff > local_diff + threshold { info!( "sync: total_difficulty {}, peer_difficulty {}, threshold {} (last 5 blocks), enabling sync", local_diff, diff --git a/servers/src/mining/stratumserver.rs b/servers/src/mining/stratumserver.rs index e87883124..3cd83f18e 100644 --- a/servers/src/mining/stratumserver.rs +++ b/servers/src/mining/stratumserver.rs @@ -218,7 +218,7 @@ impl Handler { ) -> Self { Handler { id: id, - workers: Arc::new(WorkersList::new(stratum_stats.clone())), + workers: Arc::new(WorkersList::new(stratum_stats)), sync_state: sync_state, chain: chain, current_state: Arc::new(RwLock::new(State::new(minimum_share_difficulty))), @@ -450,7 +450,7 @@ impl Handler { } else { // Do some validation but dont submit let res = pow::verify_size(&b.header); - if !res.is_ok() { + if res.is_err() { // Return error status error!( "(Server ID: {}) Failed to validate share at height {}, hash {}, edge_bits {}, nonce {}, job_id {}. {:?}", @@ -471,7 +471,7 @@ impl Handler { let worker = self.workers.get_worker(worker_id)?; let submitted_by = match worker.login { None => worker.id.to_string(), - Some(login) => login.clone(), + Some(login) => login, }; info!( @@ -488,12 +488,11 @@ impl Handler { ); self.workers .update_stats(worker_id, |worker_stats| worker_stats.num_accepted += 1); - let submit_response; - if share_is_block { - submit_response = format!("blockfound - {}", b.hash().to_hex()); + let submit_response = if share_is_block { + format!("blockfound - {}", b.hash().to_hex()) } else { - submit_response = "ok".to_string(); - } + "ok".to_string() + }; return Ok(( serde_json::to_value(submit_response).unwrap(), share_is_block, @@ -518,7 +517,7 @@ impl Handler { "(Server ID: {}) sending block {} with id {} to stratum clients", self.id, job_template.height, job_template.job_id, ); - self.workers.broadcast(job_request_json.clone()); + self.workers.broadcast(job_request_json); } pub fn run( @@ -546,10 +545,11 @@ impl Handler { { debug!("resend updated block"); let mut state = self.current_state.write(); - let mut wallet_listener_url: Option = None; - if !config.burn_reward { - wallet_listener_url = Some(config.wallet_listener_url.clone()); - } + let wallet_listener_url = if !config.burn_reward { + Some(config.wallet_listener_url.clone()) + } else { + None + }; // If this is a new block, clear the current_block version history let clear_blocks = current_hash != latest_hash; @@ -599,10 +599,9 @@ impl Handler { fn accept_connections(listen_addr: SocketAddr, handler: Arc) { info!("Start tokio stratum server"); let task = async move { - let mut listener = TcpListener::bind(&listen_addr).await.expect(&format!( - "Stratum: Failed to bind to listen address {}", - listen_addr - )); + let mut listener = TcpListener::bind(&listen_addr).await.unwrap_or_else(|_| { + panic!("Stratum: Failed to bind to listen address {}", listen_addr) + }); let server = listener .incoming() .filter_map(|s| async { s.map_err(|e| error!("accept error = {:?}", e)).ok() }) @@ -726,7 +725,9 @@ impl WorkersList { pub fn login(&self, worker_id: usize, login: String, agent: String) -> Result<(), 
RpcError> { let mut wl = self.workers_list.write(); - let mut worker = wl.get_mut(&worker_id).ok_or(RpcError::internal_error())?; + let mut worker = wl + .get_mut(&worker_id) + .ok_or_else(RpcError::internal_error)?; worker.login = Some(login); // XXX TODO Future - Validate password? worker.agent = agent; @@ -750,7 +751,7 @@ impl WorkersList { .read() .worker_stats .get(worker_id) - .ok_or(RpcError::internal_error()) + .ok_or_else(RpcError::internal_error) .map(|ws| ws.clone()) } @@ -885,7 +886,7 @@ where { params .and_then(|v| serde_json::from_value(v).ok()) - .ok_or(RpcError::invalid_request()) + .ok_or_else(RpcError::invalid_request) } #[cfg(test)] diff --git a/src/bin/cmd/client.rs b/src/bin/cmd/client.rs index 2cacc1524..796062d91 100644 --- a/src/bin/cmd/client.rs +++ b/src/bin/cmd/client.rs @@ -61,7 +61,7 @@ pub fn client_command(client_args: &ArgMatches<'_>, global_config: GlobalConfig) pub fn show_status(config: &ServerConfig, api_secret: Option) { println!(); - let title = format!("Grin Server Status"); + let title = "Grin Server Status".to_string(); if term::stdout().is_none() { println!("Could not open terminal"); return; @@ -100,7 +100,7 @@ pub fn ban_peer(config: &ServerConfig, peer_addr: &SocketAddr, api_secret: Optio config.api_http_addr, peer_addr.to_string() ); - match api::client::post_no_ret(url.as_str(), api_secret, ¶ms).map_err(|e| Error::API(e)) { + match api::client::post_no_ret(url.as_str(), api_secret, ¶ms).map_err(Error::API) { Ok(_) => writeln!(e, "Successfully banned peer {}", peer_addr.to_string()).unwrap(), Err(_) => writeln!(e, "Failed to ban peer {}", peer_addr).unwrap(), }; @@ -118,7 +118,7 @@ pub fn unban_peer(config: &ServerConfig, peer_addr: &SocketAddr, api_secret: Opt let res: Result<(), api::Error>; res = api::client::post_no_ret(url.as_str(), api_secret, ¶ms); - match res.map_err(|e| Error::API(e)) { + match res.map_err(Error::API) { Ok(_) => writeln!(e, "Successfully unbanned peer {}", peer_addr).unwrap(), Err(_) => writeln!(e, "Failed to unban peer {}", peer_addr).unwrap(), }; @@ -132,10 +132,9 @@ pub fn list_connected_peers(config: &ServerConfig, api_secret: Option) { let peers_info = api::client::get::>(url.as_str(), api_secret); - match peers_info.map_err(|e| Error::API(e)) { + match peers_info.map_err(Error::API) { Ok(connected_peers) => { - let mut index = 0; - for connected_peer in connected_peers { + for (index, connected_peer) in connected_peers.into_iter().enumerate() { writeln!(e, "Peer {}:", index).unwrap(); writeln!(e, "Capabilities: {:?}", connected_peer.capabilities).unwrap(); writeln!(e, "User agent: {}", connected_peer.user_agent).unwrap(); @@ -145,7 +144,6 @@ pub fn list_connected_peers(config: &ServerConfig, api_secret: Option) { writeln!(e, "Total difficulty: {}", connected_peer.total_difficulty).unwrap(); writeln!(e, "Direction: {:?}", connected_peer.direction).unwrap(); println!(); - index = index + 1; } } Err(_) => writeln!(e, "Failed to get connected peers").unwrap(), @@ -159,7 +157,7 @@ fn get_status_from_node( api_secret: Option, ) -> Result { let url = format!("http://{}/v1/status", config.api_http_addr); - api::client::get::(url.as_str(), api_secret).map_err(|e| Error::API(e)) + api::client::get::(url.as_str(), api_secret).map_err(Error::API) } /// Error type wrapping underlying module errors. 
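Reviewer note: for anyone unfamiliar with the lints driving the stratumserver.rs and client.rs changes above, here is a standalone sketch (not code from this PR; the `lookup` helper and all names are illustrative) of two of the recurring idioms: `ok_or_else` defers building the error value until a lookup actually fails, and `enumerate()` replaces the hand-rolled `index` counter removed from `list_connected_peers`.

```rust
use std::collections::HashMap;

// `ok_or_else` only constructs the error when the Option is None,
// unlike `ok_or(expensive_error())`, which always evaluates its argument.
fn lookup(map: &HashMap<usize, String>, id: usize) -> Result<String, String> {
    map.get(&id)
        .cloned()
        .ok_or_else(|| format!("no worker with id {}", id))
}

fn main() {
    let peers = vec!["10.0.0.1:3414", "10.0.0.2:3414"];
    // `enumerate()` replaces the manual `let mut index = 0; ... index = index + 1;` pattern.
    for (index, peer) in peers.iter().enumerate() {
        println!("Peer {}: {}", index, peer);
    }

    let map = HashMap::new();
    println!("{:?}", lookup(&map, 7));
}
```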
diff --git a/src/bin/cmd/server.rs b/src/bin/cmd/server.rs index 9754ba7fd..69838afdc 100644 --- a/src/bin/cmd/server.rs +++ b/src/bin/cmd/server.rs @@ -121,10 +121,7 @@ pub fn server_command( } if let Some(seeds) = a.values_of("seed") { - let peers = seeds - .filter_map(|s| s.parse().ok()) - .map(|sa| PeerAddr(sa)) - .collect(); + let peers = seeds.filter_map(|s| s.parse().ok()).map(PeerAddr).collect(); server_config.p2p_config.seeding_type = Seeding::List; server_config.p2p_config.seeds = Some(PeerAddrs { peers }); } diff --git a/src/bin/grin.rs b/src/bin/grin.rs index 826e73a88..70fdcbd74 100644 --- a/src/bin/grin.rs +++ b/src/bin/grin.rs @@ -49,14 +49,12 @@ pub fn info_strings() -> (String, String) { built_info::GIT_VERSION.map_or_else(|| "".to_owned(), |v| format!(" (git {})", v)), built_info::TARGET, built_info::RUSTC_VERSION, - ) - .to_string(), + ), format!( "Built with profile \"{}\", features \"{}\".", built_info::PROFILE, built_info::FEATURES_STR, - ) - .to_string(), + ), ) } @@ -79,17 +77,12 @@ fn real_main() -> i32 { let node_config; // Temporary wallet warning message - match args.subcommand() { - ("wallet", _) => { - println!(); - println!("As of v1.1.0, the wallet has been split into a separate executable."); - println!( - "Please visit https://github.com/mimblewimble/grin-wallet/releases to download" - ); - println!(); - return 0; - } - _ => {} + if let ("wallet", _) = args.subcommand() { + println!(); + println!("As of v1.1.0, the wallet has been split into a separate executable."); + println!("Please visit https://github.com/mimblewimble/grin-wallet/releases to download"); + println!(); + return 0; } let chain_type = if args.is_present("floonet") { @@ -101,15 +94,12 @@ fn real_main() -> i32 { }; // Deal with configuration file creation - match args.subcommand() { - ("server", Some(server_args)) => { - // If it's just a server config command, do it and exit - if let ("config", Some(_)) = server_args.subcommand() { - cmd::config_command_server(&chain_type, SERVER_CONFIG_FILE_NAME); - return 0; - } + if let ("server", Some(server_args)) = args.subcommand() { + // If it's just a server config command, do it and exit + if let ("config", Some(_)) = server_args.subcommand() { + cmd::config_command_server(&chain_type, SERVER_CONFIG_FILE_NAME); + return 0; } - _ => {} } // Load relevant config @@ -150,7 +140,7 @@ fn real_main() -> i32 { }; init_logger(Some(logging_config), logs_tx); - global::set_mining_mode(config.members.unwrap().server.clone().chain_type); + global::set_mining_mode(config.members.unwrap().server.chain_type); if let Some(file_path) = &config.config_file_path { info!( diff --git a/src/bin/tui/menu.rs b/src/bin/tui/menu.rs index 7a8202778..8df9c4e9a 100644 --- a/src/bin/tui/menu.rs +++ b/src/bin/tui/menu.rs @@ -16,7 +16,7 @@ use cursive::align::HAlign; use cursive::direction::Orientation; -use cursive::event::{EventResult, Key}; +use cursive::event::Key; use cursive::view::Identifiable; use cursive::view::View; use cursive::views::{ @@ -63,12 +63,10 @@ pub fn create() -> Box { .on_pre_event('j', move |c| { let mut s: ViewRef> = c.find_name(MAIN_MENU).unwrap(); s.select_down(1)(c); - Some(EventResult::Consumed(None)); }) .on_pre_event('k', move |c| { let mut s: ViewRef> = c.find_name(MAIN_MENU).unwrap(); s.select_up(1)(c); - Some(EventResult::Consumed(None)); }) .on_pre_event(Key::Tab, move |c| { let mut s: ViewRef> = c.find_name(MAIN_MENU).unwrap(); @@ -77,7 +75,6 @@ pub fn create() -> Box { } else { s.select_down(1)(c); } - Some(EventResult::Consumed(None)); }); 
let main_menu = LinearLayout::new(Orientation::Vertical) .child(ResizedView::with_full_height(main_menu)) diff --git a/src/bin/tui/mining.rs b/src/bin/tui/mining.rs index 5e1dc638c..9064e923b 100644 --- a/src/bin/tui/mining.rs +++ b/src/bin/tui/mining.rs @@ -142,8 +142,8 @@ impl TableViewItem for DiffBlock { DiffColumn::PoWType => pow_type, DiffColumn::Difficulty => self.difficulty.to_string(), DiffColumn::SecondaryScaling => self.secondary_scaling.to_string(), - DiffColumn::Time => format!("{}", datetime).to_string(), - DiffColumn::Duration => format!("{}s", self.duration).to_string(), + DiffColumn::Time => format!("{}", datetime), + DiffColumn::Duration => format!("{}s", self.duration), } } @@ -320,7 +320,7 @@ impl TUIStatusListener for TUIMiningView { }); let dur = time::Duration::from_secs(stats.diff_stats.average_block_time); c.call_on_name("diff_avg_block_time", |t: &mut TextView| { - t.set_content(format!("{} Secs", dur.as_secs()).to_string()); + t.set_content(format!("{} Secs", dur.as_secs())); }); c.call_on_name("diff_avg_difficulty", |t: &mut TextView| { t.set_content(stats.diff_stats.average_difficulty.to_string()); diff --git a/src/bin/tui/peers.rs b/src/bin/tui/peers.rs index 6d250d846..167a84b2b 100644 --- a/src/bin/tui/peers.rs +++ b/src/bin/tui/peers.rs @@ -61,7 +61,8 @@ impl TableViewItem for PeerStats { fn to_column(&self, column: PeerColumn) -> String { // Converts optional size to human readable size fn size_to_string(size: u64) -> String { - size.file_size(CONVENTIONAL).unwrap_or("-".to_string()) + size.file_size(CONVENTIONAL) + .unwrap_or_else(|_| "-".to_string()) } match column { @@ -71,15 +72,13 @@ impl TableViewItem for PeerStats { "↑: {}, ↓: {}", size_to_string(self.sent_bytes_per_sec), size_to_string(self.received_bytes_per_sec), - ) - .to_string(), + ), PeerColumn::TotalDifficulty => format!( "{} D @ {} H ({}s)", self.total_difficulty, self.height, (Utc::now() - self.last_seen).num_seconds(), - ) - .to_string(), + ), PeerColumn::Direction => self.direction.clone(), PeerColumn::Version => format!("{}", self.version), PeerColumn::UserAgent => self.user_agent.clone(), @@ -175,8 +174,7 @@ impl TUIStatusListener for TUIPeerView { l.height, stats.chain_stats.total_difficulty, stats.chain_stats.height - ) - .to_string(), + ), None => "".to_string(), }; let _ = c.call_on_name( diff --git a/src/bin/tui/table.rs b/src/bin/tui/table.rs index a3b55bd3b..3b84f0349 100644 --- a/src/bin/tui/table.rs +++ b/src/bin/tui/table.rs @@ -625,8 +625,9 @@ impl + PartialEq, H: Eq + Hash + Copy + Clone + 'static> Tab } }); self.rows_to_items = rows_to_items; - - old_item.map(|o| self.set_selected_item(o)); + if let Some(o) = old_item { + self.set_selected_item(o) + } } } diff --git a/src/bin/tui/ui.rs b/src/bin/tui/ui.rs index 368849050..804c4aea3 100644 --- a/src/bin/tui/ui.rs +++ b/src/bin/tui/ui.rs @@ -188,7 +188,7 @@ impl Controller { let stat_update_interval = 1; let mut next_stat_update = Utc::now().timestamp() + stat_update_interval; while self.ui.step() { - while let Some(message) = self.rx.try_iter().next() { + if let Some(message) = self.rx.try_iter().next() { match message { ControllerMessage::Shutdown => { warn!("Shutdown in progress, please wait");
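Reviewer note on the TUI cleanups above: a minimal standalone sketch (not from the diff; names are illustrative) of the two remaining patterns, namely using `if let` instead of `Option::map` when the closure exists only for its side effect (as in table.rs), and dropping the redundant `.to_string()` after `format!`, which already returns a `String`.

```rust
fn set_selected_item(item: usize) {
    println!("selected row {}", item);
}

fn main() {
    let old_item: Option<usize> = Some(3);

    // Clearer than `old_item.map(|o| set_selected_item(o));`, which discards
    // the resulting Option and is flagged by clippy as `option_map_unit_fn`.
    if let Some(o) = old_item {
        set_selected_item(o);
    }

    // `format!` already yields a String, so a trailing `.to_string()` is redundant.
    let label: String = format!("{} Secs", 42);
    println!("{}", label);
}
```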