From 04a012375208da72cdda448233640205858a529a Mon Sep 17 00:00:00 2001
From: Quentin Le Sceller
Date: Wed, 12 Feb 2020 13:35:33 -0500
Subject: [PATCH] Less cloning and additional pattern simplifications (#3223)

* API Cleanup
* Chain Cleanup
* Core Cleanup
* Keychain Cleanup
* P2P Cleanup
* Pool Cleanup
* Store Cleanup
* Util Cleanup
* Cleanup clone_from_slice
* Address jasper comments
---
 api/src/auth.rs                  |  2 +-
 api/src/client.rs                |  4 +-
 api/src/handlers.rs              | 24 +++------
 api/src/handlers/blocks_api.rs   | 30 +++++------
 api/src/handlers/chain_api.rs    | 10 ++--
 api/src/handlers/pool_api.rs     |  8 +--
 api/src/handlers/server_api.rs   |  2 +-
 api/src/handlers/utils.rs        | 15 +++---
 api/src/handlers/version_api.rs  |  2 +-
 api/src/rest.rs                  | 10 ++--
 api/src/router.rs                | 20 ++++----
 api/src/types.rs                 | 20 +++----
 api/src/web.rs                   |  6 +--
 chain/src/chain.rs               | 14 +++---
 chain/src/pipe.rs                |  6 +--
 chain/src/store.rs               | 56 ++++++++++-----------
 chain/src/txhashset/txhashset.rs |  2 +-
 chain/src/txhashset/utxo_view.rs |  2 +-
 chain/src/types.rs               | 17 +++----
 core/src/core/transaction.rs     | 38 ++++++--------
 core/src/libtx/aggsig.rs         | 86 +++++++++++++++-----------------
 core/src/libtx/proof.rs          |  8 +--
 core/src/libtx/secp_ser.rs       | 10 ++--
 core/src/pow/cuckaroo.rs         | 16 +++---
 core/src/pow/cuckarood.rs        | 18 +++----
 core/src/pow/cuckaroom.rs        | 16 +++---
 core/src/pow/cuckatoo.rs         | 24 ++++-----
 core/src/ser.rs                  |  6 +--
 keychain/src/base58.rs           |  2 +-
 keychain/src/extkey_bip32.rs     |  8 +--
 keychain/src/types.rs            | 14 ++----
 p2p/src/conn.rs                  | 10 ++--
 p2p/src/msg.rs                   |  2 +-
 p2p/src/peers.rs                 | 20 ++++----
 p2p/src/store.rs                 |  6 +--
 p2p/src/types.rs                 | 16 +++---
 pool/src/pool.rs                 |  6 +--
 pool/src/transaction_pool.rs     | 25 ++++------
 store/src/lmdb.rs                |  2 +-
 store/src/pmmr.rs                |  2 +-
 store/src/types.rs               |  2 +-
 util/src/file.rs                 |  4 +-
 util/src/logger.rs               |  2 +-
 43 files changed, 267 insertions(+), 326 deletions(-)
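The pattern that dominates the hunks below is replacing `Err(kind)?` with an explicit conversion and return. Applying `?` to a freshly built `Err` does work (it converts via `From` and returns), but it reads as if something fallible were being tried, and clippy flags it as `try_err`. A minimal sketch of the two forms, with stand-in `ErrorKind`/`Error` types in the failure-style shape the grin crates use (names here are illustrative only):

    #[derive(Debug)]
    enum ErrorKind {
        NotFound,
    }

    #[derive(Debug)]
    struct Error(ErrorKind);

    impl From<ErrorKind> for Error {
        fn from(kind: ErrorKind) -> Error {
            Error(kind)
        }
    }

    fn lookup(found: bool) -> Result<u32, Error> {
        if !found {
            // Before: return Err(ErrorKind::NotFound)?;
            // After: say exactly what happens -- convert, then return.
            return Err(ErrorKind::NotFound.into());
        }
        Ok(42)
    }

    fn main() {
        assert!(lookup(false).is_err());
    }

Every trailing `.into()` added throughout this patch is that same `From` conversion, just written out.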
diff --git a/api/src/auth.rs b/api/src/auth.rs
index 9a0e3aacd..679219d52 100644
--- a/api/src/auth.rs
+++ b/api/src/auth.rs
@@ -128,7 +128,7 @@ impl Handler for BasicAuthURIMiddleware {
             unauthorized_response(&self.basic_realm)
         }
     } else {
-        return next_handler.call(req, handlers);
+        next_handler.call(req, handlers)
     }
 }
 }
diff --git a/api/src/client.rs b/api/src/client.rs
index c8502ed42..d4ad5b70d 100644
--- a/api/src/client.rs
+++ b/api/src/client.rs
@@ -34,7 +34,7 @@ pub type ClientResponseFuture<T> = Box<dyn Future<Item = T, Error = Error> + Sen
 /// Helper function to easily issue a HTTP GET request against a given URL that
 /// returns a JSON object. Handles request building, JSON deserialization and
 /// response code checking.
-pub fn get<'a, T>(url: &'a str, api_secret: Option<String>) -> Result<T, Error>
+pub fn get<T>(url: &str, api_secret: Option<String>) -> Result<T, Error>
 where
     for<'de> T: Deserialize<'de>,
 {
@@ -44,7 +44,7 @@ where
 /// Helper function to easily issue an async HTTP GET request against a given
 /// URL that returns a future. Handles request building, JSON deserialization
 /// and response code checking.
-pub fn get_async<'a, T>(url: &'a str, api_secret: Option<String>) -> ClientResponseFuture<T>
+pub fn get_async<T>(url: &str, api_secret: Option<String>) -> ClientResponseFuture<T>
 where
     for<'de> T: Deserialize<'de> + Send + 'static,
 {
diff --git a/api/src/handlers.rs b/api/src/handlers.rs
index ab85fc155..a4a40b361 100644
--- a/api/src/handlers.rs
+++ b/api/src/handlers.rs
@@ -191,14 +191,10 @@ impl OwnerAPIHandlerV2 {
 
 impl crate::router::Handler for OwnerAPIHandlerV2 {
     fn post(&self, req: Request<Body>) -> ResponseFuture {
-        Box::new(
-            self.handle_post_request(req)
-                .and_then(|r| ok(r))
-                .or_else(|e| {
-                    error!("Request Error: {:?}", e);
-                    ok(create_error_response(e))
-                }),
-        )
+        Box::new(self.handle_post_request(req).and_then(ok).or_else(|e| {
+            error!("Request Error: {:?}", e);
+            ok(create_error_response(e))
+        }))
     }
 
     fn options(&self, _req: Request<Body>) -> ResponseFuture {
@@ -260,14 +256,10 @@ impl ForeignAPIHandlerV2 {
 
 impl crate::router::Handler for ForeignAPIHandlerV2 {
     fn post(&self, req: Request<Body>) -> ResponseFuture {
-        Box::new(
-            self.handle_post_request(req)
-                .and_then(|r| ok(r))
-                .or_else(|e| {
-                    error!("Request Error: {:?}", e);
-                    ok(create_error_response(e))
-                }),
-        )
+        Box::new(self.handle_post_request(req).and_then(ok).or_else(|e| {
+            error!("Request Error: {:?}", e);
+            ok(create_error_response(e))
+        }))
     }
 
     fn options(&self, _req: Request<Body>) -> ResponseFuture {
diff --git a/api/src/handlers/blocks_api.rs b/api/src/handlers/blocks_api.rs
index e556b1309..206f4f101 100644
--- a/api/src/handlers/blocks_api.rs
+++ b/api/src/handlers/blocks_api.rs
@@ -43,7 +43,7 @@ impl HeaderHandler {
         if let Ok(height) = input.parse() {
             match w(&self.chain)?.get_header_by_height(height) {
                 Ok(header) => return Ok(BlockHeaderPrintable::from_header(&header)),
-                Err(_) => return Err(ErrorKind::NotFound)?,
+                Err(_) => return Err(ErrorKind::NotFound.into()),
             }
         }
         check_block_param(&input)?;
@@ -60,14 +60,14 @@ impl HeaderHandler {
         let oid = get_output(&self.chain, &commit_id)?.1;
         match w(&self.chain)?.get_header_for_output(&oid) {
             Ok(header) => Ok(BlockHeaderPrintable::from_header(&header)),
-            Err(_) => Err(ErrorKind::NotFound)?,
+            Err(_) => Err(ErrorKind::NotFound.into()),
         }
     }
 
     pub fn get_header_v2(&self, h: &Hash) -> Result<BlockHeaderPrintable, Error> {
         let chain = w(&self.chain)?;
         let header = chain.get_block_header(h).context(ErrorKind::NotFound)?;
-        return Ok(BlockHeaderPrintable::from_header(&header));
+        Ok(BlockHeaderPrintable::from_header(&header))
     }
 
     // Try to get hash from height, hash or output commit
@@ -80,7 +80,7 @@ impl HeaderHandler {
         if let Some(height) = height {
             match w(&self.chain)?.get_header_by_height(height) {
                 Ok(header) => return Ok(header.hash()),
-                Err(_) => return Err(ErrorKind::NotFound)?,
+                Err(_) => return Err(ErrorKind::NotFound.into()),
             }
         }
         if let Some(hash) = hash {
@@ -90,12 +90,10 @@ impl HeaderHandler {
             let oid = get_output_v2(&self.chain, &commit, false, false)?.1;
             match w(&self.chain)?.get_header_for_output(&oid) {
                 Ok(header) => return Ok(header.hash()),
-                Err(_) => return Err(ErrorKind::NotFound)?,
+                Err(_) => return Err(ErrorKind::NotFound.into()),
             }
         }
-        return Err(ErrorKind::Argument(
-            "not a valid hash, height or output commit".to_owned(),
-        ))?;
+        Err(ErrorKind::Argument("not a valid hash, height or output commit".to_owned()).into())
     }
 }
@@ -145,7 +143,7 @@ impl BlockHandler {
         if let Ok(height) = input.parse() {
             match w(&self.chain)?.get_header_by_height(height) {
                 Ok(header) => return Ok(header.hash()),
-                Err(_) => return Err(ErrorKind::NotFound)?,
+                Err(_) => return Err(ErrorKind::NotFound.into()),
             }
         }
         check_block_param(&input)?;
@@ -164,7 +162,7 @@ impl BlockHandler {
         if let Some(height) = height {
             match w(&self.chain)?.get_header_by_height(height) {
                 Ok(header) => return Ok(header.hash()),
-                Err(_) => return Err(ErrorKind::NotFound)?,
+                Err(_) => return Err(ErrorKind::NotFound.into()),
             }
         }
         if let Some(hash) = hash {
@@ -174,23 +172,19 @@ impl BlockHandler {
             let oid = get_output_v2(&self.chain, &commit, false, false)?.1;
             match w(&self.chain)?.get_header_for_output(&oid) {
                 Ok(header) => return Ok(header.hash()),
-                Err(_) => return Err(ErrorKind::NotFound)?,
+                Err(_) => return Err(ErrorKind::NotFound.into()),
             }
         }
-        return Err(ErrorKind::Argument(
-            "not a valid hash, height or output commit".to_owned(),
-        ))?;
+        Err(ErrorKind::Argument("not a valid hash, height or output commit".to_owned()).into())
     }
 }
 
-fn check_block_param(input: &String) -> Result<(), Error> {
+fn check_block_param(input: &str) -> Result<(), Error> {
     lazy_static! {
         static ref RE: Regex = Regex::new(r"[0-9a-fA-F]{64}").unwrap();
     }
     if !RE.is_match(&input) {
-        return Err(ErrorKind::Argument(
-            "Not a valid hash or height.".to_owned(),
-        ))?;
+        return Err(ErrorKind::Argument("Not a valid hash or height.".to_owned()).into());
     }
     Ok(())
 }
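`check_block_param` (and the `get`/`get_async` helpers above) now borrow `&str` rather than `&String` or a named-lifetime `&'a str`. A `&str` parameter accepts owned strings, literals, and slices alike through deref coercion, and it drops the pointless extra indirection clippy reports as `ptr_arg`. A free-standing illustration (the real check uses a regex):

    fn looks_like_hash(input: &str) -> bool {
        input.len() == 64 && input.chars().all(|c| c.is_ascii_hexdigit())
    }

    fn main() {
        let owned = "ab".repeat(32); // a String
        assert!(looks_like_hash(&owned)); // &String coerces to &str
        assert!(!looks_like_hash("not a hash")); // literals work directly
    }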
diff --git a/api/src/handlers/chain_api.rs b/api/src/handlers/chain_api.rs
index 6b291a8a9..ef0a2d746 100644
--- a/api/src/handlers/chain_api.rs
+++ b/api/src/handlers/chain_api.rs
@@ -170,7 +170,7 @@ impl OutputHandler {
                 outputs = [&outputs[..], &block_output_batch[..]].concat();
             }
         }
-        return Ok(outputs);
+        Ok(outputs)
     }
 
     // allows traversal of utxo set
@@ -327,7 +327,7 @@ impl OutputHandler {
         let mut return_vec = vec![];
         for i in (start_height..=end_height).rev() {
             if let Ok(res) = self.outputs_at_height(i, commitments.clone(), include_rp) {
-                if res.outputs.len() > 0 {
+                if !res.outputs.is_empty() {
                     return_vec.push(res);
                 }
             }
@@ -359,7 +359,7 @@ impl OutputHandler {
                 include_rproof,
                 include_merkle_proof,
             ) {
-                if res.len() > 0 {
+                if !res.is_empty() {
                     return_vec = [&return_vec[..], &res[..]].concat();
                 }
             }
@@ -394,7 +394,7 @@ impl KernelHandler {
             .trim_end_matches('/')
             .rsplit('/')
             .next()
-            .ok_or(ErrorKind::RequestError("missing excess".into()))?;
+            .ok_or_else(|| ErrorKind::RequestError("missing excess".into()))?;
         let excess = util::from_hex(excess.to_owned())
             .map_err(|_| ErrorKind::RequestError("invalid excess hex".into()))?;
         if excess.len() != 33 {
@@ -447,7 +447,7 @@ impl KernelHandler {
         min_height: Option<u64>,
         max_height: Option<u64>,
     ) -> Result<LocatedTxKernel, Error> {
-        let excess = util::from_hex(excess.to_owned())
+        let excess = util::from_hex(excess)
             .map_err(|_| ErrorKind::RequestError("invalid excess hex".into()))?;
         if excess.len() != 33 {
             return Err(ErrorKind::RequestError("invalid excess length".into()).into());
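The `ok_or` to `ok_or_else` change in `KernelHandler` is about lazy error construction: `ok_or` builds its error argument eagerly, even when the value is `Some`, while `ok_or_else` only runs the closure on the `None` path (clippy's `or_fun_call` lint). A small sketch:

    fn main() {
        let excess = "v2/chain/kernels/09abc".rsplit('/').next();
        // Eager: the String is allocated even though `excess` is Some.
        let eager: Result<&str, String> = excess.ok_or("missing excess".to_string());
        // Lazy: the closure (and its allocation) runs only on None.
        let lazy: Result<&str, String> = excess.ok_or_else(|| "missing excess".to_string());
        assert_eq!(eager, lazy);
    }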
diff --git a/api/src/handlers/pool_api.rs b/api/src/handlers/pool_api.rs
index 2877739df..41fd65225 100644
--- a/api/src/handlers/pool_api.rs
+++ b/api/src/handlers/pool_api.rs
@@ -84,10 +84,10 @@ impl PoolHandler {
             .blockchain
             .chain_head()
             .context(ErrorKind::Internal("Failed to get chain head".to_owned()))?;
-        let res = tx_pool
+        tx_pool
             .add_to_pool(source, tx, !fluff.unwrap_or(false), &header)
             .context(ErrorKind::Internal("Failed to update pool".to_owned()))?;
-        Ok(res)
+        Ok(())
     }
 }
 /// Dummy wrapper for the hex-encoded serialized transaction.
@@ -141,10 +141,10 @@ impl PoolPushHandler {
                     .blockchain
                     .chain_head()
                     .context(ErrorKind::Internal("Failed to get chain head".to_owned()))?;
-                let res = tx_pool
+                tx_pool
                     .add_to_pool(source, tx, !fluff, &header)
                     .context(ErrorKind::Internal("Failed to update pool".to_owned()))?;
-                Ok(res)
+                Ok(())
             }),
         )
     }
diff --git a/api/src/handlers/server_api.rs b/api/src/handlers/server_api.rs
index 1c724596f..7b7601d52 100644
--- a/api/src/handlers/server_api.rs
+++ b/api/src/handlers/server_api.rs
@@ -54,7 +54,7 @@ impl Handler for KernelDownloadHandler {
         } else {
             response(
                 StatusCode::INTERNAL_SERVER_ERROR,
-                format!("requesting kernel data from peer failed (no peers)"),
+                "requesting kernel data from peer failed (no peers)".to_string(),
             )
         }
     }
diff --git a/api/src/handlers/utils.rs b/api/src/handlers/utils.rs
index dc2f214ef..905c915f7 100644
--- a/api/src/handlers/utils.rs
+++ b/api/src/handlers/utils.rs
@@ -70,7 +70,7 @@ pub fn get_output(
             }
         }
     }
-    Err(ErrorKind::NotFound)?
+    Err(ErrorKind::NotFound.into())
 }
 
 /// Retrieves an output from the chain given a commit id (a tiny bit iteratively)
@@ -102,10 +102,11 @@ pub fn get_output_v2(
         match res {
             Ok(output_pos) => match chain.get_unspent_output_at(output_pos.position) {
                 Ok(output) => {
-                    let mut header = None;
-                    if include_merkle_proof && output.is_coinbase() {
-                        header = chain.get_header_by_height(output_pos.height).ok();
-                    }
+                    let header = if include_merkle_proof && output.is_coinbase() {
+                        chain.get_header_by_height(output_pos.height).ok()
+                    } else {
+                        None
+                    };
                     match OutputPrintable::from_output(
                         &output,
                         chain.clone(),
@@ -124,7 +125,7 @@ pub fn get_output_v2(
                     }
                 }
             }
-                Err(_) => return Err(ErrorKind::NotFound)?,
+                Err(_) => return Err(ErrorKind::NotFound.into()),
             },
             Err(e) => {
                 trace!(
@@ -136,5 +137,5 @@ pub fn get_output_v2(
             }
         }
     }
-    Err(ErrorKind::NotFound)?
+    Err(ErrorKind::NotFound.into())
 }
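In `get_output_v2`, a `let mut header = None` followed by conditional reassignment becomes a single `if` expression that yields the value. The binding ends up immutable and both outcomes are visible at the initialization site. The shape, with a hypothetical stand-in header type mirroring the hunk above:

    struct Header;

    fn maybe_fetch(include_merkle_proof: bool, is_coinbase: bool) -> Option<Header> {
        let header = if include_merkle_proof && is_coinbase {
            Some(Header) // fetched only when actually needed
        } else {
            None
        };
        header
    }

    fn main() {
        assert!(maybe_fetch(true, true).is_some());
        assert!(maybe_fetch(false, true).is_none());
    }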
diff --git a/api/src/handlers/version_api.rs b/api/src/handlers/version_api.rs
index 3d4b6d3e1..d67e35e54 100644
--- a/api/src/handlers/version_api.rs
+++ b/api/src/handlers/version_api.rs
@@ -21,7 +21,7 @@ use crate::web::*;
 use hyper::{Body, Request};
 use std::sync::Weak;
 
-const CRATE_VERSION: &'static str = env!("CARGO_PKG_VERSION");
+const CRATE_VERSION: &str = env!("CARGO_PKG_VERSION");
 
 /// Version handler. Get running node API version
 /// GET /v1/version
diff --git a/api/src/rest.rs b/api/src/rest.rs
index cfdeda65f..aaed890f3 100644
--- a/api/src/rest.rs
+++ b/api/src/rest.rs
@@ -137,9 +137,7 @@ impl TLSConfig {
         let keys = pemfile::pkcs8_private_keys(&mut reader)
             .map_err(|_| ErrorKind::Internal("failed to load private key".to_string()))?;
         if keys.len() != 1 {
-            return Err(ErrorKind::Internal(
-                "expected a single private key".to_string(),
-            ))?;
+            return Err(ErrorKind::Internal("expected a single private key".to_string()).into());
         }
         Ok(keys[0].clone())
     }
@@ -193,7 +191,8 @@ impl ApiServer {
         if self.shutdown_sender.is_some() {
             return Err(ErrorKind::Internal(
                 "Can't start HTTP API server, it's running already".to_string(),
-            ))?;
+            )
+            .into());
         }
         let (tx, _rx) = oneshot::channel::<()>();
         self.shutdown_sender = Some(tx);
@@ -222,7 +221,8 @@ impl ApiServer {
         if self.shutdown_sender.is_some() {
             return Err(ErrorKind::Internal(
                 "Can't start HTTPS API server, it's running already".to_string(),
-            ))?;
+            )
+            .into());
         }
         let tls_conf = conf.build_server_config()?;
diff --git a/api/src/router.rs b/api/src/router.rs
index 9aa1abddd..9892d254b 100644
--- a/api/src/router.rs
+++ b/api/src/router.rs
@@ -70,16 +70,16 @@ pub trait Handler {
         req: Request<Body>,
         mut _handlers: Box<dyn Iterator<Item = HandlerObj>>,
     ) -> ResponseFuture {
-        match req.method() {
-            &Method::GET => self.get(req),
-            &Method::POST => self.post(req),
-            &Method::PUT => self.put(req),
-            &Method::DELETE => self.delete(req),
-            &Method::PATCH => self.patch(req),
-            &Method::OPTIONS => self.options(req),
-            &Method::CONNECT => self.connect(req),
-            &Method::TRACE => self.trace(req),
-            &Method::HEAD => self.head(req),
+        match *req.method() {
+            Method::GET => self.get(req),
+            Method::POST => self.post(req),
+            Method::PUT => self.put(req),
+            Method::DELETE => self.delete(req),
+            Method::PATCH => self.patch(req),
+            Method::OPTIONS => self.options(req),
+            Method::CONNECT => self.connect(req),
+            Method::TRACE => self.trace(req),
+            Method::HEAD => self.head(req),
             _ => not_found(),
         }
     }
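The router hunk moves the dereference out of the match arms: matching on `*req.method()` lets each arm be a plain `Method::...` pattern instead of repeating `&Method::...` nine times. Both compile; dereferencing once at the scrutinee is just less noise. Sketched with a local enum standing in for hyper's `Method`:

    enum Method {
        Get,
        Post,
    }

    fn dispatch(method: &Method) -> &'static str {
        // One `*` here replaces a `&` in every arm below.
        match *method {
            Method::Get => "get",
            Method::Post => "post",
        }
    }

    fn main() {
        assert_eq!(dispatch(&Method::Post), "post");
    }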
diff --git a/api/src/types.rs b/api/src/types.rs
index 7b554253e..4b82f698b 100644
--- a/api/src/types.rs
+++ b/api/src/types.rs
@@ -191,9 +191,7 @@ pub struct Output {
 impl Output {
     pub fn new(commit: &pedersen::Commitment, height: u64, mmr_index: u64) -> Output {
         Output {
-            commit: PrintableCommitment {
-                commit: commit.clone(),
-            },
+            commit: PrintableCommitment { commit: *commit },
             height: height,
             mmr_index: mmr_index,
         }
@@ -207,7 +205,7 @@ pub struct PrintableCommitment {
 
 impl PrintableCommitment {
     pub fn commit(&self) -> pedersen::Commitment {
-        self.commit.clone()
+        self.commit
     }
 
     pub fn to_vec(&self) -> Vec<u8> {
@@ -330,17 +328,17 @@ impl OutputPrintable {
     }
 
     pub fn commit(&self) -> Result<pedersen::Commitment, ser::Error> {
-        Ok(self.commit.clone())
+        Ok(self.commit)
     }
 
     pub fn range_proof(&self) -> Result<pedersen::RangeProof, ser::Error> {
         let proof_str = self
             .proof
             .clone()
-            .ok_or_else(|| ser::Error::HexError(format!("output range_proof missing")))?;
+            .ok_or_else(|| ser::Error::HexError("output range_proof missing".to_string()))?;
 
         let p_vec = util::from_hex(proof_str)
-            .map_err(|_| ser::Error::HexError(format!("invalid output range_proof")))?;
+            .map_err(|_| ser::Error::HexError("invalid output range_proof".to_string()))?;
         let mut p_bytes = [0; util::secp::constants::MAX_PROOF_SIZE];
         for i in 0..p_bytes.len() {
             p_bytes[i] = p_vec[i];
@@ -481,7 +479,7 @@ impl<'de> serde::de::Deserialize<'de> for OutputPrintable {
             }
         }
 
-        const FIELDS: &'static [&'static str] = &[
+        const FIELDS: &[&str] = &[
             "output_type",
             "commit",
             "spent",
@@ -734,8 +732,7 @@ mod test {
     #[test]
     fn serialize_output_printable() {
-        let hex_output =
-            "{\
+        let hex_output = "{\
             \"output_type\":\"Coinbase\",\
             \"commit\":\"083eafae5d61a85ab07b12e1a51b3918d8e6de11fc6cde641d54af53608aa77b9f\",\
             \"spent\":false,\
@@ -752,8 +749,7 @@ mod test {
     #[test]
     fn serialize_output() {
-        let hex_commit =
-            "{\
+        let hex_commit = "{\
             \"commit\":\"083eafae5d61a85ab07b12e1a51b3918d8e6de11fc6cde641d54af53608aa77b9f\",\
             \"height\":0,\
             \"mmr_index\":0\
diff --git a/api/src/web.rs b/api/src/web.rs
index a2c2c695b..67d6304d9 100644
--- a/api/src/web.rs
+++ b/api/src/web.rs
@@ -114,7 +114,7 @@ impl From<&str> for QueryParams {
         let params = form_urlencoded::parse(query_string.as_bytes())
             .into_owned()
             .fold(HashMap::new(), |mut hm, (k, v)| {
-                hm.entry(k).or_insert(vec![]).push(v);
+                hm.entry(k).or_insert_with(|| vec![]).push(v);
                 hm
             });
         QueryParams { params }
@@ -152,7 +152,7 @@ macro_rules! must_get_query(
     ($req: expr) =>(
         match $req.uri().query() {
             Some(q) => q,
-            None => return Err(ErrorKind::RequestError("no query string".to_owned()))?,
+            None => return Err(ErrorKind::RequestError("no query string".to_owned()).into()),
         }
     ));
@@ -163,7 +163,7 @@ macro_rules! parse_param(
         None => $default,
         Some(val) => match val.parse() {
             Ok(val) => val,
-            Err(_) => return Err(ErrorKind::RequestError(format!("invalid value of parameter {}", $name)))?,
+            Err(_) => return Err(ErrorKind::RequestError(format!("invalid value of parameter {}", $name)).into()),
         }
     }
     ));
diff --git a/chain/src/chain.rs b/chain/src/chain.rs
index 13ba33144..ce4727d06 100644
--- a/chain/src/chain.rs
+++ b/chain/src/chain.rs
@@ -90,7 +90,7 @@ impl OrphanBlockPool {
         {
             let height_hashes = height_idx
                 .entry(orphan.block.header.height)
-                .or_insert(vec![]);
+                .or_insert_with(|| vec![]);
             height_hashes.push(orphan.block.hash());
             orphans.insert(orphan.block.hash(), orphan);
         }
@@ -125,11 +125,11 @@ impl OrphanBlockPool {
     /// Get an orphan from the pool indexed by the hash of its parent, removing
     /// it at the same time, preventing clone
-    fn remove_by_height(&self, height: &u64) -> Option<Vec<Orphan>> {
+    fn remove_by_height(&self, height: u64) -> Option<Vec<Orphan>> {
         let mut orphans = self.orphans.write();
         let mut height_idx = self.height_idx.write();
         height_idx
-            .remove(height)
+            .remove(&height)
             .map(|hs| hs.iter().filter_map(|h| orphans.remove(h)).collect())
     }
@@ -452,7 +452,7 @@ impl Chain {
         let mut orphan_accepted = false;
         let mut height_accepted = height;
 
-        if let Some(orphans) = self.orphans.remove_by_height(&height) {
+        if let Some(orphans) = self.orphans.remove_by_height(height) {
             let orphans_len = orphans.len();
             for (i, orphan) in orphans.into_iter().enumerate() {
                 debug!(
@@ -1219,7 +1219,7 @@ impl Chain {
     pub fn try_header_head(&self, timeout: Duration) -> Result<Option<Tip>, Error> {
         self.header_pmmr
             .try_read_for(timeout)
-            .map(|ref pmmr| self.read_header_head(pmmr).map(|x| Some(x)))
+            .map(|ref pmmr| self.read_header_head(pmmr).map(Some))
             .unwrap_or(Ok(None))
     }
@@ -1563,7 +1563,7 @@ fn setup_head(
             batch.save_block(&genesis)?;
             batch.save_body_head(&Tip::from_header(&genesis.header))?;
 
-            if genesis.kernels().len() > 0 {
+            if !genesis.kernels().is_empty() {
                 let (utxo_sum, kernel_sum) = (sums, genesis as &dyn Committed).verify_kernel_sums(
                     genesis.header.overage(),
                     genesis.header.total_kernel_offset(),
@@ -1582,7 +1582,7 @@ fn setup_head(
             info!("init: saved genesis: {:?}", genesis.hash());
         }
-        Err(e) => return Err(ErrorKind::StoreErr(e, "chain init load head".to_owned()))?,
+        Err(e) => return Err(ErrorKind::StoreErr(e, "chain init load head".to_owned()).into()),
     };
     batch.commit()?;
     Ok(())
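Both the query-param map in web.rs and the orphan `height_idx` in chain.rs switch `entry(k).or_insert(vec![])` to `or_insert_with(|| vec![])`. The difference is evaluation order: `or_insert`'s argument is built on every call, occupied or not, whereas `or_insert_with` defers construction to the vacant case, which matters once the default is anything costlier than an empty `Vec`:

    use std::collections::HashMap;

    fn main() {
        let mut idx: HashMap<u64, Vec<&str>> = HashMap::new();
        idx.entry(7).or_insert_with(|| vec![]).push("a");
        // Key 7 is occupied now, so the closure below never runs.
        idx.entry(7).or_insert_with(|| vec![]).push("b");
        assert_eq!(idx[&7], vec!["a", "b"]);
    }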
diff --git a/chain/src/pipe.rs b/chain/src/pipe.rs
index 53ebe85ae..dc37bf6f7 100644
--- a/chain/src/pipe.rs
+++ b/chain/src/pipe.rs
@@ -291,9 +291,7 @@ fn check_known_store(header: &BlockHeader, ctx: &mut BlockContext<'_>) -> Result
             // Not yet processed this block, we can proceed.
             Ok(())
         }
-        Err(e) => {
-            return Err(ErrorKind::StoreErr(e, "pipe get this block".to_owned()).into());
-        }
+        Err(e) => Err(ErrorKind::StoreErr(e, "pipe get this block".to_owned()).into()),
     }
 }
@@ -504,7 +502,7 @@ pub fn rewind_and_apply_header_fork(
     for h in fork_hashes {
         let header = batch
             .get_block_header(&h)
-            .map_err(|e| ErrorKind::StoreErr(e, format!("getting forked headers")))?;
+            .map_err(|e| ErrorKind::StoreErr(e, "getting forked headers".to_string()))?;
         ext.validate_root(&header)?;
         ext.apply_header(&header)?;
     }
diff --git a/chain/src/store.rs b/chain/src/store.rs
index d5fe9a3df..e02a788b3 100644
--- a/chain/src/store.rs
+++ b/chain/src/store.rs
@@ -28,14 +28,14 @@ use std::sync::Arc;
 
 const STORE_SUBPATH: &str = "chain";
 
-const BLOCK_HEADER_PREFIX: u8 = 'h' as u8;
-const BLOCK_PREFIX: u8 = 'b' as u8;
-const HEAD_PREFIX: u8 = 'H' as u8;
-const TAIL_PREFIX: u8 = 'T' as u8;
-const COMMIT_POS_PREFIX: u8 = 'c' as u8;
-const COMMIT_POS_HGT_PREFIX: u8 = 'p' as u8;
-const BLOCK_INPUT_BITMAP_PREFIX: u8 = 'B' as u8;
-const BLOCK_SUMS_PREFIX: u8 = 'M' as u8;
+const BLOCK_HEADER_PREFIX: u8 = b'h';
+const BLOCK_PREFIX: u8 = b'b';
+const HEAD_PREFIX: u8 = b'H';
+const TAIL_PREFIX: u8 = b'T';
+const COMMIT_POS_PREFIX: u8 = b'c';
+const COMMIT_POS_HGT_PREFIX: u8 = b'p';
+const BLOCK_INPUT_BITMAP_PREFIX: u8 = b'B';
+const BLOCK_SUMS_PREFIX: u8 = b'M';
 
 /// All chain-related database operations
 pub struct ChainStore {
@@ -45,7 +45,7 @@ pub struct ChainStore {
 impl ChainStore {
     /// Create new chain store
     pub fn new(db_root: &str) -> Result<ChainStore, Error> {
-        let db = store::Store::new(db_root, None, Some(STORE_SUBPATH.clone()), None)?;
+        let db = store::Store::new(db_root, None, Some(STORE_SUBPATH), None)?;
         Ok(ChainStore { db })
     }
@@ -64,12 +64,12 @@ impl ChainStore {
     /// The current chain head.
     pub fn head(&self) -> Result<Tip, Error> {
-        option_to_not_found(self.db.get_ser(&vec![HEAD_PREFIX]), || "HEAD".to_owned())
+        option_to_not_found(self.db.get_ser(&[HEAD_PREFIX]), || "HEAD".to_owned())
     }
 
     /// The current chain "tail" (earliest block in the store).
     pub fn tail(&self) -> Result<Tip, Error> {
-        option_to_not_found(self.db.get_ser(&vec![TAIL_PREFIX]), || "TAIL".to_owned())
+        option_to_not_found(self.db.get_ser(&[TAIL_PREFIX]), || "TAIL".to_owned())
     }
 
     /// Header of the block at the head of the block chain (not the same thing as header_head).
@@ -124,8 +124,8 @@ impl ChainStore {
     /// Get PMMR pos for the given output commitment.
     /// Note:
-    ///   - Original prefix 'COMMIT_POS_PREFIX' is not used anymore for normal case, refer to #2889 for detail.
-    ///   - To be compatible with the old callers, let's keep this function name but replace with new prefix 'COMMIT_POS_HGT_PREFIX'
+    /// - Original prefix 'COMMIT_POS_PREFIX' is not used anymore for normal case, refer to #2889 for detail.
+    /// - To be compatible with the old callers, let's keep this function name but replace with new prefix 'COMMIT_POS_HGT_PREFIX'
     pub fn get_output_pos(&self, commit: &Commitment) -> Result<u64, Error> {
         let res: Result<Option<(u64, u64)>, Error> = self.db.get_ser(&to_key(
             COMMIT_POS_HGT_PREFIX,
@@ -169,12 +169,12 @@ pub struct Batch<'a> {
 impl<'a> Batch<'a> {
     /// The head.
     pub fn head(&self) -> Result<Tip, Error> {
-        option_to_not_found(self.db.get_ser(&vec![HEAD_PREFIX]), || "HEAD".to_owned())
+        option_to_not_found(self.db.get_ser(&[HEAD_PREFIX]), || "HEAD".to_owned())
     }
 
     /// The tail.
     pub fn tail(&self) -> Result<Tip, Error> {
-        option_to_not_found(self.db.get_ser(&vec![TAIL_PREFIX]), || "TAIL".to_owned())
+        option_to_not_found(self.db.get_ser(&[TAIL_PREFIX]), || "TAIL".to_owned())
     }
 
     /// Header of the block at the head of the block chain (not the same thing as header_head).
@@ -184,12 +184,12 @@ impl<'a> Batch<'a> {
     /// Save body head to db.
     pub fn save_body_head(&self, t: &Tip) -> Result<(), Error> {
-        self.db.put_ser(&vec![HEAD_PREFIX], t)
+        self.db.put_ser(&[HEAD_PREFIX], t)
     }
 
     /// Save body "tail" to db.
     pub fn save_body_tail(&self, t: &Tip) -> Result<(), Error> {
-        self.db.put_ser(&vec![TAIL_PREFIX], t)
+        self.db.put_ser(&[TAIL_PREFIX], t)
     }
 
     /// get block
@@ -271,8 +271,8 @@ impl<'a> Batch<'a> {
     /// Get output_pos from index.
     /// Note:
-    ///   - Original prefix 'COMMIT_POS_PREFIX' is not used for normal case anymore, refer to #2889 for detail.
-    ///   - To be compatible with the old callers, let's keep this function name but replace with new prefix 'COMMIT_POS_HGT_PREFIX'
+    /// - Original prefix 'COMMIT_POS_PREFIX' is not used for normal case anymore, refer to #2889 for detail.
+    /// - To be compatible with the old callers, let's keep this function name but replace with new prefix 'COMMIT_POS_HGT_PREFIX'
     pub fn get_output_pos(&self, commit: &Commitment) -> Result<u64, Error> {
         let res: Result<Option<(u64, u64)>, Error> = self.db.get_ser(&to_key(
             COMMIT_POS_HGT_PREFIX,
@@ -478,12 +478,10 @@ impl<'a> Iterator for DifficultyIter<'a> {
         self.header = if self.header.is_none() {
             if let Some(ref batch) = self.batch {
                 batch.get_block_header(&self.start).ok()
+            } else if let Some(ref store) = self.store {
+                store.get_block_header(&self.start).ok()
             } else {
-                if let Some(ref store) = self.store {
-                    store.get_block_header(&self.start).ok()
-                } else {
-                    None
-                }
+                None
             }
         } else {
             self.prev_header.clone()
@@ -494,12 +492,10 @@ impl<'a> Iterator for DifficultyIter<'a> {
         if let Some(header) = self.header.clone() {
             if let Some(ref batch) = self.batch {
                 self.prev_header = batch.get_previous_header(&header).ok();
+            } else if let Some(ref store) = self.store {
+                self.prev_header = store.get_previous_header(&header).ok();
             } else {
-                if let Some(ref store) = self.store {
-                    self.prev_header = store.get_previous_header(&header).ok();
-                } else {
-                    self.prev_header = None;
-                }
+                self.prev_header = None;
             }
 
             let prev_difficulty = self
@@ -517,7 +513,7 @@ impl<'a> Iterator for DifficultyIter<'a> {
                 header.pow.is_secondary(),
             ))
         } else {
-            return None;
+            None
         }
     }
 }
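The store prefixes move from `'h' as u8` casts to byte literals. `b'h'` is a `u8` from the start: same value, no cast, and the type is stated at the literal rather than recovered from it:

    const BLOCK_HEADER_PREFIX: u8 = b'h';

    fn main() {
        assert_eq!(BLOCK_HEADER_PREFIX, 'h' as u8);
        assert_eq!(BLOCK_HEADER_PREFIX, 0x68);
    }

The `&vec![HEAD_PREFIX]` to `&[HEAD_PREFIX]` change in the same file is the companion cleanup: a stack array coerces to the `&[u8]` the store API needs, so there is no reason to heap-allocate a Vec per lookup.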
diff --git a/chain/src/txhashset/txhashset.rs b/chain/src/txhashset/txhashset.rs
index 1dd52b08c..73a84f7de 100644
--- a/chain/src/txhashset/txhashset.rs
+++ b/chain/src/txhashset/txhashset.rs
@@ -1423,7 +1423,7 @@ pub fn zip_read(root_dir: String, header: &BlockHeader) -> Result<File, Error> {
     // But practically, these zip files are not small ones, we just keep the zips in last 24 hours
     let data_dir = Path::new(&root_dir);
     let pattern = format!("{}_", TXHASHSET_ZIP);
-    if let Ok(n) = clean_files_by_prefix(data_dir.clone(), &pattern, 24 * 60 * 60) {
+    if let Ok(n) = clean_files_by_prefix(data_dir, &pattern, 24 * 60 * 60) {
         debug!(
             "{} zip files have been clean up in folder: {:?}",
             n, data_dir
diff --git a/chain/src/txhashset/utxo_view.rs b/chain/src/txhashset/utxo_view.rs
index e5b5a50c1..388adef23 100644
--- a/chain/src/txhashset/utxo_view.rs
+++ b/chain/src/txhashset/utxo_view.rs
@@ -114,7 +114,7 @@ impl<'a> UTXOView<'a> {
     /// that have not sufficiently matured.
     pub fn verify_coinbase_maturity(
         &self,
-        inputs: &Vec<Input>,
+        inputs: &[Input],
         height: u64,
         batch: &Batch<'_>,
     ) -> Result<(), Error> {
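`verify_coinbase_maturity` taking `&[Input]` instead of `&Vec<Input>` is the standard slice-parameter cleanup: a `&Vec<T>` caller still works (it coerces), but arrays and sub-slices now work too, and the callee can no longer depend on Vec-specific behavior. A sketch with a stand-in input type:

    #[derive(Clone, Copy)]
    struct Input(u64);

    fn sum_inputs(inputs: &[Input]) -> u64 {
        inputs.iter().map(|i| i.0).sum()
    }

    fn main() {
        let v = vec![Input(1), Input(2)];
        assert_eq!(sum_inputs(&v), 3); // &Vec<Input> coerces to &[Input]
        assert_eq!(sum_inputs(&v[..1]), 1); // sub-slices work too
    }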
diff --git a/chain/src/types.rs b/chain/src/types.rs
index a0f2c920e..dafe2aed7 100644
--- a/chain/src/types.rs
+++ b/chain/src/types.rs
@@ -28,13 +28,13 @@ bitflags! {
     /// Options for block validation
     pub struct Options: u32 {
         /// No flags
-        const NONE = 0b00000000;
+        const NONE = 0b0000_0000;
         /// Runs without checking the Proof of Work, mostly to make testing easier.
-        const SKIP_POW = 0b00000001;
+        const SKIP_POW = 0b0000_0001;
         /// Adds block while in syncing mode.
-        const SYNC = 0b00000010;
+        const SYNC = 0b0000_0010;
         /// Block validation on a block we mined ourselves
-        const MINE = 0b00000100;
+        const MINE = 0b0000_0100;
     }
 }
@@ -212,11 +212,10 @@ impl TxHashSetRoots {
             self.output_roots.merged_root(header),
         );
 
-        if header.output_root != self.output_root(header) {
-            Err(ErrorKind::InvalidRoot.into())
-        } else if header.range_proof_root != self.rproof_root {
-            Err(ErrorKind::InvalidRoot.into())
-        } else if header.kernel_root != self.kernel_root {
+        if header.output_root != self.output_root(header)
+            || header.range_proof_root != self.rproof_root
+            || header.kernel_root != self.kernel_root
+        {
             Err(ErrorKind::InvalidRoot.into())
         } else {
             Ok(())
diff --git a/core/src/core/transaction.rs b/core/src/core/transaction.rs
index 352e82b1a..eeff00d86 100644
--- a/core/src/core/transaction.rs
+++ b/core/src/core/transaction.rs
@@ -438,7 +438,7 @@ impl TxKernel {
     }
 
     /// Batch signature verification.
-    pub fn batch_sig_verify(tx_kernels: &Vec<TxKernel>) -> Result<(), Error> {
+    pub fn batch_sig_verify(tx_kernels: &[TxKernel]) -> Result<(), Error> {
         let len = tx_kernels.len();
         let mut sigs: Vec<secp::Signature> = Vec::with_capacity(len);
         let mut pubkeys: Vec<secp::key::PublicKey> = Vec::with_capacity(len);
@@ -629,10 +629,9 @@ impl TransactionBody {
     /// inputs, if any, are kept intact.
     /// Sort order is maintained.
     pub fn with_input(mut self, input: Input) -> TransactionBody {
-        self.inputs
-            .binary_search(&input)
-            .err()
-            .map(|e| self.inputs.insert(e, input));
+        if let Err(e) = self.inputs.binary_search(&input) {
+            self.inputs.insert(e, input)
+        };
         self
     }
@@ -640,10 +639,9 @@ impl TransactionBody {
     /// outputs, if any, are kept intact.
     /// Sort order is maintained.
     pub fn with_output(mut self, output: Output) -> TransactionBody {
-        self.outputs
-            .binary_search(&output)
-            .err()
-            .map(|e| self.outputs.insert(e, output));
+        if let Err(e) = self.outputs.binary_search(&output) {
+            self.outputs.insert(e, output)
+        };
         self
     }
@@ -651,10 +649,9 @@ impl TransactionBody {
     /// kernels, if any, are kept intact.
     /// Sort order is maintained.
     pub fn with_kernel(mut self, kernel: TxKernel) -> TransactionBody {
-        self.kernels
-            .binary_search(&kernel)
-            .err()
-            .map(|e| self.kernels.insert(e, kernel));
+        if let Err(e) = self.kernels.binary_search(&kernel) {
+            self.kernels.insert(e, kernel)
+        };
         self
     }
@@ -1441,13 +1438,13 @@ impl PMMRable for Output {
 
 impl OutputFeatures {
     /// Is this a coinbase output?
-    pub fn is_coinbase(&self) -> bool {
-        *self == OutputFeatures::Coinbase
+    pub fn is_coinbase(self) -> bool {
+        self == OutputFeatures::Coinbase
     }
 
     /// Is this a plain output?
-    pub fn is_plain(&self) -> bool {
-        *self == OutputFeatures::Plain
+    pub fn is_plain(self) -> bool {
+        self == OutputFeatures::Plain
     }
 }
@@ -1481,13 +1478,10 @@ impl Output {
     }
 
     /// Batch validates the range proofs using the commitments
-    pub fn batch_verify_proofs(
-        commits: &Vec<Commitment>,
-        proofs: &Vec<RangeProof>,
-    ) -> Result<(), Error> {
+    pub fn batch_verify_proofs(commits: &[Commitment], proofs: &[RangeProof]) -> Result<(), Error> {
         let secp = static_secp_instance();
         secp.lock()
-            .verify_bullet_proof_multi(commits.clone(), proofs.clone(), None)?;
+            .verify_bullet_proof_multi(commits.to_vec(), proofs.to_vec(), None)?;
         Ok(())
     }
 }
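The `with_input`/`with_output`/`with_kernel` builders keep their vectors sorted by inserting at the position `binary_search` reports for a missing element. Rewriting `.err().map(...)` as `if let Err(pos)` keeps the exact behavior — insert only when absent — while making the control flow explicit instead of hiding a side effect in an `Option` combinator:

    fn insert_sorted(v: &mut Vec<u32>, x: u32) {
        // Err carries the index where x would belong.
        if let Err(pos) = v.binary_search(&x) {
            v.insert(pos, x);
        }
    }

    fn main() {
        let mut v = vec![1, 3, 5];
        insert_sorted(&mut v, 4);
        insert_sorted(&mut v, 3); // duplicate: left intact
        assert_eq!(v, vec![1, 3, 4, 5]);
    }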
diff --git a/core/src/libtx/aggsig.rs b/core/src/libtx/aggsig.rs
index 60440ce8d..352cefdbd 100644
--- a/core/src/libtx/aggsig.rs
+++ b/core/src/libtx/aggsig.rs
@@ -83,12 +83,12 @@ pub fn create_secnonce(secp: &Secp256k1) -> Result<SecretKey, Error> {
 /// // ... Encode message
 /// let message = Message::from_slice(&msg_bytes).unwrap();
 /// let sig_part = aggsig::calculate_partial_sig(
-///	&secp,
-///	&secret_key,
-///	&secret_nonce,
-///	&pub_nonce_sum,
-///	Some(&pub_key_sum),
-///	&message,
+///     &secp,
+///     &secret_key,
+///     &secret_nonce,
+///     &pub_nonce_sum,
+///     Some(&pub_key_sum),
+///     &message,
 ///).unwrap();
 /// ```
@@ -153,12 +153,12 @@ pub fn calculate_partial_sig(
 /// // ... Encode message
 /// let message = Message::from_slice(&msg_bytes).unwrap();
 /// let sig_part = aggsig::calculate_partial_sig(
-///	&secp,
-///	&secret_key,
-///	&secret_nonce,
-///	&pub_nonce_sum,
-///	Some(&pub_key_sum),
-///	&message,
+///     &secp,
+///     &secret_key,
+///     &secret_nonce,
+///     &pub_nonce_sum,
+///     Some(&pub_key_sum),
+///     &message,
 ///).unwrap();
 ///
 /// // Now verify the signature, ensuring the same values used to create
@@ -166,12 +166,12 @@ pub fn calculate_partial_sig(
 /// let public_key = PublicKey::from_secret_key(&secp, &secret_key).unwrap();
 ///
 /// let result = aggsig::verify_partial_sig(
-///	&secp,
-///	&sig_part,
-///	&pub_nonce_sum,
-///	&public_key,
-///	Some(&pub_key_sum),
-///	&message,
+///     &secp,
+///     &sig_part,
+///     &pub_nonce_sum,
+///     &public_key,
+///     Some(&pub_key_sum),
+///     &message,
 ///);
 /// ```
@@ -192,9 +192,7 @@ pub fn verify_partial_sig(
         pubkey_sum,
         true,
     ) {
-        Err(ErrorKind::Signature(
-            "Signature validation error".to_string(),
-        ))?
+        return Err(ErrorKind::Signature("Signature validation error".to_string()).into());
     }
     Ok(())
 }
@@ -238,9 +236,9 @@ pub fn verify_partial_sig(
 /// let builder = proof::ProofBuilder::new(&keychain);
 /// let rproof = proof::create(&keychain, &builder, value, &key_id, switch, commit, None).unwrap();
 /// let output = Output {
-///	features: OutputFeatures::Coinbase,
-///	commit: commit,
-///	proof: rproof,
+///     features: OutputFeatures::Coinbase,
+///     commit: commit,
+///     proof: rproof,
 /// };
 /// let height = 20;
 /// let over_commit = secp.commit_value(reward(fees)).unwrap();
@@ -305,9 +303,9 @@ where
 /// let builder = proof::ProofBuilder::new(&keychain);
 /// let rproof = proof::create(&keychain, &builder, value, &key_id, switch, commit, None).unwrap();
 /// let output = Output {
-///	features: OutputFeatures::Coinbase,
-///	commit: commit,
-///	proof: rproof,
+///     features: OutputFeatures::Coinbase,
+///     commit: commit,
+///     proof: rproof,
 /// };
 /// let height = 20;
 /// let over_commit = secp.commit_value(reward(fees)).unwrap();
@@ -320,7 +318,7 @@ where
 ///
 /// // Verify the signature from the excess commit
 /// let sig_verifies =
-///	aggsig::verify_single_from_commit(&keychain.secp(), &sig, &msg, &excess);
+///     aggsig::verify_single_from_commit(&keychain.secp(), &sig, &msg, &excess);
 /// assert!(!sig_verifies.is_err());
 /// ```
@@ -332,9 +330,7 @@ pub fn verify_single_from_commit(
 ) -> Result<(), Error> {
     let pubkey = commit.to_pubkey(secp)?;
     if !verify_single(secp, sig, msg, None, &pubkey, Some(&pubkey), false) {
-        Err(ErrorKind::Signature(
-            "Signature validation error".to_string(),
-        ))?
+        return Err(ErrorKind::Signature("Signature validation error".to_string()).into());
     }
     Ok(())
 }
@@ -376,21 +372,21 @@ pub fn verify_single_from_commit(
 /// // ... Encode message
 /// let message = Message::from_slice(&msg_bytes).unwrap();
 /// let sig_part = aggsig::calculate_partial_sig(
-///	&secp,
-///	&secret_key,
-///	&secret_nonce,
-///	&pub_nonce_sum,
-///	Some(&pub_key_sum),
-///	&message,
+///     &secp,
+///     &secret_key,
+///     &secret_nonce,
+///     &pub_nonce_sum,
+///     Some(&pub_key_sum),
+///     &message,
 /// ).unwrap();
 /// // ... Verify above, once all signatures have been added together
 /// let sig_verifies = aggsig::verify_completed_sig(
-///	&secp,
-///	&sig_part,
-///	&pub_key_sum,
-///	Some(&pub_key_sum),
-///	&message,
-///	);
+///     &secp,
+///     &sig_part,
+///     &pub_key_sum,
+///     Some(&pub_key_sum),
+///     &message,
+///     );
 /// assert!(!sig_verifies.is_err());
 /// ```
@@ -402,9 +398,7 @@ pub fn verify_completed_sig(
     msg: &secp::Message,
 ) -> Result<(), Error> {
     if !verify_single(secp, sig, msg, None, pubkey, pubkey_sum, true) {
-        Err(ErrorKind::Signature(
-            "Signature validation error".to_string(),
-        ))?
+        return Err(ErrorKind::Signature("Signature validation error".to_string()).into());
     }
     Ok(())
 }
diff --git a/core/src/libtx/proof.rs b/core/src/libtx/proof.rs
index 24f875f1b..0c7442f26 100644
--- a/core/src/libtx/proof.rs
+++ b/core/src/libtx/proof.rs
@@ -195,9 +195,7 @@ where
         let mut msg = [0; 20];
         msg[2] = switch as u8;
         let id_bytes = id.to_bytes();
-        for i in 0..17 {
-            msg[i + 3] = id_bytes[i];
-        }
+        msg[3..20].clone_from_slice(&id_bytes[..17]);
         Ok(ProofMessage::from_bytes(&msg))
     }
@@ -307,9 +305,7 @@ where
     ) -> Result<ProofMessage, Error> {
         let mut msg = [0; 20];
         let id_ser = id.serialize_path();
-        for i in 0..16 {
-            msg[i + 4] = id_ser[i];
-        }
+        msg[4..20].clone_from_slice(&id_ser[..16]);
         Ok(ProofMessage::from_bytes(&msg))
     }
diff --git a/core/src/libtx/secp_ser.rs b/core/src/libtx/secp_ser.rs
index 03d86851e..a29c21303 100644
--- a/core/src/libtx/secp_ser.rs
+++ b/core/src/libtx/secp_ser.rs
@@ -81,13 +81,13 @@ pub mod option_sig_serde {
         let static_secp = static_secp_instance();
         let static_secp = static_secp.lock();
         Option::<String>::deserialize(deserializer).and_then(|res| match res {
-            Some(string) => from_hex(string.to_string())
+            Some(string) => from_hex(string)
                 .map_err(|err| Error::custom(err.to_string()))
                 .and_then(|bytes: Vec<u8>| {
                     let mut b = [0u8; 64];
                     b.copy_from_slice(&bytes[0..64]);
                     secp::Signature::from_compact(&static_secp, &b)
-                        .map(|val| Some(val))
+                        .map(Some)
                         .map_err(|err| Error::custom(err.to_string()))
                 }),
             None => Ok(None),
@@ -123,13 +123,13 @@ pub mod option_seckey_serde {
         let static_secp = static_secp_instance();
         let static_secp = static_secp.lock();
         Option::<String>::deserialize(deserializer).and_then(|res| match res {
-            Some(string) => from_hex(string.to_string())
+            Some(string) => from_hex(string)
                 .map_err(|err| Error::custom(err.to_string()))
                 .and_then(|bytes: Vec<u8>| {
                     let mut b = [0u8; 32];
                     b.copy_from_slice(&bytes[0..32]);
                     secp::key::SecretKey::from_slice(&static_secp, &b)
-                        .map(|val| Some(val))
+                        .map(Some)
                        .map_err(|err| Error::custom(err.to_string()))
                 }),
             None => Ok(None),
@@ -195,7 +195,7 @@ pub mod option_commitment_serde {
         D: Deserializer<'de>,
     {
         Option::<String>::deserialize(deserializer).and_then(|res| match res {
-            Some(string) => from_hex(string.to_string())
+            Some(string) => from_hex(string)
                 .map_err(|err| Error::custom(err.to_string()))
                .and_then(|bytes: Vec<u8>| Ok(Some(Commitment::from_vec(bytes.to_vec())))),
             None => Ok(None),
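The proof.rs hunks collapse index-by-index copy loops into one slice copy. `clone_from_slice` (and `copy_from_slice`, its memcpy twin for `Copy` types) panics if the source and destination lengths differ, so the explicit ranges carry the same bounds the loops did:

    fn main() {
        let id_bytes = [7u8; 17];
        let mut msg = [0u8; 20];
        // Before: for i in 0..17 { msg[i + 3] = id_bytes[i]; }
        msg[3..20].copy_from_slice(&id_bytes[..17]);
        assert_eq!(msg[2], 0);
        assert_eq!(&msg[3..], &id_bytes[..]);
    }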
diff --git a/core/src/pow/cuckaroo.rs b/core/src/pow/cuckaroo.rs
index b378971c4..9ce288285 100644
--- a/core/src/pow/cuckaroo.rs
+++ b/core/src/pow/cuckaroo.rs
@@ -70,7 +70,7 @@ where
     fn verify(&self, proof: &Proof) -> Result<(), Error> {
         if proof.proof_size() != global::proofsize() {
-            return Err(ErrorKind::Verification("wrong cycle length".to_owned()))?;
+            return Err(ErrorKind::Verification("wrong cycle length".to_owned()).into());
         }
         let nonces = &proof.nonces;
         let mut uvs = vec![0u64; 2 * proof.proof_size()];
@@ -79,10 +79,10 @@ where
         for n in 0..proof.proof_size() {
             if nonces[n] > to_u64!(self.params.edge_mask) {
-                return Err(ErrorKind::Verification("edge too big".to_owned()))?;
+                return Err(ErrorKind::Verification("edge too big".to_owned()).into());
             }
             if n > 0 && nonces[n] <= nonces[n - 1] {
-                return Err(ErrorKind::Verification("edges not ascending".to_owned()))?;
+                return Err(ErrorKind::Verification("edges not ascending".to_owned()).into());
             }
             // 21 is standard siphash rotation constant
             let edge = to_edge!(
@@ -95,9 +95,7 @@ where
             xor1 ^= uvs[2 * n + 1];
         }
         if xor0 | xor1 != 0 {
-            return Err(ErrorKind::Verification(
-                "endpoints don't match up".to_owned(),
-            ))?;
+            return Err(ErrorKind::Verification("endpoints don't match up".to_owned()).into());
         }
         let mut n = 0;
         let mut i = 0;
@@ -114,13 +112,13 @@ where
                 if uvs[k] == uvs[i] {
                     // find other edge endpoint matching one at i
                     if j != i {
-                        return Err(ErrorKind::Verification("branch in cycle".to_owned()))?;
+                        return Err(ErrorKind::Verification("branch in cycle".to_owned()).into());
                     }
                     j = k;
                 }
             }
             if j == i {
-                return Err(ErrorKind::Verification("cycle dead ends".to_owned()))?;
+                return Err(ErrorKind::Verification("cycle dead ends".to_owned()).into());
             }
             i = j ^ 1;
             n += 1;
@@ -131,7 +129,7 @@ where
         if n == self.params.proof_size {
             Ok(())
         } else {
-            Err(ErrorKind::Verification("cycle too short".to_owned()))?
+            Err(ErrorKind::Verification("cycle too short".to_owned()).into())
         }
     }
 }
diff --git a/core/src/pow/cuckarood.rs b/core/src/pow/cuckarood.rs
index 5d424c83c..089f39259 100644
--- a/core/src/pow/cuckarood.rs
+++ b/core/src/pow/cuckarood.rs
@@ -69,7 +69,7 @@ where
     fn verify(&self, proof: &Proof) -> Result<(), Error> {
         if proof.proof_size() != global::proofsize() {
-            return Err(ErrorKind::Verification("wrong cycle length".to_owned()))?;
+            return Err(ErrorKind::Verification("wrong cycle length".to_owned()).into());
         }
         let nonces = &proof.nonces;
         let mut uvs = vec![0u64; 2 * proof.proof_size()];
@@ -81,13 +81,13 @@ where
         for n in 0..proof.proof_size() {
             let dir = (nonces[n] & 1) as usize;
             if ndir[dir] >= proof.proof_size() / 2 {
-                return Err(ErrorKind::Verification("edges not balanced".to_owned()))?;
+                return Err(ErrorKind::Verification("edges not balanced".to_owned()).into());
             }
             if nonces[n] > to_u64!(self.params.edge_mask) {
-                return Err(ErrorKind::Verification("edge too big".to_owned()))?;
+                return Err(ErrorKind::Verification("edge too big".to_owned()).into());
             }
             if n > 0 && nonces[n] <= nonces[n - 1] {
-                return Err(ErrorKind::Verification("edges not ascending".to_owned()))?;
+                return Err(ErrorKind::Verification("edges not ascending".to_owned()).into());
             }
             let edge = to_edge!(
                 T,
@@ -101,9 +101,7 @@ where
             ndir[dir] += 1;
         }
         if xor0 | xor1 != 0 {
-            return Err(ErrorKind::Verification(
-                "endpoints don't match up".to_owned(),
-            ))?;
+            return Err(ErrorKind::Verification("endpoints don't match up".to_owned()).into());
         }
         let mut n = 0;
         let mut i = 0;
@@ -115,13 +113,13 @@ where
                 if uvs[k] == uvs[i] {
                     // find reverse edge endpoint identical to one at i
                     if j != i {
-                        return Err(ErrorKind::Verification("branch in cycle".to_owned()))?;
+                        return Err(ErrorKind::Verification("branch in cycle".to_owned()).into());
                     }
                     j = k;
                 }
             }
             if j == i {
-                return Err(ErrorKind::Verification("cycle dead ends".to_owned()))?;
+                return Err(ErrorKind::Verification("cycle dead ends".to_owned()).into());
             }
             i = j ^ 1;
             n += 1;
@@ -132,7 +130,7 @@ where
         if n == self.params.proof_size {
             Ok(())
         } else {
-            Err(ErrorKind::Verification("cycle too short".to_owned()))?
+            Err(ErrorKind::Verification("cycle too short".to_owned()).into())
         }
     }
 }
diff --git a/core/src/pow/cuckaroom.rs b/core/src/pow/cuckaroom.rs
index 2907ac268..7590447bb 100644
--- a/core/src/pow/cuckaroom.rs
+++ b/core/src/pow/cuckaroom.rs
@@ -69,7 +69,7 @@ where
     fn verify(&self, proof: &Proof) -> Result<(), Error> {
         let proofsize = proof.proof_size();
         if proofsize != global::proofsize() {
-            return Err(ErrorKind::Verification("wrong cycle length".to_owned()))?;
+            return Err(ErrorKind::Verification("wrong cycle length".to_owned()).into());
         }
         let nonces = &proof.nonces;
         let mut from = vec![0u32; proofsize];
@@ -80,10 +80,10 @@ where
         for n in 0..proofsize {
             if nonces[n] > to_u64!(self.params.edge_mask) {
-                return Err(ErrorKind::Verification("edge too big".to_owned()))?;
+                return Err(ErrorKind::Verification("edge too big".to_owned()).into());
             }
             if n > 0 && nonces[n] <= nonces[n - 1] {
-                return Err(ErrorKind::Verification("edges not ascending".to_owned()))?;
+                return Err(ErrorKind::Verification("edges not ascending".to_owned()).into());
             }
             let edge = to_edge!(
                 T,
@@ -95,9 +95,7 @@ where
             xor_to ^= to[n];
         }
         if xor_from != xor_to {
-            return Err(ErrorKind::Verification(
-                "endpoints don't match up".to_owned(),
-            ))?;
+            return Err(ErrorKind::Verification("endpoints don't match up".to_owned()).into());
         }
         let mut visited = vec![false; proofsize];
         let mut n = 0;
@@ -105,14 +103,14 @@ where
         loop {
             // follow cycle
            if visited[i] {
-                return Err(ErrorKind::Verification("branch in cycle".to_owned()))?;
+                return Err(ErrorKind::Verification("branch in cycle".to_owned()).into());
             }
             visited[i] = true;
             let mut nexti = 0;
             while from[nexti] != to[i] {
                 nexti += 1;
                 if nexti == proofsize {
-                    return Err(ErrorKind::Verification("cycle dead ends".to_owned()))?;
+                    return Err(ErrorKind::Verification("cycle dead ends".to_owned()).into());
                 }
             }
             i = nexti;
@@ -125,7 +123,7 @@ where
         if n == proofsize {
             Ok(())
         } else {
-            Err(ErrorKind::Verification("cycle too short".to_owned()))?
+            Err(ErrorKind::Verification("cycle too short".to_owned()).into())
         }
     }
 }
diff --git a/core/src/pow/cuckatoo.rs b/core/src/pow/cuckatoo.rs
index 8bf871bcd..a7c688b7a 100644
--- a/core/src/pow/cuckatoo.rs
+++ b/core/src/pow/cuckatoo.rs
@@ -52,7 +52,7 @@ where
     /// Create a new graph with given parameters
     pub fn new(max_edges: T, max_sols: u32, proof_size: usize) -> Result<Graph<T>, Error> {
         if to_u64!(max_edges) >= u64::max_value() / 2 {
-            return Err(ErrorKind::Verification(format!("graph is to big to build")))?;
+            return Err(ErrorKind::Verification("graph is to big to build".to_string()).into());
         }
         let max_nodes = 2 * to_u64!(max_edges);
         Ok(Graph {
@@ -88,7 +88,7 @@ where
     pub fn add_edge(&mut self, u: T, mut v: T) -> Result<(), Error> {
         let max_nodes_t = to_edge!(T, self.max_nodes);
         if u >= max_nodes_t || v >= max_nodes_t {
-            return Err(ErrorKind::EdgeAddition)?;
+            return Err(ErrorKind::EdgeAddition.into());
         }
         v = v + to_edge!(T, self.max_nodes);
         let adj_u = self.adj_list[to_usize!(u ^ T::one())];
@@ -101,7 +101,7 @@ where
         let ulink = self.links.len();
         let vlink = self.links.len() + 1;
         if to_edge!(T, vlink) == self.nil {
-            return Err(ErrorKind::EdgeAddition)?;
+            return Err(ErrorKind::EdgeAddition.into());
         }
         self.links.push(Link {
             next: self.adj_list[to_usize!(u)],
@@ -272,7 +272,7 @@ where
             self.verify_impl(&s)?;
         }
         if self.graph.solutions.is_empty() {
-            Err(ErrorKind::NoSolution)?
+            Err(ErrorKind::NoSolution.into())
         } else {
             Ok(self.graph.solutions.clone())
         }
@@ -282,7 +282,7 @@ where
     /// graph
     pub fn verify_impl(&self, proof: &Proof) -> Result<(), Error> {
         if proof.proof_size() != global::proofsize() {
-            return Err(ErrorKind::Verification("wrong cycle length".to_owned()))?;
+            return Err(ErrorKind::Verification("wrong cycle length".to_owned()).into());
         }
         let nonces = &proof.nonces;
         let mut uvs = vec![0u64; 2 * proof.proof_size()];
@@ -291,10 +291,10 @@ where
         for n in 0..proof.proof_size() {
             if nonces[n] > to_u64!(self.params.edge_mask) {
-                return Err(ErrorKind::Verification("edge too big".to_owned()))?;
+                return Err(ErrorKind::Verification("edge too big".to_owned()).into());
             }
             if n > 0 && nonces[n] <= nonces[n - 1] {
-                return Err(ErrorKind::Verification("edges not ascending".to_owned()))?;
+                return Err(ErrorKind::Verification("edges not ascending".to_owned()).into());
             }
             uvs[2 * n] = to_u64!(self.sipnode(to_edge!(T, nonces[n]), 0)?);
             uvs[2 * n + 1] = to_u64!(self.sipnode(to_edge!(T, nonces[n]), 1)?);
@@ -302,9 +302,7 @@ where
             xor1 ^= uvs[2 * n + 1];
         }
         if xor0 | xor1 != 0 {
-            return Err(ErrorKind::Verification(
-                "endpoints don't match up".to_owned(),
-            ))?;
+            return Err(ErrorKind::Verification("endpoints don't match up".to_owned()).into());
         }
         let mut n = 0;
         let mut i = 0;
@@ -321,13 +319,13 @@ where
                 if uvs[k] >> 1 == uvs[i] >> 1 {
                     // find other edge endpoint matching one at i
                     if j != i {
-                        return Err(ErrorKind::Verification("branch in cycle".to_owned()))?;
+                        return Err(ErrorKind::Verification("branch in cycle".to_owned()).into());
                    }
                     j = k;
                 }
             }
             if j == i || uvs[j] == uvs[i] {
-                return Err(ErrorKind::Verification("cycle dead ends".to_owned()))?;
+                return Err(ErrorKind::Verification("cycle dead ends".to_owned()).into());
             }
             i = j ^ 1;
             n += 1;
@@ -338,7 +336,7 @@ where
         if n == self.params.proof_size {
             Ok(())
         } else {
-            Err(ErrorKind::Verification("cycle too short".to_owned()))?
+            Err(ErrorKind::Verification("cycle too short".to_owned()).into())
         }
     }
 }
diff --git a/core/src/ser.rs b/core/src/ser.rs
index af0f3b742..0dd583347 100644
--- a/core/src/ser.rs
+++ b/core/src/ser.rs
@@ -292,7 +292,7 @@ impl ProtocolVersion {
     pub const MAX: u32 = std::u32::MAX;
 
     /// Protocol version as u32 to allow for convenient exhaustive matching on values.
-    pub fn value(&self) -> u32 {
+    pub fn value(self) -> u32 {
         self.0
     }
@@ -608,7 +608,7 @@ impl PMMRable for RangeProof {
     type E = Self;
 
     fn as_elmt(&self) -> Self::E {
-        self.clone()
+        *self
     }
 
     // Size is length prefix (8 bytes for u64) + MAX_PROOF_SIZE.
@@ -1255,7 +1255,7 @@ where
             }
         }
     }
-    const VARIANTS: &'static [&str] = &[
+    const VARIANTS: &[&str] = &[
         "NotFound",
         "PermissionDenied",
         "ConnectionRefused",
diff --git a/keychain/src/base58.rs b/keychain/src/base58.rs
index 19b740d52..ebe2d77a9 100644
--- a/keychain/src/base58.rs
+++ b/keychain/src/base58.rs
@@ -105,7 +105,7 @@ impl error::Error for Error {
     }
 }
 
-static BASE58_CHARS: &'static [u8] = b"123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz";
+static BASE58_CHARS: &[u8] = b"123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz";
 
 static BASE58_DIGITS: [Option<u8>; 128] = [
     None,
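`&'static str` becoming `&str` (and `&'static [u8]` becoming `&[u8]`) in these consts and statics loses nothing: since Rust 1.17, the `'static` lifetime is elided in `const` and `static` items, so the annotation was redundant (clippy: `redundant_static_lifetimes`). Both spellings below denote the same type:

    const OLD_STYLE: &'static str = "grin";
    const NEW_STYLE: &str = "grin";
    static BASE58_LIKE: &[u8] = b"123456789";

    fn main() {
        assert_eq!(OLD_STYLE, NEW_STYLE);
        assert_eq!(BASE58_LIKE[0], b'1');
    }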
diff --git a/keychain/src/extkey_bip32.rs b/keychain/src/extkey_bip32.rs
index c73742789..b47763d8b 100644
--- a/keychain/src/extkey_bip32.rs
+++ b/keychain/src/extkey_bip32.rs
@@ -231,15 +231,15 @@ impl ChildNumber {
     /// Returns `true` if the child number is a [`Normal`] value.
     ///
     /// [`Normal`]: #variant.Normal
-    pub fn is_normal(&self) -> bool {
+    pub fn is_normal(self) -> bool {
         !self.is_hardened()
     }
 
     /// Returns `true` if the child number is a [`Hardened`] value.
     ///
     /// [`Hardened`]: #variant.Hardened
-    pub fn is_hardened(&self) -> bool {
-        match *self {
+    pub fn is_hardened(self) -> bool {
+        match self {
             ChildNumber::Hardened { .. } => true,
             ChildNumber::Normal { .. } => false,
         }
@@ -544,7 +544,7 @@ impl ExtendedPubKey {
         H: BIP32Hasher,
     {
         let (sk, chain_code) = self.ckd_pub_tweak(secp, hasher, i)?;
-        let mut pk = self.public_key.clone();
+        let mut pk = self.public_key;
         pk.add_exp_assign(secp, &sk).map_err(Error::Ecdsa)?;
 
         Ok(ExtendedPubKey {
diff --git a/keychain/src/types.rs b/keychain/src/types.rs
index f28df062d..a22ccfd51 100644
--- a/keychain/src/types.rs
+++ b/keychain/src/types.rs
@@ -152,9 +152,7 @@ impl Identifier {
     pub fn from_serialized_path(len: u8, p: &[u8]) -> Identifier {
         let mut id = [0; IDENTIFIER_SIZE];
         id[0] = len;
-        for i in 1..IDENTIFIER_SIZE {
-            id[i] = p[i - 1];
-        }
+        id[1..IDENTIFIER_SIZE].clone_from_slice(&p[0..(IDENTIFIER_SIZE - 1)]);
         Identifier(id)
     }
@@ -169,9 +167,8 @@ impl Identifier {
     }
     pub fn from_bytes(bytes: &[u8]) -> Identifier {
         let mut identifier = [0; IDENTIFIER_SIZE];
-        for i in 0..min(IDENTIFIER_SIZE, bytes.len()) {
-            identifier[i] = bytes[i];
-        }
+        identifier[..min(IDENTIFIER_SIZE, bytes.len())]
+            .clone_from_slice(&bytes[..min(IDENTIFIER_SIZE, bytes.len())]);
         Identifier(identifier)
     }
@@ -282,9 +279,8 @@ impl BlindingFactor {
 
     pub fn from_slice(data: &[u8]) -> BlindingFactor {
         let mut blind = [0; SECRET_KEY_SIZE];
-        for i in 0..min(SECRET_KEY_SIZE, data.len()) {
-            blind[i] = data[i];
-        }
+        blind[..min(SECRET_KEY_SIZE, data.len())]
+            .clone_from_slice(&data[..min(SECRET_KEY_SIZE, data.len())]);
         BlindingFactor(blind)
     }
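`is_normal`/`is_hardened` (like `is_coinbase`/`is_plain` and `ProtocolVersion::value` earlier) now take `self` by value. For a small `Copy` type that is as cheap as passing the reference, avoids an indirection (clippy: `trivially_copy_pass_by_ref`), and lets `match self` replace `match *self`:

    #[derive(Clone, Copy)]
    enum ChildNumber {
        Normal,
        Hardened,
    }

    impl ChildNumber {
        fn is_hardened(self) -> bool {
            match self {
                ChildNumber::Hardened => true,
                ChildNumber::Normal => false,
            }
        }
    }

    fn main() {
        let n = ChildNumber::Normal;
        assert!(!n.is_hardened());
        let _still_here = n; // the call copied n, it did not move it
    }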
diff --git a/p2p/src/conn.rs b/p2p/src/conn.rs
index 243bd5d22..a0c1cdf55 100644
--- a/p2p/src/conn.rs
+++ b/p2p/src/conn.rs
@@ -125,7 +125,7 @@ impl<'a> Message<'a> {
             let read_len = cmp::min(8000, len - written);
             let mut buf = vec![0u8; read_len];
             self.stream.read_exact(&mut buf[..])?;
-            writer.write_all(&mut buf)?;
+            writer.write_all(&buf)?;
             written += read_len;
         }
         Ok(written)
     }
@@ -291,14 +291,14 @@ where
     let reader_stopped = stopped.clone();
 
     let reader_tracker = tracker.clone();
-    let writer_tracker = tracker.clone();
+    let writer_tracker = tracker;
 
     let reader_thread = thread::Builder::new()
         .name("peer_read".to_string())
         .spawn(move || {
             loop {
                 // check the read end
-                match try_header!(read_header(&mut reader, version), &mut reader) {
+                match try_header!(read_header(&mut reader, version), &reader) {
                     Some(MsgHeaderWrapper::Known(header)) => {
                         reader
                             .set_read_timeout(Some(BODY_IO_TIMEOUT))
@@ -347,7 +347,7 @@ where
                 reader
                     .peer_addr()
                     .map(|a| a.to_string())
-                    .unwrap_or("?".to_owned())
+                    .unwrap_or_else(|_| "?".to_owned())
             );
             let _ = reader.shutdown(Shutdown::Both);
         })?;
@@ -380,7 +380,7 @@ where
                 writer
                     .peer_addr()
                     .map(|a| a.to_string())
-                    .unwrap_or("?".to_owned())
+                    .unwrap_or_else(|_| "?".to_owned())
             );
         })?;
     Ok((reader_thread, writer_thread))
diff --git a/p2p/src/msg.rs b/p2p/src/msg.rs
index 2f004a36b..23d879642 100644
--- a/p2p/src/msg.rs
+++ b/p2p/src/msg.rs
@@ -31,7 +31,7 @@ use std::io::{Read, Write};
 use std::sync::Arc;
 
 /// Grin's user agent with current version
-pub const USER_AGENT: &'static str = concat!("MW/Grin ", env!("CARGO_PKG_VERSION"));
+pub const USER_AGENT: &str = concat!("MW/Grin ", env!("CARGO_PKG_VERSION"));
 
 /// Magic numbers expected in the header of every message
 const OTHER_MAGIC: [u8; 2] = [73, 43];
diff --git a/p2p/src/peers.rs b/p2p/src/peers.rs
index 415804a89..a3727a66f 100644
--- a/p2p/src/peers.rs
+++ b/p2p/src/peers.rs
@@ -73,7 +73,7 @@ impl Peers {
         };
         debug!("Saving newly connected peer {}.", peer_data.addr);
         self.save_peer(&peer_data)?;
-        peers.insert(peer_data.addr, peer.clone());
+        peers.insert(peer_data.addr, peer);
 
         Ok(())
     }
@@ -149,7 +149,7 @@ impl Peers {
                 return None;
             }
         };
-        peers.get(&addr).map(|p| p.clone())
+        peers.get(&addr).cloned()
     }
 
     /// Number of peers currently connected to.
@@ -171,7 +171,7 @@ impl Peers {
     // (total_difficulty) than we do.
     pub fn more_work_peers(&self) -> Result<Vec<Arc<Peer>>, chain::Error> {
         let peers = self.connected_peers();
-        if peers.len() == 0 {
+        if peers.is_empty() {
             return Ok(vec![]);
         }
@@ -190,7 +190,7 @@ impl Peers {
     // (total_difficulty) than/as we do.
     pub fn more_or_same_work_peers(&self) -> Result<usize, chain::Error> {
         let peers = self.connected_peers();
-        if peers.len() == 0 {
+        if peers.is_empty() {
             return Ok(0);
         }
@@ -217,7 +217,7 @@ impl Peers {
     /// branch, showing the highest total difficulty.
     pub fn most_work_peers(&self) -> Vec<Arc<Peer>> {
         let peers = self.connected_peers();
-        if peers.len() == 0 {
+        if peers.is_empty() {
             return vec![];
         }
@@ -265,7 +265,7 @@ impl Peers {
                 peers.remove(&peer.info.addr);
                 Ok(())
             }
-            None => return Err(Error::PeerNotFound),
+            None => Err(Error::PeerNotFound),
         }
     }
@@ -275,9 +275,9 @@ impl Peers {
         // check if peer exist
         self.get_peer(peer_addr)?;
         if self.is_banned(peer_addr) {
-            return self.update_state(peer_addr, State::Healthy);
+            self.update_state(peer_addr, State::Healthy)
         } else {
-            return Err(Error::PeerNotBanned);
+            Err(Error::PeerNotBanned)
         }
     }
@@ -469,7 +469,7 @@ impl Peers {
                 .outgoing_connected_peers()
                 .iter()
                 .take(excess_outgoing_count)
-                .map(|x| x.info.addr.clone())
+                .map(|x| x.info.addr)
                 .collect::<Vec<_>>();
             rm.append(&mut addrs);
         }
@@ -482,7 +482,7 @@ impl Peers {
                 .incoming_connected_peers()
                 .iter()
                 .take(excess_incoming_count)
-                .map(|x| x.info.addr.clone())
+                .map(|x| x.info.addr)
                 .collect::<Vec<_>>();
             rm.append(&mut addrs);
         }
diff --git a/p2p/src/store.rs b/p2p/src/store.rs
index ae9703305..e3627786c 100644
--- a/p2p/src/store.rs
+++ b/p2p/src/store.rs
@@ -23,10 +23,10 @@ use crate::core::ser::{self, Readable, Reader, Writeable, Writer};
 use crate::types::{Capabilities, PeerAddr, ReasonForBan};
 use grin_store::{self, option_to_not_found, to_key, Error};
 
-const DB_NAME: &'static str = "peer";
-const STORE_SUBPATH: &'static str = "peers";
+const DB_NAME: &str = "peer";
+const STORE_SUBPATH: &str = "peers";
 
-const PEER_PREFIX: u8 = 'P' as u8;
+const PEER_PREFIX: u8 = b'P';
 
 // Types of messages
 enum_from_primitive! {
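Two micro-idioms recur in peers.rs and conn.rs. `map(|p| p.clone())` on an `Option<&T>` is exactly `Option::cloned`, and `unwrap_or("?".to_owned())` builds its fallback String even when the value is present, so it becomes `unwrap_or_else(...)` on the same lazy-argument grounds as `ok_or_else` earlier:

    fn main() {
        let name = String::from("peer");
        let slot: Option<&String> = Some(&name);

        let by_hand: Option<String> = slot.map(|p| p.clone());
        let idiomatic: Option<String> = slot.cloned(); // same result, states intent
        assert_eq!(by_hand, idiomatic);

        let label: Option<String> = None;
        // The fallback is only allocated because `label` is None.
        let shown = label.unwrap_or_else(|| "?".to_owned());
        assert_eq!(shown, "?");
    }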
diff --git a/p2p/src/types.rs b/p2p/src/types.rs
index ca783b2a5..950fb9b06 100644
--- a/p2p/src/types.rs
+++ b/p2p/src/types.rs
@@ -333,17 +333,17 @@ bitflags! {
     #[derive(Serialize, Deserialize)]
     pub struct Capabilities: u32 {
         /// We don't know (yet) what the peer can do.
-        const UNKNOWN = 0b00000000;
+        const UNKNOWN = 0b0000_0000;
         /// Can provide full history of headers back to genesis
         /// (for at least one arbitrary fork).
-        const HEADER_HIST = 0b00000001;
+        const HEADER_HIST = 0b0000_0001;
         /// Can provide block headers and the TxHashSet for some recent-enough
         /// height.
-        const TXHASHSET_HIST = 0b00000010;
+        const TXHASHSET_HIST = 0b0000_0010;
         /// Can provide a list of healthy peers
-        const PEER_LIST = 0b00000100;
+        const PEER_LIST = 0b0000_0100;
         /// Can broadcast and request txs by kernel hash.
-        const TX_KERNEL_HASH = 0b00001000;
+        const TX_KERNEL_HASH = 0b0000_1000;
 
         /// All nodes right now are "full nodes".
         /// Some nodes internally may maintain longer block histories (archival_mode)
@@ -470,11 +470,11 @@ pub struct PeerInfoDisplay {
 impl From<PeerInfo> for PeerInfoDisplay {
     fn from(info: PeerInfo) -> PeerInfoDisplay {
         PeerInfoDisplay {
-            capabilities: info.capabilities.clone(),
+            capabilities: info.capabilities,
             user_agent: info.user_agent.clone(),
             version: info.version,
-            addr: info.addr.clone(),
-            direction: info.direction.clone(),
+            addr: info.addr,
+            direction: info.direction,
             total_difficulty: info.total_difficulty(),
             height: info.height(),
         }
diff --git a/pool/src/pool.rs b/pool/src/pool.rs
index f8991e37e..8a6d8f48f 100644
--- a/pool/src/pool.rs
+++ b/pool/src/pool.rs
@@ -108,7 +108,7 @@ impl Pool {
         (
             txs,
             kern_ids
-                .into_iter()
+                .iter()
                 .filter(|id| !found_ids.contains(id))
                 .cloned()
                 .collect(),
@@ -412,7 +412,7 @@ impl Pool {
         let mut found_txs = vec![];
 
         // Gather all the kernels of the multi-kernel transaction in one set
-        let kernel_set = kernels.into_iter().collect::<HashSet<_>>();
+        let kernel_set = kernels.iter().collect::<HashSet<_>>();
 
         // Check each transaction in the pool
         for entry in &self.entries {
@@ -468,7 +468,7 @@ impl Bucket {
     fn new(tx: Transaction, age_idx: usize) -> Bucket {
         Bucket {
             fee_to_weight: tx.fee_to_weight(),
-            raw_txs: vec![tx.clone()],
+            raw_txs: vec![tx],
             age_idx,
         }
     }
diff --git a/pool/src/transaction_pool.rs b/pool/src/transaction_pool.rs
index 2fc4c0226..ffe24b5e5 100644
--- a/pool/src/transaction_pool.rs
+++ b/pool/src/transaction_pool.rs
@@ -192,19 +192,12 @@ impl TransactionPool {
         let bucket_transactions = self.txpool.bucket_transactions(Weighting::NoLimit);
 
         // Get last transaction and remove it
-        match bucket_transactions.last() {
-            Some(evictable_transaction) => {
-                // Remove transaction
-                self.txpool.entries = self
-                    .txpool
-                    .entries
-                    .iter()
-                    .filter(|x| x.tx != *evictable_transaction)
-                    .map(|x| x.clone())
-                    .collect::<Vec<_>>();
-            }
-            None => (),
-        }
+        if let Some(evictable_transaction) = bucket_transactions.last() {
+            // Remove transaction
+            self.txpool
+                .entries
+                .retain(|x| x.tx != *evictable_transaction);
+        };
     }
 
     // Old txs will "age out" after 30 mins.
@@ -277,9 +270,9 @@ impl TransactionPool {
         }
 
         // Check that the stempool can accept this transaction
-        if stem && self.stempool.size() > self.config.max_stempool_size {
-            return Err(PoolError::OverCapacity);
-        } else if self.total_size() > self.config.max_pool_size {
+        if stem && self.stempool.size() > self.config.max_stempool_size
+            || self.total_size() > self.config.max_pool_size
+        {
             return Err(PoolError::OverCapacity);
         }
diff --git a/store/src/lmdb.rs b/store/src/lmdb.rs
index 41a010600..c9b161bb9 100644
--- a/store/src/lmdb.rs
+++ b/store/src/lmdb.rs
@@ -95,7 +95,7 @@ impl Store {
             Some(n) => n.to_owned(),
             None => "lmdb".to_owned(),
         };
-        let full_path = [root_path.to_owned(), name.clone()].join("/");
+        let full_path = [root_path.to_owned(), name].join("/");
         fs::create_dir_all(&full_path)
             .expect("Unable to create directory 'db_root' to store chain_data");
diff --git a/store/src/pmmr.rs b/store/src/pmmr.rs
index 2cbb69250..b1748fc03 100644
--- a/store/src/pmmr.rs
+++ b/store/src/pmmr.rs
@@ -174,7 +174,7 @@ impl<T: PMMRable> Backend<T> for PMMRBackend<T> {
     fn data_as_temp_file(&self) -> Result<File, String> {
         self.data_file
             .as_temp_file()
-            .map_err(|_| format!("Failed to build temp data file"))
+            .map_err(|_| "Failed to build temp data file".to_string())
     }
 
     /// Rewind the PMMR backend to the given position.
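The eviction rewrite in transaction_pool.rs is the biggest single win here: rebuilding `entries` through `iter().filter(..).map(|x| x.clone()).collect()` cloned every surviving entry just to drop one, while `retain` removes the match in place and clones nothing:

    fn main() {
        let mut entries = vec![1, 2, 3, 4];
        let evictable = 3;
        entries.retain(|x| *x != evictable);
        assert_eq!(entries, vec![1, 2, 4]);
    }

The same file also folds the `if ... { return Err(...) } else if ... { return Err(...) }` pair into one `||` condition, since both branches returned the identical `PoolError::OverCapacity`.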
diff --git a/store/src/types.rs b/store/src/types.rs
index 75a7604de..c1621cc6e 100644
--- a/store/src/types.rs
+++ b/store/src/types.rs
@@ -155,7 +155,7 @@ where
     /// Write the file out to disk, pruning removed elements.
     pub fn save_prune(&mut self, prune_pos: &[u64]) -> io::Result<()> {
         // Need to convert from 1-index to 0-index (don't ask).
-        let prune_idx: Vec<_> = prune_pos.into_iter().map(|x| x - 1).collect();
+        let prune_idx: Vec<_> = prune_pos.iter().map(|x| x - 1).collect();
         self.file.save_prune(prune_idx.as_slice())
     }
 }
diff --git a/util/src/file.rs b/util/src/file.rs
index c1457699e..18c44a210 100644
--- a/util/src/file.rs
+++ b/util/src/file.rs
@@ -37,7 +37,7 @@ pub fn copy_dir_to(src: &Path, dst: &Path) -> io::Result<u64> {
     for entry_result in src.read_dir()? {
         let entry = entry_result?;
         let file_type = entry.file_type()?;
-        let count = copy_to(&entry.path(), &file_type, &dst.join(entry.file_name()))?;
+        let count = copy_to(&entry.path(), file_type, &dst.join(entry.file_name()))?;
         counter += count;
     }
     Ok(counter)
 }
@@ -55,7 +55,7 @@ pub fn list_files(path: &Path) -> Vec<PathBuf> {
         .collect()
 }
 
-fn copy_to(src: &Path, src_type: &fs::FileType, dst: &Path) -> io::Result<u64> {
+fn copy_to(src: &Path, src_type: fs::FileType, dst: &Path) -> io::Result<u64> {
     if src_type.is_file() {
         fs::copy(src, dst)
     } else if src_type.is_dir() {
diff --git a/util/src/logger.rs b/util/src/logger.rs
index b38b46331..0f373feb3 100644
--- a/util/src/logger.rs
+++ b/util/src/logger.rs
@@ -340,7 +340,7 @@ fn send_panic_to_log() {
         None => error!("thread '{}' panicked at '{}'{:?}", thread, msg, backtrace),
     }
     //also print to stderr
-    let tui_running = TUI_RUNNING.lock().clone();
+    let tui_running = *TUI_RUNNING.lock();
     if !tui_running {
         let config = LOGGING_CONFIG.lock();
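The last hunk reads a flag out of a mutex without cloning: `TUI_RUNNING` holds a `bool`, and dereferencing the guard copies the value out, after which the guard drops at the end of the statement. The shape with std's `Mutex` (grin's util crate uses parking_lot, where `lock()` returns the guard directly rather than a `Result`):

    use std::sync::Mutex;

    fn main() {
        let tui_running = Mutex::new(false);
        // `*guard` copies the bool; no clone() on a Copy type.
        let running = *tui_running.lock().unwrap();
        assert!(!running);
    }

Likewise `prune_pos.iter()` over a `&[u64]` in store/types.rs: `into_iter()` on a shared slice already yields references, so `iter()` says the same thing without suggesting a consuming iteration.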