diff --git a/chain/src/txhashset/txhashset.rs b/chain/src/txhashset/txhashset.rs
index 9f7e774c0..79b4d2cf0 100644
--- a/chain/src/txhashset/txhashset.rs
+++ b/chain/src/txhashset/txhashset.rs
@@ -624,9 +624,7 @@ impl<'a> HeaderExtension<'a> {
 	/// This may be either the header MMR or the sync MMR depending on the
 	/// extension.
 	pub fn apply_header(&mut self, header: &BlockHeader) -> Result<(), Error> {
-		self.pmmr
-			.push(header.clone())
-			.map_err(&ErrorKind::TxHashSetErr)?;
+		self.pmmr.push(&header).map_err(&ErrorKind::TxHashSetErr)?;
 		self.header = header.clone();
 		Ok(())
 	}
@@ -961,7 +959,7 @@ impl<'a> Extension<'a> {
 	fn apply_header(&mut self, header: &BlockHeader) -> Result<(), Error> {
 		self.header_pmmr
-			.push(header.clone())
+			.push(&header)
 			.map_err(&ErrorKind::TxHashSetErr)?;
 		Ok(())
 	}
diff --git a/core/src/consensus.rs b/core/src/consensus.rs
index aecfb1371..49d2d6651 100644
--- a/core/src/consensus.rs
+++ b/core/src/consensus.rs
@@ -296,7 +296,7 @@ where
 }
 
 /// Factor by which the secondary proof of work difficulty will be adjusted
-pub fn secondary_pow_scaling(height: u64, diff_data: &Vec<HeaderInfo>) -> u32 {
+pub fn secondary_pow_scaling(height: u64, diff_data: &[HeaderInfo]) -> u32 {
 	// Get the secondary count across the window, in pct (100 * 60 * 2nd_pow_fraction)
 	let snd_count = 100 * diff_data.iter().filter(|n| n.is_secondary).count() as u64;
diff --git a/core/src/core/block.rs b/core/src/core/block.rs
index 468b9e735..2e5f3563c 100644
--- a/core/src/core/block.rs
+++ b/core/src/core/block.rs
@@ -286,7 +286,7 @@ impl BlockHeader {
 	/// Total difficulty accumulated by the proof of work on this header
 	pub fn total_difficulty(&self) -> Difficulty {
-		self.pow.total_difficulty.clone()
+		self.pow.total_difficulty
 	}
 
 	/// The "overage" to use when verifying the kernel sums.
@@ -362,10 +362,7 @@ impl Readable for Block {
 		body.validate_read(true)
 			.map_err(|_| ser::Error::CorruptedData)?;
 
-		Ok(Block {
-			header: header,
-			body: body,
-		})
+		Ok(Block { header, body })
 	}
 }
 
@@ -465,7 +462,7 @@ impl Block {
 	/// Build a new empty block from a specified header
 	pub fn with_header(header: BlockHeader) -> Block {
 		Block {
-			header: header,
+			header,
 			..Default::default()
 		}
 	}
@@ -607,15 +604,14 @@ impl Block {
 		// take the kernel offset for this block (block offset minus previous) and
 		// verify.body.outputs and kernel sums
-		let block_kernel_offset = if self.header.total_kernel_offset() == prev_kernel_offset.clone()
-		{
+		let block_kernel_offset = if self.header.total_kernel_offset() == *prev_kernel_offset {
 			// special case when the sum hasn't changed (typically an empty block),
 			// zero isn't a valid private key but it's a valid blinding factor
 			BlindingFactor::zero()
 		} else {
 			committed::sum_kernel_offsets(
 				vec![self.header.total_kernel_offset()],
-				vec![prev_kernel_offset.clone()],
+				vec![*prev_kernel_offset],
 			)?
 		};
 		let (_utxo_sum, kernel_sum) =
diff --git a/core/src/core/block_sums.rs b/core/src/core/block_sums.rs
index 8f0fed4d4..6b6e122f5 100644
--- a/core/src/core/block_sums.rs
+++ b/core/src/core/block_sums.rs
@@ -53,8 +53,8 @@ impl Default for BlockSums {
 	fn default() -> BlockSums {
 		let zero_commit = secp_static::commit_to_zero_value();
 		BlockSums {
-			utxo_sum: zero_commit.clone(),
-			kernel_sum: zero_commit.clone(),
+			utxo_sum: zero_commit,
+			kernel_sum: zero_commit,
 		}
 	}
 }
diff --git a/core/src/core/merkle_proof.rs b/core/src/core/merkle_proof.rs
index 78a93d847..2d06f9345 100644
--- a/core/src/core/merkle_proof.rs
+++ b/core/src/core/merkle_proof.rs
@@ -86,7 +86,7 @@ impl MerkleProof {
 	pub fn from_hex(hex: &str) -> Result<MerkleProof, String> {
 		let bytes = util::from_hex(hex.to_string()).unwrap();
 		let res = ser::deserialize(&mut &bytes[..])
-			.map_err(|_| format!("failed to deserialize a Merkle Proof"))?;
+			.map_err(|_| "failed to deserialize a Merkle Proof".to_string())?;
 		Ok(res)
 	}
 
@@ -102,7 +102,7 @@ impl MerkleProof {
 		// calculate the peaks once as these are based on overall MMR size
 		// (and will not change)
 		let peaks_pos = pmmr::peaks(self.mmr_size);
-		proof.verify_consume(root, element, node_pos, peaks_pos)
+		proof.verify_consume(root, element, node_pos, &peaks_pos)
 	}
 
 	/// Consumes the Merkle proof while verifying it.
@@ -113,7 +113,7 @@ impl MerkleProof {
 		root: Hash,
 		element: &PMMRIndexHashable,
 		node_pos: u64,
-		peaks_pos: Vec<u64>,
+		peaks_pos: &[u64],
 	) -> Result<(), MerkleProofError> {
 		let node_hash = if node_pos > self.mmr_size {
 			element.hash_with_index(self.mmr_size)
@@ -123,7 +123,7 @@
 
 		// handle special case of only a single entry in the MMR
 		// (no siblings to hash together)
-		if self.path.len() == 0 {
+		if self.path.is_empty() {
 			if root == node_hash {
 				return Ok(());
 			} else {
diff --git a/core/src/core/pmmr/db_pmmr.rs b/core/src/core/pmmr/db_pmmr.rs
index a899a95b6..6a4614f57 100644
--- a/core/src/core/pmmr/db_pmmr.rs
+++ b/core/src/core/pmmr/db_pmmr.rs
@@ -42,8 +42,8 @@ where
 	/// Build a new db backed MMR.
 	pub fn new(backend: &'a mut B) -> DBPMMR<T, B> {
 		DBPMMR {
+			backend,
 			last_pos: 0,
-			backend: backend,
 			_marker: marker::PhantomData,
 		}
 	}
@@ -52,8 +52,8 @@
 	/// last_pos with the provided db backend.
 	pub fn at(backend: &'a mut B, last_pos: u64) -> DBPMMR<T, B> {
 		DBPMMR {
-			last_pos: last_pos,
-			backend: backend,
+			backend,
+			last_pos,
 			_marker: marker::PhantomData,
 		}
 	}
@@ -98,7 +98,7 @@
 	/// Push a new element into the MMR. Computes new related peaks at
 	/// the same time if applicable.
-	pub fn push(&mut self, elmt: T) -> Result<u64, String> {
+	pub fn push(&mut self, elmt: &T) -> Result<u64, String> {
 		let elmt_pos = self.last_pos + 1;
 		let mut current_hash = elmt.hash_with_index(elmt_pos - 1);
diff --git a/core/src/core/pmmr/pmmr.rs b/core/src/core/pmmr/pmmr.rs
index 9a6f6025b..cde9573e1 100644
--- a/core/src/core/pmmr/pmmr.rs
+++ b/core/src/core/pmmr/pmmr.rs
@@ -53,8 +53,8 @@ where
 	/// Build a new prunable Merkle Mountain Range using the provided backend.
 	pub fn new(backend: &'a mut B) -> PMMR<T, B> {
 		PMMR {
+			backend,
 			last_pos: 0,
-			backend: backend,
 			_marker: marker::PhantomData,
 		}
 	}
@@ -63,8 +63,8 @@
 	/// last_pos with the provided backend.
 	pub fn at(backend: &'a mut B, last_pos: u64) -> PMMR<T, B> {
 		PMMR {
-			last_pos: last_pos,
-			backend: backend,
+			backend,
+			last_pos,
 			_marker: marker::PhantomData,
 		}
 	}
@@ -90,7 +90,7 @@ where
 		let rhs = self.bag_the_rhs(peak_pos);
 		let mut res = peaks(self.last_pos)
 			.into_iter()
-			.filter(|x| x < &peak_pos)
+			.filter(|x| *x < peak_pos)
 			.filter_map(|x| self.backend.get_from_file(x))
 			.collect::<Vec<_>>();
 		res.reverse();
@@ -107,7 +107,7 @@
 	pub fn bag_the_rhs(&self, peak_pos: u64) -> Option<Hash> {
 		let rhs = peaks(self.last_pos)
 			.into_iter()
-			.filter(|x| x > &peak_pos)
+			.filter(|x| *x > peak_pos)
 			.filter_map(|x| self.backend.get_from_file(x))
 			.collect::<Vec<_>>();
 
@@ -145,7 +145,7 @@ where
 		// check we actually have a hash in the MMR at this pos
 		self.get_hash(pos)
-			.ok_or(format!("no element at pos {}", pos))?;
+			.ok_or_else(|| format!("no element at pos {}", pos))?;
 
 		let mmr_size = self.unpruned_size();
@@ -510,7 +510,7 @@ pub fn peak_map_height(mut pos: u64) -> (u64, u64) {
 	let mut peak_size = ALL_ONES >> pos.leading_zeros();
 	let mut bitmap = 0;
 	while peak_size != 0 {
-		bitmap = bitmap << 1;
+		bitmap <<= 1;
 		if pos >= peak_size {
 			pos -= peak_size;
 			bitmap |= 1;
diff --git a/core/src/core/pmmr/readonly_pmmr.rs b/core/src/core/pmmr/readonly_pmmr.rs
index 064df9958..c1da43369 100644
--- a/core/src/core/pmmr/readonly_pmmr.rs
+++ b/core/src/core/pmmr/readonly_pmmr.rs
@@ -41,8 +41,8 @@ where
 	/// Build a new readonly PMMR.
 	pub fn new(backend: &'a B) -> ReadonlyPMMR<T, B> {
 		ReadonlyPMMR {
+			backend,
 			last_pos: 0,
-			backend: backend,
 			_marker: marker::PhantomData,
 		}
 	}
@@ -51,8 +51,8 @@
 	/// last_pos with the provided backend.
 	pub fn at(backend: &'a B, last_pos: u64) -> ReadonlyPMMR<T, B> {
 		ReadonlyPMMR {
-			last_pos: last_pos,
-			backend: backend,
+			backend,
+			last_pos,
 			_marker: marker::PhantomData,
 		}
 	}
diff --git a/core/src/core/pmmr/rewindable_pmmr.rs b/core/src/core/pmmr/rewindable_pmmr.rs
index eded64035..d220a8c75 100644
--- a/core/src/core/pmmr/rewindable_pmmr.rs
+++ b/core/src/core/pmmr/rewindable_pmmr.rs
@@ -43,8 +43,8 @@ where
 	/// Build a new readonly PMMR.
 	pub fn new(backend: &'a B) -> RewindablePMMR<T, B> {
 		RewindablePMMR {
+			backend,
 			last_pos: 0,
-			backend: backend,
 			_marker: marker::PhantomData,
 		}
 	}
@@ -53,8 +53,8 @@
 	/// last_pos with the provided backend.
 	pub fn at(backend: &'a B, last_pos: u64) -> RewindablePMMR<T, B> {
 		RewindablePMMR {
-			last_pos: last_pos,
-			backend: backend,
+			backend,
+			last_pos,
 			_marker: marker::PhantomData,
 		}
 	}
diff --git a/core/src/core/transaction.rs b/core/src/core/transaction.rs
index aedbeeb7c..4b214f140 100644
--- a/core/src/core/transaction.rs
+++ b/core/src/core/transaction.rs
@@ -177,7 +177,7 @@ impl Readable for TxKernel {
 		let features =
 			KernelFeatures::from_bits(reader.read_u8()?).ok_or(ser::Error::CorruptedData)?;
 		Ok(TxKernel {
-			features: features,
+			features,
 			fee: reader.read_u64()?,
 			lock_height: reader.read_u64()?,
 			excess: Commitment::read(reader)?,
@@ -230,13 +230,13 @@ impl TxKernel {
 
 	/// Builds a new tx kernel with the provided fee.
 	pub fn with_fee(self, fee: u64) -> TxKernel {
-		TxKernel { fee: fee, ..self }
+		TxKernel { fee, ..self }
 	}
 
 	/// Builds a new tx kernel with the provided lock_height.
 	pub fn with_lock_height(self, lock_height: u64) -> TxKernel {
 		TxKernel {
-			lock_height: lock_height,
+			lock_height,
 			..self
 		}
 	}
@@ -356,9 +356,9 @@ impl TransactionBody {
 		verify_sorted: bool,
 	) -> Result<TransactionBody, Error> {
 		let body = TransactionBody {
-			inputs: inputs,
-			outputs: outputs,
-			kernels: kernels,
+			inputs,
+			outputs,
+			kernels,
 		};
 
 		if verify_sorted {
@@ -436,7 +436,7 @@ impl TransactionBody {
 	/// Calculate transaction weight from transaction details
 	pub fn weight(input_len: usize, output_len: usize, kernel_len: usize) -> u32 {
-		let mut body_weight = -1 * (input_len as i32) + (4 * output_len as i32) + kernel_len as i32;
+		let mut body_weight = -(input_len as i32) + (4 * output_len as i32) + kernel_len as i32;
 		if body_weight < 1 {
 			body_weight = 1;
 		}
@@ -559,7 +559,7 @@ impl TransactionBody {
 		};
 
 		// Now batch verify all those unverified rangeproofs
-		if outputs.len() > 0 {
+		if !outputs.is_empty() {
 			let mut commits = vec![];
 			let mut proofs = vec![];
 			for x in &outputs {
@@ -687,10 +687,7 @@ impl Transaction {
 	/// Creates a new transaction using this transaction as a template
 	/// and with the specified offset.
 	pub fn with_offset(self, offset: BlindingFactor) -> Transaction {
-		Transaction {
-			offset: offset,
-			..self
-		}
+		Transaction { offset, ..self }
 	}
 
 	/// Builds a new transaction with the provided inputs added. Existing
@@ -1072,7 +1069,7 @@ impl Readable for Output {
 			OutputFeatures::from_bits(reader.read_u8()?).ok_or(ser::Error::CorruptedData)?;
 
 		Ok(Output {
-			features: features,
+			features,
 			commit: Commitment::read(reader)?,
 			proof: RangeProof::read(reader)?,
 		})
@@ -1131,8 +1128,8 @@ impl OutputIdentifier {
 	/// Build a new output_identifier.
 	pub fn new(features: OutputFeatures, commit: &Commitment) -> OutputIdentifier {
 		OutputIdentifier {
-			features: features,
-			commit: commit.clone(),
+			features,
+			commit: *commit,
 		}
 	}
 
@@ -1152,9 +1149,9 @@
 	/// Converts this identifier to a full output, provided a RangeProof
 	pub fn into_output(self, proof: RangeProof) -> Output {
 		Output {
+			proof,
 			features: self.features,
 			commit: self.commit,
-			proof: proof,
 		}
 	}
 
@@ -1196,8 +1193,8 @@ impl Readable for OutputIdentifier {
 		let features =
 			OutputFeatures::from_bits(reader.read_u8()?).ok_or(ser::Error::CorruptedData)?;
 		Ok(OutputIdentifier {
+			features,
 			commit: Commitment::read(reader)?,
-			features: features,
 		})
 	}
 }
diff --git a/core/src/core/verifier_cache.rs b/core/src/core/verifier_cache.rs
index f6266006d..aca2bc16c 100644
--- a/core/src/core/verifier_cache.rs
+++ b/core/src/core/verifier_cache.rs
@@ -27,10 +27,10 @@ use core::{Output, TxKernel};
 pub trait VerifierCache: Sync + Send {
 	/// Takes a vec of tx kernels and returns those kernels
 	/// that have not yet been verified.
-	fn filter_kernel_sig_unverified(&mut self, kernels: &Vec<TxKernel>) -> Vec<TxKernel>;
+	fn filter_kernel_sig_unverified(&mut self, kernels: &[TxKernel]) -> Vec<TxKernel>;
 	/// Takes a vec of tx outputs and returns those outputs
 	/// that have not yet had their rangeproofs verified.
-	fn filter_rangeproof_unverified(&mut self, outputs: &Vec<Output>) -> Vec<Output>;
+	fn filter_rangeproof_unverified(&mut self, outputs: &[Output]) -> Vec<Output>;
 	/// Adds a vec of tx kernels to the cache (used in conjunction with the the filter above).
 	fn add_kernel_sig_verified(&mut self, kernels: Vec<TxKernel>);
 	/// Adds a vec of outputs to the cache (used in conjunction with the the filter above).
@@ -45,9 +45,6 @@ pub struct LruVerifierCache {
 	rangeproof_verification_cache: LruCache<Hash, ()>,
 }
 
-unsafe impl Sync for LruVerifierCache {}
-unsafe impl Send for LruVerifierCache {}
-
 impl LruVerifierCache {
 	/// TODO how big should these caches be?
 	/// They need to be *at least* large enough to cover a maxed out block.
@@ -60,7 +57,7 @@
 }
 
 impl VerifierCache for LruVerifierCache {
-	fn filter_kernel_sig_unverified(&mut self, kernels: &Vec<TxKernel>) -> Vec<TxKernel> {
+	fn filter_kernel_sig_unverified(&mut self, kernels: &[TxKernel]) -> Vec<TxKernel> {
 		let res = kernels
 			.into_iter()
 			.filter(|x| {
@@ -78,7 +75,7 @@
 		res
 	}
 
-	fn filter_rangeproof_unverified(&mut self, outputs: &Vec<Output>) -> Vec<Output> {
+	fn filter_rangeproof_unverified(&mut self, outputs: &[Output]) -> Vec<Output> {
 		let res = outputs
 			.into_iter()
 			.filter(|x| {
diff --git a/core/src/global.rs b/core/src/global.rs
index c98cfcfe6..84c4d294e 100644
--- a/core/src/global.rs
+++ b/core/src/global.rs
@@ -303,8 +303,7 @@ where
 		if live_intervals[i - 1].timestamp > live_intervals[i].timestamp {
 			live_intervals[i].timestamp = 0;
 		} else {
-			live_intervals[i].timestamp =
-				live_intervals[i].timestamp - live_intervals[i - 1].timestamp;
+			live_intervals[i].timestamp -= live_intervals[i - 1].timestamp;
 		}
 	}
 	// Remove genesis "interval"
@@ -321,7 +320,7 @@
 	for _ in 0..block_count_difference {
 		last_ts = last_ts.saturating_sub(live_intervals[live_intervals.len() - 1].timestamp);
-		last_n.insert(0, HeaderInfo::from_ts_diff(last_ts, last_diff.clone()));
+		last_n.insert(0, HeaderInfo::from_ts_diff(last_ts, last_diff));
 		interval_index = match interval_index {
 			0 => live_intervals.len() - 1,
 			_ => interval_index - 1,
diff --git a/core/src/pow/common.rs b/core/src/pow/common.rs
index c94dda948..1a456763a 100644
--- a/core/src/pow/common.rs
+++ b/core/src/pow/common.rs
@@ -78,19 +78,19 @@ where
 	}
 }
 
-pub fn set_header_nonce(header: Vec<u8>, nonce: Option<u32>) -> Result<[u64; 4], Error> {
+pub fn set_header_nonce(header: &[u8], nonce: Option<u32>) -> Result<[u64; 4], Error> {
 	if let Some(n) = nonce {
 		let len = header.len();
-		let mut header = header.clone();
+		let mut header = header.to_owned();
 		header.truncate(len - mem::size_of::<u32>());
 		header.write_u32::<LittleEndian>(n)?;
-		create_siphash_keys(header)
+		create_siphash_keys(&header)
 	} else {
-		create_siphash_keys(header)
+		create_siphash_keys(&header)
 	}
 }
 
-pub fn create_siphash_keys(header: Vec<u8>) -> Result<[u64; 4], Error> {
+pub fn create_siphash_keys(header: &[u8]) -> Result<[u64; 4], Error> {
 	let h = blake2b(32, &[], &header);
 	let hb = h.as_bytes();
 	let mut rdr = Cursor::new(hb);
@@ -163,7 +163,7 @@
 	/// Reset the main keys used for siphash from the header and nonce
 	pub fn reset_header_nonce(&mut self, header: Vec<u8>, nonce: Option<u32>) -> Result<(), Error> {
-		self.siphash_keys = set_header_nonce(header, nonce)?;
+		self.siphash_keys = set_header_nonce(&header, nonce)?;
 		Ok(())
 	}
 
@@ -175,7 +175,7 @@
 		);
 		let mut masked = hash_u64 & self.edge_mask.to_u64().ok_or(ErrorKind::IntegerCast)?;
 		if shift {
-			masked = masked << 1;
+			masked <<= 1;
 			masked |= uorv;
 		}
 		Ok(T::from(masked).ok_or(ErrorKind::IntegerCast)?)
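
The signature changes above all follow the same clippy `ptr_arg` guidance seen throughout this patch: take a slice (`&[T]`, `&[u8]`) rather than `&Vec<T>` or an owned `Vec<u8>`. A minimal sketch of why the slice form is the more flexible API; the `sum_squares_*` names are illustrative only and not part of this codebase:

// Hypothetical illustration of the `&Vec<T>` -> `&[T]` change; the
// `sum_squares_*` functions are not from the Grin codebase.

// Before: callers must hold a Vec; arrays and sub-slices would need an allocation.
fn sum_squares_vec(xs: &Vec<u64>) -> u64 {
	xs.iter().map(|x| x * x).sum()
}

// After: accepts a Vec (via deref coercion), an array, or any sub-slice.
fn sum_squares_slice(xs: &[u64]) -> u64 {
	xs.iter().map(|x| x * x).sum()
}

fn main() {
	let v = vec![1u64, 2, 3];
	let a = [4u64, 5, 6];
	assert_eq!(sum_squares_vec(&v), 14);
	assert_eq!(sum_squares_slice(&v), 14); // &Vec<u64> coerces to &[u64]
	assert_eq!(sum_squares_slice(&a), 77); // arrays work without allocating
	assert_eq!(sum_squares_slice(&v[1..]), 13); // so do sub-slices
}

Because `&Vec<T>` deref-coerces to `&[T]`, existing call sites that pass `&some_vec` keep compiling unchanged after the signature switch.
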
diff --git a/core/src/pow/cuckatoo.rs b/core/src/pow/cuckatoo.rs
index 189cc1482..d08f62e87 100644
--- a/core/src/pow/cuckatoo.rs
+++ b/core/src/pow/cuckatoo.rs
@@ -54,14 +54,14 @@ where
 	pub fn new(max_edges: T, max_sols: u32, proof_size: usize) -> Result<Graph<T>, Error> {
 		let max_nodes = 2 * to_u64!(max_edges);
 		Ok(Graph {
-			max_edges: max_edges,
-			max_nodes: max_nodes,
+			max_edges,
+			max_nodes,
+			max_sols,
+			proof_size,
 			links: vec![],
 			adj_list: vec![],
 			visited: Bitmap::create(),
-			max_sols: max_sols,
 			solutions: vec![],
-			proof_size: proof_size,
 			nil: T::max_value(),
 		})
 	}
@@ -241,7 +241,7 @@
 	/// Simple implementation of algorithm
-	pub fn find_cycles_iter<'a, I>(&mut self, iter: I) -> Result<Vec<Proof>, Error>
+	pub fn find_cycles_iter<I>(&mut self, iter: I) -> Result<Vec<Proof>, Error>
 	where
 		I: Iterator<Item = u64>,
 	{
@@ -260,7 +260,7 @@
 		for s in &self.graph.solutions {
 			self.verify_impl(&s)?;
 		}
-		if self.graph.solutions.len() == 0 {
+		if self.graph.solutions.is_empty() {
 			Err(ErrorKind::NoSolution)?
 		} else {
 			Ok(self.graph.solutions.clone())
diff --git a/core/src/pow/cuckoo.rs b/core/src/pow/cuckoo.rs
index c6afc6fe8..c0140f87f 100644
--- a/core/src/pow/cuckoo.rs
+++ b/core/src/pow/cuckoo.rs
@@ -77,7 +77,7 @@ where
 		let params = CuckooParams::new(edge_bits, proof_size)?;
 		let num_nodes = 2 * params.num_edges as usize;
 		Ok(CuckooContext {
-			params: params,
+			params,
 			graph: vec![T::zero(); num_nodes],
 			_max_sols: max_sols,
 		})
@@ -190,7 +190,7 @@
 		cycle.insert(Edge { u: us[0], v: vs[0] });
 		while nu != 0 {
 			// u's in even position; v's in odd
-			nu = nu - 1;
+			nu -= 1;
 			cycle.insert(Edge {
 				u: us[((nu + 1) & !1) as usize],
 				v: us[(nu | 1) as usize],
@@ -214,11 +214,11 @@
 				cycle.remove(&edge);
 			}
 		}
-		return if n == self.params.proof_size {
+		if n == self.params.proof_size {
 			Ok(sol)
 		} else {
 			Err(ErrorKind::NoCycle)?
-		};
+		}
 	}
 
 	/// Searches for a solution (simple implementation)
diff --git a/core/src/pow/error.rs b/core/src/pow/error.rs
index 55e48be05..7e1ac2324 100644
--- a/core/src/pow/error.rs
+++ b/core/src/pow/error.rs
@@ -85,7 +85,7 @@ impl From<ErrorKind> for Error {
 
 impl From<Context<ErrorKind>> for Error {
 	fn from(inner: Context<ErrorKind>) -> Error {
-		Error { inner: inner }
+		Error { inner }
 	}
 }
diff --git a/core/src/pow/lean.rs b/core/src/pow/lean.rs
index ffc4406dc..9d622ccb7 100644
--- a/core/src/pow/lean.rs
+++ b/core/src/pow/lean.rs
@@ -37,7 +37,7 @@ impl Lean {
 		// edge bitmap, before trimming all of them are on
 		let mut edges = Bitmap::create_with_capacity(params.num_edges as u32);
-		edges.flip_inplace(0..params.num_edges.into());
+		edges.flip_inplace(0..params.num_edges);
 		Lean { params, edges }
 	}
diff --git a/core/src/pow/mod.rs b/core/src/pow/mod.rs
index 97f8f3ad6..fc231f553 100644
--- a/core/src/pow/mod.rs
+++ b/core/src/pow/mod.rs
@@ -77,7 +77,7 @@ pub fn mine_genesis_block() -> Result<Block, Error> {
 	}
 
 	// total_difficulty on the genesis header *is* the difficulty of that block
-	let genesis_difficulty = gen.header.pow.total_difficulty.clone();
+	let genesis_difficulty = gen.header.pow.total_difficulty;
 
 	let sz = global::min_edge_bits();
 	let proof_size = global::proofsize();
diff --git a/core/src/pow/siphash.rs b/core/src/pow/siphash.rs
index fb12cf103..72ac38ce4 100644
--- a/core/src/pow/siphash.rs
+++ b/core/src/pow/siphash.rs
@@ -62,7 +62,7 @@ pub fn siphash24(v: &[u64; 4], nonce: u64) -> u64 {
 	round!();
 	round!();
 
-	return v0 ^ v1 ^ v2 ^ v3;
+	v0 ^ v1 ^ v2 ^ v3
 }
 
 #[cfg(test)]
diff --git a/core/src/ser.rs b/core/src/ser.rs
index 978645517..73495834e 100644
--- a/core/src/ser.rs
+++ b/core/src/ser.rs
@@ -92,10 +92,7 @@ impl error::Error for Error {
 	fn description(&self) -> &str {
 		match *self {
 			Error::IOErr(ref e, _) => e,
-			Error::UnexpectedData {
-				expected: _,
-				received: _,
-			} => "unexpected data",
+			Error::UnexpectedData { .. } => "unexpected data",
 			Error::CorruptedData => "corrupted data",
 			Error::TooLargeReadErr => "too large read",
 			Error::ConsensusError(_) => "consensus error (sort order)",
@@ -231,13 +228,13 @@ where
 
 /// Deserializes a Readeable from any std::io::Read implementation.
 pub fn deserialize<T: Readable>(source: &mut Read) -> Result<T, Error> {
-	let mut reader = BinReader { source: source };
+	let mut reader = BinReader { source };
 	T::read(&mut reader)
 }
 
 /// Serializes a Writeable into any std::io::Write implementation.
 pub fn serialize<W: Writeable>(sink: &mut Write, thing: &W) -> Result<(), Error> {
-	let mut writer = BinWriter { sink: sink };
+	let mut writer = BinWriter { sink };
 	thing.write(&mut writer)
 }
@@ -319,9 +316,7 @@ impl Readable for Commitment {
 	fn read(reader: &mut Reader) -> Result<Commitment, Error> {
 		let a = reader.read_fixed_bytes(PEDERSEN_COMMITMENT_SIZE)?;
 		let mut c = [0; PEDERSEN_COMMITMENT_SIZE];
-		for i in 0..PEDERSEN_COMMITMENT_SIZE {
-			c[i] = a[i];
-		}
+		c[..PEDERSEN_COMMITMENT_SIZE].clone_from_slice(&a[..PEDERSEN_COMMITMENT_SIZE]);
 		Ok(Commitment(c))
 	}
 }
@@ -368,9 +363,7 @@ impl Readable for RangeProof {
 	fn read(reader: &mut Reader) -> Result<RangeProof, Error> {
 		let p = reader.read_limited_vec(MAX_PROOF_SIZE)?;
 		let mut a = [0; MAX_PROOF_SIZE];
-		for i in 0..p.len() {
-			a[i] = p[i];
-		}
+		a[..p.len()].clone_from_slice(&p[..]);
 		Ok(RangeProof {
 			proof: a,
 			plen: p.len(),
@@ -388,9 +381,7 @@ impl Readable for Signature {
 	fn read(reader: &mut Reader) -> Result<Signature, Error> {
 		let a = reader.read_fixed_bytes(AGG_SIGNATURE_SIZE)?;
 		let mut c = [0; AGG_SIGNATURE_SIZE];
-		for i in 0..AGG_SIGNATURE_SIZE {
-			c[i] = a[i];
-		}
+		c[..AGG_SIGNATURE_SIZE].clone_from_slice(&a[..AGG_SIGNATURE_SIZE]);
 		Ok(Signature::from_raw_data(&c).unwrap())
 	}
 }
@@ -577,81 +568,81 @@ pub trait AsFixedBytes: Sized + AsRef<[u8]> {
 
 impl<'a> AsFixedBytes for &'a [u8] {
 	fn len(&self) -> usize {
-		return 1;
+		1
 	}
 }
 impl AsFixedBytes for Vec<u8> {
 	fn len(&self) -> usize {
-		return self.len();
+		self.len()
 	}
 }
 impl AsFixedBytes for [u8; 1] {
 	fn len(&self) -> usize {
-		return 1;
+		1
 	}
 }
 impl AsFixedBytes for [u8; 2] {
 	fn len(&self) -> usize {
-		return 2;
+		2
 	}
 }
 impl AsFixedBytes for [u8; 4] {
 	fn len(&self) -> usize {
-		return 4;
+		4
 	}
 }
 impl AsFixedBytes for [u8; 6] {
 	fn len(&self) -> usize {
-		return 6;
+		6
 	}
 }
 impl AsFixedBytes for [u8; 8] {
 	fn len(&self) -> usize {
-		return 8;
+		8
 	}
 }
 impl AsFixedBytes for [u8; 20] {
 	fn len(&self) -> usize {
-		return 20;
+		20
 	}
 }
 impl AsFixedBytes for [u8; 32] {
 	fn len(&self) -> usize {
-		return 32;
+		32
 	}
 }
 impl AsFixedBytes for String {
 	fn len(&self) -> usize {
-		return self.len();
+		self.len()
 	}
 }
 impl AsFixedBytes for ::core::hash::Hash {
 	fn len(&self) -> usize {
-		return 32;
+		32
 	}
 }
 impl AsFixedBytes for ::util::secp::pedersen::RangeProof {
 	fn len(&self) -> usize {
-		return self.plen;
+		self.plen
 	}
 }
 impl AsFixedBytes for ::util::secp::Signature {
 	fn len(&self) -> usize {
-		return 64;
+		64
 	}
 }
 impl AsFixedBytes for ::util::secp::pedersen::Commitment {
 	fn len(&self) -> usize {
-		return PEDERSEN_COMMITMENT_SIZE;
+		PEDERSEN_COMMITMENT_SIZE
 	}
 }
 impl AsFixedBytes for BlindingFactor {
 	fn len(&self) -> usize {
-		return SECRET_KEY_SIZE;
+		SECRET_KEY_SIZE
 	}
 }
 impl AsFixedBytes for ::keychain::Identifier {
 	fn len(&self) -> usize {
-		return IDENTIFIER_SIZE;
+		IDENTIFIER_SIZE
 	}
 }
diff --git a/core/tests/consensus.rs b/core/tests/consensus.rs
index 41f3d583f..b87ca7129 100644
--- a/core/tests/consensus.rs
+++ b/core/tests/consensus.rs
@@ -496,25 +496,31 @@ fn secondary_pow_scale() {
 	// all primary, factor should increase so it becomes easier to find a high
 	// difficulty block
 	assert_eq!(
-		secondary_pow_scaling(1, &(0..window).map(|_| hi.clone()).collect()),
+		secondary_pow_scaling(1, &(0..window).map(|_| hi.clone()).collect::<Vec<_>>()),
 		147
 	);
 	// all secondary on 90%, factor should go down a bit
 	hi.is_secondary = true;
 	assert_eq!(
-		secondary_pow_scaling(1, &(0..window).map(|_| hi.clone()).collect()),
+		secondary_pow_scaling(1, &(0..window).map(|_| hi.clone()).collect::<Vec<_>>()),
 		94
 	);
 	// all secondary on 1%, factor should go down to bound (divide by 2)
 	assert_eq!(
-		secondary_pow_scaling(890_000, &(0..window).map(|_| hi.clone()).collect()),
+		secondary_pow_scaling(
+			890_000,
+			&(0..window).map(|_| hi.clone()).collect::<Vec<_>>()
+		),
 		49
 	);
 	// same as above, testing lowest bound
 	let mut low_hi = HeaderInfo::from_diff_scaling(Difficulty::from_num(10), 3);
 	low_hi.is_secondary = true;
 	assert_eq!(
-		secondary_pow_scaling(890_000, &(0..window).map(|_| low_hi.clone()).collect()),
+		secondary_pow_scaling(
+			890_000,
+			&(0..window).map(|_| low_hi.clone()).collect::<Vec<_>>()
+		),
 		1
 	);
 	// just about the right ratio, also no longer playing with median
@@ -525,7 +531,7 @@
 		&(0..(window / 10))
 			.map(|_| primary_hi.clone())
 			.chain((0..(window * 9 / 10)).map(|_| hi.clone()))
-			.collect()
+			.collect::<Vec<_>>()
 		),
 		94
 	);
@@ -536,7 +542,7 @@
 		&(0..(window / 20))
 			.map(|_| primary_hi.clone())
 			.chain((0..(window * 95 / 100)).map(|_| hi.clone()))
-			.collect()
+			.collect::<Vec<_>>()
 		),
 		94
 	);
@@ -547,7 +553,7 @@
 		&(0..(window * 6 / 10))
 			.map(|_| primary_hi.clone())
 			.chain((0..(window * 4 / 10)).map(|_| hi.clone()))
-			.collect()
+			.collect::<Vec<_>>()
 		),
 		84
 	);
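
A knock-on effect of `secondary_pow_scaling` now taking `&[HeaderInfo]` shows up in the test changes above: with the old `&Vec<HeaderInfo>` parameter the compiler could infer what `collect()` should build from the call site, but a slice parameter no longer names a concrete collection, so the tests spell it out with a `::<Vec<_>>` turbofish. A small sketch of the mechanics, using a hypothetical `mean` function in place of the real one:

// Hypothetical sketch of why the turbofish became necessary; `mean` stands in
// for `secondary_pow_scaling` and is not the Grin function.
fn mean(data: &[u64]) -> u64 {
	if data.is_empty() {
		0
	} else {
		data.iter().sum::<u64>() / data.len() as u64
	}
}

fn main() {
	// Against a `&[u64]` parameter, `collect()` alone is ambiguous (it could
	// build any collection that derefs to a slice), so the element type and
	// container are pinned down explicitly with a turbofish:
	let avg = mean(&(0..10).map(|x| x * 2).collect::<Vec<_>>());
	assert_eq!(avg, 9); // (0 + 2 + ... + 18) / 10
}
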