Less cloning and pattern simplifications (#3216)

* Less cloning and pattern simplifications

* Revert inclusive range and remove unnecessary Error::From
Quentin Le Sceller 2020-02-05 11:02:07 -05:00 committed by GitHub
parent a41965e024
commit c4e69717ab
34 changed files with 153 additions and 263 deletions

View file

@@ -210,7 +210,7 @@ impl Chain {
pow_verifier,
verifier_cache,
archive_mode,
genesis: genesis.header.clone(),
genesis: genesis.header,
};
// DB migrations to be run prior to the chain being used.
@@ -308,7 +308,7 @@ impl Chain {
// but not yet committed the batch.
// A node shutdown at this point can be catastrophic...
// We prevent this via the stop_lock (see above).
if let Ok(_) = maybe_new_head {
if maybe_new_head.is_ok() {
ctx.batch.commit()?;
}
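A minimal sketch of the pattern this hunk applies (with a hypothetical `Result` value): matching `Ok(_)` only asks whether the call succeeded, so clippy's `redundant_pattern_matching` lint suggests `is_ok()` instead, or `is_err()` for negations as in the pipe.rs hunks further down.

```rust
// `if let Ok(_) = r` checks success without binding anything useful,
// so `r.is_ok()` says the same thing without a pattern match.
fn commit_if_ok(maybe_new_head: Result<u64, String>) -> bool {
    // Before: if let Ok(_) = maybe_new_head { ... }
    if maybe_new_head.is_ok() {
        return true; // e.g. commit the db batch here
    }
    false
}
```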
@@ -334,7 +334,7 @@ impl Chain {
added: Instant::now(),
};
&self.orphans.add(orphan);
self.orphans.add(orphan);
debug!(
"process_block: orphan: {:?}, # orphans {}{}",
@@ -364,7 +364,7 @@ impl Chain {
b.header.height,
e
);
Err(ErrorKind::Other(format!("{:?}", e).to_owned()).into())
Err(ErrorKind::Other(format!("{:?}", e)).into())
}
},
}
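`format!` already returns an owned `String`, so the `.to_owned()` removed in this hunk was cloning a freshly built value. A tiny sketch:

```rust
// format! produces an owned String; the trailing .to_owned() in the old
// code only added a second allocation of the same contents.
fn describe(e: &str) -> String {
    let before = format!("{:?}", e).to_owned(); // extra clone
    let after = format!("{:?}", e);             // same value, one allocation
    assert_eq!(before, after);
    after
}
```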
@@ -807,7 +807,7 @@ impl Chain {
while let Ok(header) = current {
// break out of the while loop when we find a header common
// between the header chain and the current body chain
if let Ok(_) = self.is_on_current_chain(&header) {
if self.is_on_current_chain(&header).is_ok() {
oldest_height = header.height;
oldest_hash = header.hash();
break;
@@ -992,7 +992,7 @@ impl Chain {
// Move sandbox to overwrite
txhashset.release_backend_files();
txhashset::txhashset_replace(sandbox_dir.clone(), PathBuf::from(self.db_root.clone()))?;
txhashset::txhashset_replace(sandbox_dir, PathBuf::from(self.db_root.clone()))?;
// Re-open on db root dir
txhashset = txhashset::TxHashSet::open(
@@ -1434,7 +1434,7 @@ impl Chain {
if chain_header.hash() == header.hash() {
Ok(())
} else {
Err(ErrorKind::Other(format!("not on current chain")).into())
Err(ErrorKind::Other("not on current chain".to_string()).into())
}
}
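`format!` with a bare literal and no interpolation routes a `&str` through the whole formatting machinery just to allocate a `String`; clippy's `useless_format` lint prefers a direct conversion, as in this hunk and many of the txhashset.rs hunks below:

```rust
// A literal needs no formatting; .to_string() is clearer and cheaper.
fn not_on_chain_msg() -> String {
    // Before: format!("not on current chain")
    "not on current chain".to_string()
}
```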

View file

@@ -63,7 +63,7 @@ fn validate_pow_only(header: &BlockHeader, ctx: &mut BlockContext<'_>) -> Result
if !header.pow.is_primary() && !header.pow.is_secondary() {
return Err(ErrorKind::LowEdgebits.into());
}
if !(ctx.pow_verifier)(header).is_ok() {
if (ctx.pow_verifier)(header).is_err() {
error!(
"pipe: error validating header with cuckoo edge_bits {}",
header.pow.edge_bits(),
@@ -385,7 +385,7 @@ fn validate_block(block: &Block, ctx: &mut BlockContext<'_>) -> Result<(), Error
let prev = ctx.batch.get_previous_header(&block.header)?;
block
.validate(&prev.total_kernel_offset, ctx.verifier_cache.clone())
.map_err(|e| ErrorKind::InvalidBlockProof(e))?;
.map_err(ErrorKind::InvalidBlockProof)?;
Ok(())
}
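Closures that only forward their argument can be replaced by the function itself (clippy's `redundant_closure`); tuple-variant constructors like `ErrorKind::InvalidBlockProof` are plain functions, which is what makes `map_err(ErrorKind::InvalidBlockProof)` work. Sketch with a hypothetical error enum:

```rust
#[derive(Debug)]
enum ErrorKind {
    InvalidBlockProof(String), // hypothetical payload type for the sketch
}

fn validate(res: Result<(), String>) -> Result<(), ErrorKind> {
    // Before: res.map_err(|e| ErrorKind::InvalidBlockProof(e))
    res.map_err(ErrorKind::InvalidBlockProof) // the variant is a fn(String) -> ErrorKind
}
```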
@@ -489,7 +489,7 @@ pub fn rewind_and_apply_header_fork(
) -> Result<(), Error> {
let mut fork_hashes = vec![];
let mut current = header.clone();
while current.height > 0 && !ext.is_on_current_chain(&current, batch).is_ok() {
while current.height > 0 && ext.is_on_current_chain(&current, batch).is_err() {
fork_hashes.push(current.hash());
current = batch.get_previous_header(&current)?;
}
@@ -530,9 +530,9 @@ pub fn rewind_and_apply_fork(
// Rewind the txhashset extension back to common ancestor based on header MMR.
let mut current = batch.head_header()?;
while current.height > 0
&& !header_extension
&& header_extension
.is_on_current_chain(&current, batch)
.is_ok()
.is_err()
{
current = batch.get_previous_header(&current)?;
}
@@ -552,7 +552,7 @@ pub fn rewind_and_apply_fork(
for h in fork_hashes {
let fb = batch
.get_block(&h)
.map_err(|e| ErrorKind::StoreErr(e, format!("getting forked blocks")))?;
.map_err(|e| ErrorKind::StoreErr(e, "getting forked blocks".to_string()))?;
// Re-verify coinbase maturity along this fork.
verify_coinbase_maturity(&fb, ext, batch)?;

View file

@@ -26,7 +26,7 @@ use grin_store as store;
use grin_store::{option_to_not_found, to_key, Error, SerIterator};
use std::sync::Arc;
const STORE_SUBPATH: &'static str = "chain";
const STORE_SUBPATH: &str = "chain";
const BLOCK_HEADER_PREFIX: u8 = 'h' as u8;
const BLOCK_PREFIX: u8 = 'b' as u8;
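References in `const` and `static` items already carry the `'static` lifetime, so writing it out is redundant (clippy's `redundant_static_lifetimes`); both declarations below have exactly the same type:

```rust
const OLD_STYLE: &'static str = "chain"; // before: explicit 'static
const NEW_STYLE: &str = "chain";         // after: 'static is implied
```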

View file

@@ -149,7 +149,7 @@ impl BitmapAccumulator {
let chunk_pos = pmmr::insertion_to_pmmr_index(chunk_idx + 1);
let rewind_pos = chunk_pos.saturating_sub(1);
pmmr.rewind(rewind_pos, &Bitmap::create())
.map_err(|e| ErrorKind::Other(e))?;
.map_err(ErrorKind::Other)?;
Ok(())
}

View file

@@ -36,13 +36,13 @@ use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::Instant;
const TXHASHSET_SUBDIR: &'static str = "txhashset";
const TXHASHSET_SUBDIR: &str = "txhashset";
const OUTPUT_SUBDIR: &'static str = "output";
const RANGE_PROOF_SUBDIR: &'static str = "rangeproof";
const KERNEL_SUBDIR: &'static str = "kernel";
const OUTPUT_SUBDIR: &str = "output";
const RANGE_PROOF_SUBDIR: &str = "rangeproof";
const KERNEL_SUBDIR: &str = "kernel";
const TXHASHSET_ZIP: &'static str = "txhashset_snapshot";
const TXHASHSET_ZIP: &str = "txhashset_snapshot";
/// Convenience wrapper around a single prunable MMR backend.
pub struct PMMRHandle<T: PMMRable> {
@@ -65,9 +65,9 @@ impl<T: PMMRable> PMMRHandle<T> {
) -> Result<PMMRHandle<T>, Error> {
let path = Path::new(root_dir).join(sub_dir).join(file_name);
fs::create_dir_all(path.clone())?;
let path_str = path.to_str().ok_or(Error::from(ErrorKind::Other(
"invalid file path".to_owned(),
)))?;
let path_str = path
.to_str()
.ok_or_else(|| ErrorKind::Other("invalid file path".to_owned()))?;
let backend = PMMRBackend::new(path_str.to_string(), prunable, version, header)?;
let last_pos = backend.unpruned_size();
Ok(PMMRHandle { backend, last_pos })
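`ok_or(err)` evaluates `err` eagerly even on the `Some` path, while `ok_or_else` defers the work to the `None` case (clippy's `or_fun_call`). A reduced sketch of the new code, with a plain `String` error standing in for the chain's error type:

```rust
use std::path::Path;

// The closure (and its String allocation) only runs when to_str() is None.
fn path_str(path: &Path) -> Result<&str, String> {
    path.to_str()
        .ok_or_else(|| "invalid file path".to_owned())
}
```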
@@ -82,7 +82,7 @@ impl PMMRHandle<BlockHeader> {
if let Some(entry) = header_pmmr.get_data(pos) {
Ok(entry.hash())
} else {
Err(ErrorKind::Other(format!("get header hash by height")).into())
Err(ErrorKind::Other("get header hash by height".to_string()).into())
}
}
@@ -90,14 +90,14 @@ impl PMMRHandle<BlockHeader> {
/// Find the last leaf pos based on MMR size and return its header hash.
pub fn head_hash(&self) -> Result<Hash, Error> {
if self.last_pos == 0 {
return Err(ErrorKind::Other(format!("MMR empty, no head")).into());
return Err(ErrorKind::Other("MMR empty, no head".to_string()).into());
}
let header_pmmr = ReadonlyPMMR::at(&self.backend, self.last_pos);
let leaf_pos = pmmr::bintree_rightmost(self.last_pos);
if let Some(entry) = header_pmmr.get_data(leaf_pos) {
Ok(entry.hash())
} else {
Err(ErrorKind::Other(format!("failed to find head hash")).into())
Err(ErrorKind::Other("failed to find head hash".to_string()).into())
}
}
}
@@ -200,7 +200,7 @@ impl TxHashSet {
commit_index,
})
} else {
Err(ErrorKind::TxHashSetErr(format!("failed to open kernel PMMR")).into())
Err(ErrorKind::TxHashSetErr("failed to open kernel PMMR".to_string()).into())
}
}
@@ -236,14 +236,14 @@ impl TxHashSet {
height: block_height,
})
} else {
Err(ErrorKind::TxHashSetErr(format!("txhashset hash mismatch")).into())
Err(ErrorKind::TxHashSetErr("txhashset hash mismatch".to_string()).into())
}
} else {
Err(ErrorKind::OutputNotFound.into())
}
}
Err(grin_store::Error::NotFoundErr(_)) => Err(ErrorKind::OutputNotFound.into()),
Err(e) => Err(ErrorKind::StoreErr(e, format!("txhashset unspent check")).into()),
Err(e) => Err(ErrorKind::StoreErr(e, "txhashset unspent check".to_string()).into()),
}
}
@@ -739,7 +739,7 @@ impl<'a> HeaderExtension<'a> {
if let Some(hash) = self.get_header_hash(pos) {
Ok(batch.get_block_header(&hash)?)
} else {
Err(ErrorKind::Other(format!("get header by height")).into())
Err(ErrorKind::Other("get header by height".to_string()).into())
}
}
@@ -751,13 +751,13 @@ impl<'a> HeaderExtension<'a> {
batch: &Batch<'_>,
) -> Result<(), Error> {
if header.height > self.head.height {
return Err(ErrorKind::Other(format!("not on current chain, out beyond")).into());
return Err(ErrorKind::Other("not on current chain, out beyond".to_string()).into());
}
let chain_header = self.get_header_by_height(header.height, batch)?;
if chain_header.hash() == header.hash() {
Ok(())
} else {
Err(ErrorKind::Other(format!("not on current chain")).into())
Err(ErrorKind::Other("not on current chain".to_string()).into())
}
}
@@ -966,7 +966,7 @@ impl<'a> Extension<'a> {
if let Some(hash) = self.output_pmmr.get_hash(pos) {
if hash != input.hash_with_index(pos - 1) {
return Err(
ErrorKind::TxHashSetErr(format!("output pmmr hash mismatch")).into(),
ErrorKind::TxHashSetErr("output pmmr hash mismatch".to_string()).into(),
);
}
}
@@ -978,7 +978,7 @@ impl<'a> Extension<'a> {
Ok(true) => {
self.rproof_pmmr
.prune(pos)
.map_err(|e| ErrorKind::TxHashSetErr(e))?;
.map_err(ErrorKind::TxHashSetErr)?;
Ok(pos)
}
Ok(false) => Err(ErrorKind::AlreadySpent(commit).into()),
@@ -1016,13 +1016,13 @@ impl<'a> Extension<'a> {
{
if self.output_pmmr.unpruned_size() != self.rproof_pmmr.unpruned_size() {
return Err(
ErrorKind::Other(format!("output vs rproof MMRs different sizes")).into(),
ErrorKind::Other("output vs rproof MMRs different sizes".to_string()).into(),
);
}
if output_pos != rproof_pos {
return Err(
ErrorKind::Other(format!("output vs rproof MMRs different pos")).into(),
ErrorKind::Other("output vs rproof MMRs different pos".to_string()).into(),
);
}
}
@@ -1067,10 +1067,10 @@ impl<'a> Extension<'a> {
let header = batch.get_block_header(&self.head.last_block_h)?;
self.output_pmmr
.snapshot(&header)
.map_err(|e| ErrorKind::Other(e))?;
.map_err(ErrorKind::Other)?;
self.rproof_pmmr
.snapshot(&header)
.map_err(|e| ErrorKind::Other(e))?;
.map_err(ErrorKind::Other)?;
Ok(())
}
@@ -1244,7 +1244,7 @@ impl<'a> Extension<'a> {
if self.head.height == 0 {
let zero_commit = secp_static::commit_to_zero_value();
return Ok((zero_commit.clone(), zero_commit.clone()));
return Ok((zero_commit, zero_commit));
}
// The real magicking happens here. Sum of kernel excesses should equal
@@ -1312,7 +1312,7 @@ impl<'a> Extension<'a> {
let kernel = self
.kernel_pmmr
.get_data(n)
.ok_or::<Error>(ErrorKind::TxKernelNotFound.into())?;
.ok_or_else(|| ErrorKind::TxKernelNotFound)?;
tx_kernels.push(kernel);
}
@@ -1379,7 +1379,7 @@ impl<'a> Extension<'a> {
}
// remaining part which not full of 1000 range proofs
if proofs.len() > 0 {
if !proofs.is_empty() {
Output::batch_verify_proofs(&commits, &proofs)?;
commits.clear();
proofs.clear();
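`proofs.len() > 0` and `!proofs.is_empty()` agree for any collection, but `is_empty` states the intent directly (clippy's `len_zero`):

```rust
// is_empty reads as a question about emptiness, not arithmetic on len().
fn has_pending(proofs: &[Vec<u8>]) -> bool {
    !proofs.is_empty() // before: proofs.len() > 0
}
```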
@@ -1509,7 +1509,7 @@ pub fn zip_write(
header: &BlockHeader,
) -> Result<(), Error> {
debug!("zip_write on path: {:?}", root_dir);
let txhashset_path = root_dir.clone().join(TXHASHSET_SUBDIR);
let txhashset_path = root_dir.join(TXHASHSET_SUBDIR);
fs::create_dir_all(&txhashset_path)?;
// Explicit list of files to extract from our zip archive.
@@ -1531,12 +1531,9 @@ pub fn txhashset_replace(from: PathBuf, to: PathBuf) -> Result<(), Error> {
clean_txhashset_folder(&to);
// rename the 'from' folder as the 'to' folder
if let Err(e) = fs::rename(
from.clone().join(TXHASHSET_SUBDIR),
to.clone().join(TXHASHSET_SUBDIR),
) {
if let Err(e) = fs::rename(from.join(TXHASHSET_SUBDIR), to.join(TXHASHSET_SUBDIR)) {
error!("hashset_replace fail on {}. err: {}", TXHASHSET_SUBDIR, e);
Err(ErrorKind::TxHashSetErr(format!("txhashset replacing fail")).into())
Err(ErrorKind::TxHashSetErr("txhashset replacing fail".to_string()).into())
} else {
Ok(())
}

View file

@@ -136,7 +136,7 @@ impl<'a> UTXOView<'a> {
// Find the "cutoff" pos in the output MMR based on the
// header from 1,000 blocks ago.
let cutoff_height = height.checked_sub(global::coinbase_maturity()).unwrap_or(0);
let cutoff_height = height.saturating_sub(global::coinbase_maturity());
let cutoff_header = self.get_header_by_height(cutoff_height, batch)?;
let cutoff_pos = cutoff_header.output_mmr_size;
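On unsigned integers, `checked_sub(x).unwrap_or(0)` is exactly `saturating_sub(x)`: subtract, clamping at zero instead of underflowing. A sketch of the cutoff computation, with the maturity passed in as a parameter:

```rust
fn cutoff_height(height: u64, coinbase_maturity: u64) -> u64 {
    debug_assert_eq!(
        height.checked_sub(coinbase_maturity).unwrap_or(0), // before
        height.saturating_sub(coinbase_maturity),           // after
    );
    height.saturating_sub(coinbase_maturity)
}
```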
@@ -168,7 +168,7 @@ impl<'a> UTXOView<'a> {
let header = batch.get_block_header(&hash)?;
Ok(header)
} else {
Err(ErrorKind::Other(format!("get header by height")).into())
Err(ErrorKind::Other("get header by height".to_string()).into())
}
}
}

View file

@@ -174,7 +174,7 @@ pub fn header_version(height: u64) -> HeaderVersion {
/// Check whether the block version is valid at a given height, implements
/// 6 months interval scheduled hard forks for the first 2 years.
pub fn valid_header_version(height: u64, version: HeaderVersion) -> bool {
return height < 3 * HARD_FORK_INTERVAL && version == header_version(height);
height < 3 * HARD_FORK_INTERVAL && version == header_version(height)
}
/// Number of blocks used to calculate difficulty adjustments
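The last expression of a Rust function is its return value, so the explicit `return ...;` dropped here was noise (clippy's `needless_return`). Sketch, with a hypothetical interval constant and version numbers standing in for the real types:

```rust
const HARD_FORK_INTERVAL: u64 = 100_000; // hypothetical value for the sketch

fn valid_header_version(height: u64, version: u16, expected: u16) -> bool {
    // Before: return height < 3 * HARD_FORK_INTERVAL && version == expected;
    height < 3 * HARD_FORK_INTERVAL && version == expected
}
```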

View file

@@ -67,7 +67,7 @@ impl<H: Hashed> ShortIdentifiable for H {
}
/// Short id for identifying inputs/outputs/kernels
#[derive(Clone, Serialize, Deserialize, Hash)]
#[derive(Clone, Serialize, Deserialize)]
pub struct ShortId([u8; 6]);
impl DefaultHashable for ShortId {}

View file

@@ -347,7 +347,7 @@ where
let mut last_ts = last_n.last().unwrap().timestamp;
for _ in n..needed_block_count {
last_ts = last_ts.saturating_sub(last_ts_delta);
last_n.push(HeaderInfo::from_ts_diff(last_ts, last_diff.clone()));
last_n.push(HeaderInfo::from_ts_diff(last_ts, last_diff));
}
}
last_n.reverse();

View file

@@ -235,7 +235,7 @@ where
// Store the kernel offset (k2) on the tx.
// Commitments will sum correctly when accounting for the offset.
tx.offset = k2.clone();
tx.offset = k2;
// Set the kernel on the tx.
let tx = tx.replace_kernel(kern);

View file

@@ -163,9 +163,8 @@ where
&self.rewind_hash
};
let res = blake2b(32, &commit.0, hash);
SecretKey::from_slice(self.keychain.secp(), res.as_bytes()).map_err(|e| {
ErrorKind::RangeProof(format!("Unable to create nonce: {:?}", e).to_string()).into()
})
SecretKey::from_slice(self.keychain.secp(), res.as_bytes())
.map_err(|e| ErrorKind::RangeProof(format!("Unable to create nonce: {:?}", e)).into())
}
}
@@ -279,9 +278,8 @@ where
fn nonce(&self, commit: &Commitment) -> Result<SecretKey, Error> {
let res = blake2b(32, &commit.0, &self.root_hash);
SecretKey::from_slice(self.keychain.secp(), res.as_bytes()).map_err(|e| {
ErrorKind::RangeProof(format!("Unable to create nonce: {:?}", e).to_string()).into()
})
SecretKey::from_slice(self.keychain.secp(), res.as_bytes())
.map_err(|e| ErrorKind::RangeProof(format!("Unable to create nonce: {:?}", e)).into())
}
}
@@ -365,9 +363,8 @@ where
impl ProofBuild for ViewKey {
fn rewind_nonce(&self, secp: &Secp256k1, commit: &Commitment) -> Result<SecretKey, Error> {
let res = blake2b(32, &commit.0, &self.rewind_hash);
SecretKey::from_slice(secp, res.as_bytes()).map_err(|e| {
ErrorKind::RangeProof(format!("Unable to create nonce: {:?}", e).to_string()).into()
})
SecretKey::from_slice(secp, res.as_bytes())
.map_err(|e| ErrorKind::RangeProof(format!("Unable to create nonce: {:?}", e)).into())
}
fn private_nonce(&self, _secp: &Secp256k1, _commit: &Commitment) -> Result<SecretKey, Error> {
@@ -452,17 +449,8 @@ mod tests {
let id = ExtKeychain::derive_key_id(3, rng.gen(), rng.gen(), rng.gen(), 0);
let switch = SwitchCommitmentType::Regular;
let commit = keychain.commit(amount, &id, switch).unwrap();
let proof = create(
&keychain,
&builder,
amount,
&id,
switch,
commit.clone(),
None,
)
.unwrap();
assert!(verify(&keychain.secp(), commit.clone(), proof.clone(), None).is_ok());
let proof = create(&keychain, &builder, amount, &id, switch, commit, None).unwrap();
assert!(verify(&keychain.secp(), commit, proof, None).is_ok());
let rewind = rewind(keychain.secp(), &builder, commit, None, proof).unwrap();
assert!(rewind.is_some());
let (r_amount, r_id, r_switch) = rewind.unwrap();
@@ -482,18 +470,9 @@ mod tests {
let commit_a = {
let switch = SwitchCommitmentType::Regular;
let commit = keychain.commit(amount, &id, switch).unwrap();
let proof = create(
&keychain,
&builder,
amount,
&id,
switch,
commit.clone(),
None,
)
.unwrap();
assert!(verify(&keychain.secp(), commit.clone(), proof.clone(), None).is_ok());
let rewind = rewind(keychain.secp(), &builder, commit.clone(), None, proof).unwrap();
let proof = create(&keychain, &builder, amount, &id, switch, commit, None).unwrap();
assert!(verify(&keychain.secp(), commit, proof, None).is_ok());
let rewind = rewind(keychain.secp(), &builder, commit, None, proof).unwrap();
assert!(rewind.is_some());
let (r_amount, r_id, r_switch) = rewind.unwrap();
assert_eq!(r_amount, amount);
@@ -505,18 +484,9 @@ mod tests {
let commit_b = {
let switch = SwitchCommitmentType::None;
let commit = keychain.commit(amount, &id, switch).unwrap();
let proof = create(
&keychain,
&builder,
amount,
&id,
switch,
commit.clone(),
None,
)
.unwrap();
assert!(verify(&keychain.secp(), commit.clone(), proof.clone(), None).is_ok());
let rewind = rewind(keychain.secp(), &builder, commit.clone(), None, proof).unwrap();
let proof = create(&keychain, &builder, amount, &id, switch, commit, None).unwrap();
assert!(verify(&keychain.secp(), commit, proof, None).is_ok());
let rewind = rewind(keychain.secp(), &builder, commit, None, proof).unwrap();
assert!(rewind.is_some());
let (r_amount, r_id, r_switch) = rewind.unwrap();
assert_eq!(r_amount, amount);
@@ -583,18 +553,9 @@ mod tests {
let commit = keychain.commit(amount, &id, switch).unwrap();
// Generate proof with ProofBuilder..
let proof = create(
&keychain,
&builder,
amount,
&id,
switch,
commit.clone(),
None,
)
.unwrap();
let proof = create(&keychain, &builder, amount, &id, switch, commit, None).unwrap();
// ..and rewind with ViewKey
let rewind = rewind(keychain.secp(), &view_key, commit.clone(), None, proof);
let rewind = rewind(keychain.secp(), &view_key, commit, None, proof);
assert!(rewind.is_ok());
let rewind = rewind.unwrap();
@@ -628,18 +589,9 @@ mod tests {
let commit = keychain.commit(amount, &id, switch).unwrap();
// Generate proof with ProofBuilder..
let proof = create(
&keychain,
&builder,
amount,
&id,
switch,
commit.clone(),
None,
)
.unwrap();
let proof = create(&keychain, &builder, amount, &id, switch, commit, None).unwrap();
// ..and rewind with ViewKey
let rewind = rewind(keychain.secp(), &view_key, commit.clone(), None, proof);
let rewind = rewind(keychain.secp(), &view_key, commit, None, proof);
assert!(rewind.is_ok());
let rewind = rewind.unwrap();
@@ -680,24 +632,9 @@ mod tests {
let commit = keychain.commit(amount, &id, switch).unwrap();
// Generate proof with ProofBuilder..
let proof = create(
&keychain,
&builder,
amount,
&id,
switch,
commit.clone(),
None,
)
.unwrap();
let proof = create(&keychain, &builder, amount, &id, switch, commit, None).unwrap();
// ..and rewind with child ViewKey
let rewind = rewind(
keychain.secp(),
&child_view_key,
commit.clone(),
None,
proof,
);
let rewind = rewind(keychain.secp(), &child_view_key, commit, None, proof);
assert!(rewind.is_ok());
let rewind = rewind.unwrap();
@@ -731,24 +668,9 @@ mod tests {
let commit = keychain.commit(amount, &id, switch).unwrap();
// Generate proof with ProofBuilder..
let proof = create(
&keychain,
&builder,
amount,
&id,
switch,
commit.clone(),
None,
)
.unwrap();
let proof = create(&keychain, &builder, amount, &id, switch, commit, None).unwrap();
// ..and rewind with child ViewKey
let rewind = rewind(
keychain.secp(),
&child_view_key,
commit.clone(),
None,
proof,
);
let rewind = rewind(keychain.secp(), &child_view_key, commit, None, proof);
assert!(rewind.is_ok());
let rewind = rewind.unwrap();

View file

@@ -173,10 +173,10 @@ mod test {
#[test]
fn cuckaroo19_vectors() {
let mut ctx = new_impl::<u64>(19, 42);
ctx.params.siphash_keys = V1_19_HASH.clone();
assert!(ctx.verify(&Proof::new(V1_19_SOL.to_vec().clone())).is_ok());
ctx.params.siphash_keys = V1_19_HASH;
assert!(ctx.verify(&Proof::new(V1_19_SOL.to_vec())).is_ok());
ctx.params.siphash_keys = V2_19_HASH.clone();
assert!(ctx.verify(&Proof::new(V2_19_SOL.to_vec().clone())).is_ok());
assert!(ctx.verify(&Proof::new(V2_19_SOL.to_vec())).is_ok());
assert!(ctx.verify(&Proof::zero(42)).is_err());
}

View file

@@ -175,16 +175,12 @@ mod test {
#[test]
fn cuckarood19_29_vectors() {
let mut ctx19 = new_impl::<u64>(19, 42);
ctx19.params.siphash_keys = V1_19_HASH.clone();
assert!(ctx19
.verify(&Proof::new(V1_19_SOL.to_vec().clone()))
.is_ok());
ctx19.params.siphash_keys = V1_19_HASH;
assert!(ctx19.verify(&Proof::new(V1_19_SOL.to_vec())).is_ok());
assert!(ctx19.verify(&Proof::zero(42)).is_err());
let mut ctx29 = new_impl::<u64>(29, 42);
ctx29.params.siphash_keys = V2_29_HASH.clone();
assert!(ctx29
.verify(&Proof::new(V2_29_SOL.to_vec().clone()))
.is_ok());
ctx29.params.siphash_keys = V2_29_HASH;
assert!(ctx29.verify(&Proof::new(V2_29_SOL.to_vec())).is_ok());
assert!(ctx29.verify(&Proof::zero(42)).is_err());
}

View file

@@ -168,16 +168,12 @@ mod test {
#[test]
fn cuckaroom19_29_vectors() {
let mut ctx19 = new_impl::<u64>(19, 42);
ctx19.params.siphash_keys = V1_19_HASH.clone();
assert!(ctx19
.verify(&Proof::new(V1_19_SOL.to_vec().clone()))
.is_ok());
ctx19.params.siphash_keys = V1_19_HASH;
assert!(ctx19.verify(&Proof::new(V1_19_SOL.to_vec())).is_ok());
assert!(ctx19.verify(&Proof::zero(42)).is_err());
let mut ctx29 = new_impl::<u64>(29, 42);
ctx29.params.siphash_keys = V2_29_HASH.clone();
assert!(ctx29
.verify(&Proof::new(V2_29_SOL.to_vec().clone()))
.is_ok());
ctx29.params.siphash_keys = V2_29_HASH;
assert!(ctx29.verify(&Proof::new(V2_29_SOL.to_vec())).is_ok());
assert!(ctx29.verify(&Proof::zero(42)).is_err());
}

View file

@@ -409,7 +409,7 @@ mod test {
{
let mut ctx = CuckatooContext::<u32>::new_impl(29, 42, 10).unwrap();
ctx.set_header_nonce([0u8; 80].to_vec(), Some(20), false)?;
assert!(ctx.verify(&Proof::new(V1_29.to_vec().clone())).is_ok());
assert!(ctx.verify(&Proof::new(V1_29.to_vec())).is_ok());
Ok(())
}
@@ -419,7 +419,7 @@ mod test {
{
let mut ctx = CuckatooContext::<u32>::new_impl(31, 42, 10).unwrap();
ctx.set_header_nonce([0u8; 80].to_vec(), Some(99), false)?;
assert!(ctx.verify(&Proof::new(V1_31.to_vec().clone())).is_ok());
assert!(ctx.verify(&Proof::new(V1_31.to_vec())).is_ok());
Ok(())
}
@@ -431,11 +431,11 @@ mod test {
let mut header = [0u8; 80];
header[0] = 1u8;
ctx.set_header_nonce(header.to_vec(), Some(20), false)?;
assert!(!ctx.verify(&Proof::new(V1_29.to_vec().clone())).is_ok());
assert!(!ctx.verify(&Proof::new(V1_29.to_vec())).is_ok());
header[0] = 0u8;
ctx.set_header_nonce(header.to_vec(), Some(20), false)?;
assert!(ctx.verify(&Proof::new(V1_29.to_vec().clone())).is_ok());
let mut bad_proof = V1_29.clone();
assert!(ctx.verify(&Proof::new(V1_29.to_vec())).is_ok());
let mut bad_proof = V1_29;
bad_proof[0] = 0x48a9e1;
assert!(!ctx.verify(&Proof::new(bad_proof.to_vec())).is_ok());
Ok(())

View file

@@ -61,7 +61,7 @@ pub fn siphash_block(v: &[u64; 4], nonce: u64, rot_e: u8, xor_all: bool) -> u64
for i in xor_from..SIPHASH_BLOCK_SIZE {
xor ^= nonce_hash[i as usize];
}
return xor;
xor
}
/// Implements siphash 2-4 specialized for a 4 u64 array key and a u64 nonce

View file

@@ -96,7 +96,7 @@ impl Difficulty {
}
/// Converts the difficulty into a u64
pub fn to_num(&self) -> u64 {
pub fn to_num(self) -> u64 {
self.num
}
}
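For small `Copy` types, taking `self` by value costs the same as `&self` and removes an indirection (clippy's `trivially_copy_pass_by_ref`). A stand-in struct mirroring the shape of `Difficulty`:

```rust
#[derive(Clone, Copy)]
struct Difficulty {
    num: u64,
}

impl Difficulty {
    // Before: pub fn to_num(&self) -> u64 — a reference to 8 bytes.
    pub fn to_num(self) -> u64 {
        self.num // self is Copy, so callers keep their value
    }
}
```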
@@ -389,7 +389,7 @@ impl Proof {
}
}
fn extract_bits(bits: &Vec<u8>, bit_start: usize, bit_count: usize, read_from: usize) -> u64 {
fn extract_bits(bits: &[u8], bit_start: usize, bit_count: usize, read_from: usize) -> u64 {
let mut buf: [u8; 8] = [0; 8];
buf.copy_from_slice(&bits[read_from..read_from + 8]);
if bit_count == 64 {
@@ -400,7 +400,7 @@ fn extract_bits(bits: &Vec<u8>, bit_start: usize, bit_count: usize, read_from: u
u64::from_le_bytes(buf) >> skip_bits & bit_mask
}
fn read_number(bits: &Vec<u8>, bit_start: usize, bit_count: usize) -> u64 {
fn read_number(bits: &[u8], bit_start: usize, bit_count: usize) -> u64 {
if bit_count == 0 {
return 0;
}
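Accepting `&[u8]` instead of `&Vec<u8>` (clippy's `ptr_arg`) lets callers pass arrays and slices too, while existing call sites with a `Vec` need no change: `&Vec<u8>` deref-coerces to `&[u8]`. A toy sketch (the body is illustrative, not the real bit extraction):

```rust
fn first_byte(bits: &[u8]) -> u64 {
    bits.first().copied().map(u64::from).unwrap_or(0)
}

fn caller() -> u64 {
    let v: Vec<u8> = vec![1, 2, 3];
    first_byte(&v) // a &Vec<u8> argument coerces to &[u8]
}
```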

View file

@@ -1255,7 +1255,7 @@ where
}
}
}
const VARIANTS: &'static [&'static str] = &[
const VARIANTS: &'static [&str] = &[
"NotFound",
"PermissionDenied",
"ConnectionRefused",

View file

@@ -100,7 +100,7 @@ fn block_with_cut_through() {
// spending tx2 - reuse key_id2
let mut btx3 = txspend1i1o(5, &keychain, &builder, key_id2.clone(), key_id3);
let mut btx3 = txspend1i1o(5, &keychain, &builder, key_id2, key_id3);
let prev = BlockHeader::default();
let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let b = new_block(
@@ -135,7 +135,7 @@ fn empty_block_with_coinbase_is_valid() {
.outputs()
.iter()
.filter(|out| out.is_coinbase())
.map(|o| o.clone())
.cloned()
.collect::<Vec<_>>();
assert_eq!(coinbase_outputs.len(), 1);
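`Iterator::cloned` is the idiomatic spelling of `.map(|o| o.clone())` when iterating over references to `Clone` items:

```rust
// .cloned() clones each &String into a String, same as the closure did.
fn coinbase_like(outputs: &[String]) -> Vec<String> {
    outputs
        .iter()
        .filter(|o| o.starts_with('c'))
        .cloned() // before: .map(|o| o.clone())
        .collect()
}
```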
@@ -143,7 +143,7 @@ fn empty_block_with_coinbase_is_valid() {
.kernels()
.iter()
.filter(|out| out.is_coinbase())
.map(|o| o.clone())
.cloned()
.collect::<Vec<_>>();
assert_eq!(coinbase_kernels.len(), 1);
@@ -494,11 +494,7 @@ fn same_amount_outputs_copy_range_proof() {
let key_id = keychain::ExtKeychain::derive_key_id(1, 4, 0, 0, 0);
let prev = BlockHeader::default();
let b = new_block(
vec![&mut Transaction::new(
ins.clone(),
outs.clone(),
kernels.clone(),
)],
vec![&mut Transaction::new(ins.clone(), outs, kernels.clone())],
&keychain,
&builder,
&prev,
@@ -551,11 +547,7 @@ fn wrong_amount_range_proof() {
let key_id = keychain::ExtKeychain::derive_key_id(1, 4, 0, 0, 0);
let prev = BlockHeader::default();
let b = new_block(
vec![&mut Transaction::new(
ins.clone(),
outs.clone(),
kernels.clone(),
)],
vec![&mut Transaction::new(ins.clone(), outs, kernels.clone())],
&keychain,
&builder,
&prev,
@@ -598,7 +590,7 @@ fn validate_header_proof() {
assert!(BlockHeader::from_pre_pow_and_proof(
"0xaf1678".to_string(),
b.header.pow.nonce,
b.header.pow.proof.clone(),
b.header.pow.proof,
)
.is_err());
}

View file

@@ -137,7 +137,7 @@ impl PMMRable for TestElem {
type E = Self;
fn as_elmt(&self) -> Self::E {
self.clone()
*self
}
fn elmt_size() -> Option<u16> {

View file

@@ -118,7 +118,7 @@ fn next_target_adjustment() {
// We should never drop below minimum
hi.difficulty = Difficulty::zero();
assert_eq!(
next_difficulty(1, repeat(90, hi.clone(), just_enough, None)).difficulty,
next_difficulty(1, repeat(90, hi, just_enough, None)).difficulty,
Difficulty::min()
);
}
@@ -132,7 +132,7 @@ fn repeat(interval: u64, diff: HeaderInfo, len: u64, cur_time: Option<u64>) -> V
};
// watch overflow here, length shouldn't be ridiculous anyhow
assert!(len < std::usize::MAX as u64);
let diffs = vec![diff.difficulty.clone(); len as usize];
let diffs = vec![diff.difficulty; len as usize];
let times = (0..(len as usize)).map(|n| n * interval as usize).rev();
let pairs = times.zip(diffs.iter());
pairs
@@ -140,7 +140,7 @@ fn repeat(interval: u64, diff: HeaderInfo, len: u64, cur_time: Option<u64>) -> V
HeaderInfo::new(
diff.block_hash,
cur_time + t as u64,
d.clone(),
*d,
diff.secondary_scaling,
diff.is_secondary,
)

View file

@@ -193,7 +193,7 @@ fn add_block_repeated(
chain_sim: Vec<(HeaderInfo, DiffStats)>,
iterations: usize,
) -> Vec<(HeaderInfo, DiffStats)> {
let mut return_chain = chain_sim.clone();
let mut return_chain = chain_sim;
for _ in 0..iterations {
return_chain = add_block(interval, return_chain.clone());
}
@@ -203,7 +203,7 @@ fn print_chain_sim(chain_sim: Vec<(HeaderInfo, DiffStats)>) {
// Prints the contents of the iterator and its difficulties.. useful for
// tweaking
fn print_chain_sim(chain_sim: Vec<(HeaderInfo, DiffStats)>) {
let mut chain_sim = chain_sim.clone();
let mut chain_sim = chain_sim;
chain_sim.reverse();
let mut last_time = 0;
let mut first = true;
@@ -233,7 +233,7 @@ fn print_chain_sim(chain_sim: Vec<(HeaderInfo, DiffStats)>) {
stats.earliest_ts,
stats.ts_delta,
);
let mut sb = stats.last_blocks.clone();
let mut sb = stats.last_blocks;
sb.reverse();
for i in sb {
println!(" {}", i);
@@ -282,7 +282,7 @@ fn adjustment_scenarios() {
let chain_sim = add_block_repeated(60, chain_sim, just_enough as usize);
let chain_sim = add_block_repeated(600, chain_sim, 60);
println!("");
println!();
println!("*********************************************************");
println!("Scenario 3) Sudden drop in hashpower");
println!("*********************************************************");
@@ -294,7 +294,7 @@ fn adjustment_scenarios() {
let chain_sim = add_block_repeated(60, chain_sim, just_enough as usize);
let chain_sim = add_block_repeated(10, chain_sim, 10);
println!("");
println!();
println!("*********************************************************");
println!("Scenario 4) Sudden increase in hashpower");
println!("*********************************************************");
@@ -308,7 +308,7 @@ fn adjustment_scenarios() {
let chain_sim = add_block_repeated(60, chain_sim, 20);
let chain_sim = add_block_repeated(10, chain_sim, 10);
println!("");
println!();
println!("*********************************************************");
println!("Scenario 5) Oscillations in hashpower");
println!("*********************************************************");

View file

@@ -98,7 +98,7 @@ fn test_zero_commit_fails() {
// blinding should fail as signing with a zero r*G shouldn't work
build::transaction(
KernelFeatures::Plain { fee: 0 },
vec![input(10, key_id1.clone()), output(10, key_id1.clone())],
vec![input(10, key_id1.clone()), output(10, key_id1)],
&keychain,
&builder,
)
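The clones removed throughout these tests follow one rule: a value's last use can move it, and only a use that is followed by another use needs a `.clone()`. Illustrated with `String`:

```rust
fn demo() {
    let key_id = String::from("key_id1");
    let first = key_id.clone(); // used again below, so clone here
    let last = key_id;          // final use: move, no clone needed
    assert_eq!(first, last);
}
```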
@@ -177,8 +177,8 @@ fn multi_kernel_transaction_deaggregation() {
assert!(tx4.validate(Weighting::AsTransaction, vc.clone()).is_ok());
let tx1234 = aggregate(vec![tx1.clone(), tx2.clone(), tx3.clone(), tx4.clone()]).unwrap();
let tx12 = aggregate(vec![tx1.clone(), tx2.clone()]).unwrap();
let tx34 = aggregate(vec![tx3.clone(), tx4.clone()]).unwrap();
let tx12 = aggregate(vec![tx1, tx2]).unwrap();
let tx34 = aggregate(vec![tx3, tx4]).unwrap();
assert!(tx1234
.validate(Weighting::AsTransaction, vc.clone())
@@ -192,7 +192,7 @@ fn multi_kernel_transaction_deaggregation() {
.is_ok());
assert_eq!(tx34, deaggregated_tx34);
let deaggregated_tx12 = deaggregate(tx1234.clone(), vec![tx34.clone()]).unwrap();
let deaggregated_tx12 = deaggregate(tx1234, vec![tx34]).unwrap();
assert!(deaggregated_tx12
.validate(Weighting::AsTransaction, vc.clone())
@@ -213,12 +213,12 @@ fn multi_kernel_transaction_deaggregation_2() {
assert!(tx3.validate(Weighting::AsTransaction, vc.clone()).is_ok());
let tx123 = aggregate(vec![tx1.clone(), tx2.clone(), tx3.clone()]).unwrap();
let tx12 = aggregate(vec![tx1.clone(), tx2.clone()]).unwrap();
let tx12 = aggregate(vec![tx1, tx2]).unwrap();
assert!(tx123.validate(Weighting::AsTransaction, vc.clone()).is_ok());
assert!(tx12.validate(Weighting::AsTransaction, vc.clone()).is_ok());
let deaggregated_tx3 = deaggregate(tx123.clone(), vec![tx12.clone()]).unwrap();
let deaggregated_tx3 = deaggregate(tx123, vec![tx12]).unwrap();
assert!(deaggregated_tx3
.validate(Weighting::AsTransaction, vc.clone())
.is_ok());
@@ -238,13 +238,13 @@ fn multi_kernel_transaction_deaggregation_3() {
assert!(tx3.validate(Weighting::AsTransaction, vc.clone()).is_ok());
let tx123 = aggregate(vec![tx1.clone(), tx2.clone(), tx3.clone()]).unwrap();
let tx13 = aggregate(vec![tx1.clone(), tx3.clone()]).unwrap();
let tx2 = aggregate(vec![tx2.clone()]).unwrap();
let tx13 = aggregate(vec![tx1, tx3]).unwrap();
let tx2 = aggregate(vec![tx2]).unwrap();
assert!(tx123.validate(Weighting::AsTransaction, vc.clone()).is_ok());
assert!(tx2.validate(Weighting::AsTransaction, vc.clone()).is_ok());
let deaggregated_tx13 = deaggregate(tx123.clone(), vec![tx2.clone()]).unwrap();
let deaggregated_tx13 = deaggregate(tx123, vec![tx2]).unwrap();
assert!(deaggregated_tx13
.validate(Weighting::AsTransaction, vc.clone())
.is_ok());
@@ -279,11 +279,7 @@ fn multi_kernel_transaction_deaggregation_4() {
.validate(Weighting::AsTransaction, vc.clone())
.is_ok());
let deaggregated_tx5 = deaggregate(
tx12345.clone(),
vec![tx1.clone(), tx2.clone(), tx3.clone(), tx4.clone()],
)
.unwrap();
let deaggregated_tx5 = deaggregate(tx12345, vec![tx1, tx2, tx3, tx4]).unwrap();
assert!(deaggregated_tx5
.validate(Weighting::AsTransaction, vc.clone())
.is_ok());
@@ -314,14 +310,14 @@ fn multi_kernel_transaction_deaggregation_5() {
tx5.clone(),
])
.unwrap();
let tx12 = aggregate(vec![tx1.clone(), tx2.clone()]).unwrap();
let tx34 = aggregate(vec![tx3.clone(), tx4.clone()]).unwrap();
let tx12 = aggregate(vec![tx1, tx2]).unwrap();
let tx34 = aggregate(vec![tx3, tx4]).unwrap();
assert!(tx12345
.validate(Weighting::AsTransaction, vc.clone())
.is_ok());
let deaggregated_tx5 = deaggregate(tx12345.clone(), vec![tx12.clone(), tx34.clone()]).unwrap();
let deaggregated_tx5 = deaggregate(tx12345, vec![tx12, tx34]).unwrap();
assert!(deaggregated_tx5
.validate(Weighting::AsTransaction, vc.clone())
.is_ok());
@@ -351,7 +347,7 @@ fn basic_transaction_deaggregation() {
.is_ok());
assert_eq!(tx1, deaggregated_tx1);
let deaggregated_tx2 = deaggregate(tx3.clone(), vec![tx1.clone()]).unwrap();
let deaggregated_tx2 = deaggregate(tx3, vec![tx1]).unwrap();
assert!(deaggregated_tx2
.validate(Weighting::AsTransaction, vc.clone())
@@ -566,20 +562,14 @@ fn test_block_with_timelocked_tx() {
fee: 2,
lock_height: 2,
},
vec![input(5, key_id1.clone()), output(3, key_id2.clone())],
vec![input(5, key_id1), output(3, key_id2)],
&keychain,
&builder,
)
.unwrap();
let previous_header = BlockHeader::default();
let b = new_block(
vec![&tx1],
&keychain,
&builder,
&previous_header,
&key_id3.clone(),
);
let b = new_block(vec![&tx1], &keychain, &builder, &previous_header, &key_id3);
match b.validate(&BlindingFactor::zero(), vc.clone()) {
Err(KernelLockHeight(height)) => {

View file

@@ -41,7 +41,7 @@ fn some_peak_map() {
fn bench_peak_map() {
let nano_to_millis = 1.0 / 1_000_000.0;
let increments = vec![1000_000u64, 10_000_000u64, 100_000_000u64];
let increments = vec![1_000_000u64, 10_000_000u64, 100_000_000u64];
for v in increments {
let start = Utc::now().timestamp_nanos();
@@ -374,7 +374,7 @@ fn pmmr_get_last_n_insertions() {
// test when empty
let res = pmmr.readonly_pmmr().get_last_n_insertions(19);
assert!(res.len() == 0);
assert!(res.is_empty());
pmmr.push(&elems[0]).unwrap();
let res = pmmr.readonly_pmmr().get_last_n_insertions(19);

View file

@@ -46,7 +46,7 @@ fn test_verifier_cache_rangeproofs() {
// Check our output is not verified according to the cache.
{
let mut cache = cache.write();
let unverified = cache.filter_rangeproof_unverified(&vec![out]);
let unverified = cache.filter_rangeproof_unverified(&[out]);
assert_eq!(unverified, vec![out]);
}
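`&vec![out]` heap-allocates a `Vec` only to immediately borrow it as a slice; a slice literal `&[out]` borrows stack data with no allocation (clippy's `useless_vec`). Sketch with a hypothetical filter function standing in for the cache call:

```rust
fn count_unverified(out: u32) -> usize {
    fn filter_unverified(items: &[u32]) -> usize {
        items.len() // stand-in for the real cache lookup
    }
    filter_unverified(&[out]) // before: filter_unverified(&vec![out])
}
```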
@@ -59,7 +59,7 @@ fn test_verifier_cache_rangeproofs() {
// Check it shows as verified according to the cache.
{
let mut cache = cache.write();
let unverified = cache.filter_rangeproof_unverified(&vec![out]);
let unverified = cache.filter_rangeproof_unverified(&[out]);
assert_eq!(unverified, vec![]);
}
}

View file

@@ -382,7 +382,7 @@ impl ExtendedPrivKey {
passphrase: &str,
is_floo: bool,
) -> Result<ExtendedPrivKey, Error> {
let seed = mnemonic::to_seed(mnemonic, passphrase).map_err(|e| Error::MnemonicError(e))?;
let seed = mnemonic::to_seed(mnemonic, passphrase).map_err(Error::MnemonicError)?;
let mut hasher = BIP32GrinHasher::new(is_floo);
let key = ExtendedPrivKey::new_master(secp, &mut hasher, &seed)?;
Ok(key)
@@ -460,9 +460,7 @@ impl ExtendedPrivKey {
// Do SHA256 of just the ECDSA pubkey
let sha2_res = hasher.sha_256(&pk.public_key.serialize_vec(&secp, true)[..]);
// do RIPEMD160
let ripemd_res = hasher.ripemd_160(&sha2_res);
// Return
ripemd_res
hasher.ripemd_160(&sha2_res)
}
/// Returns the first four bytes of the identifier
@@ -567,9 +565,7 @@ impl ExtendedPubKey {
// Do SHA256 of just the ECDSA pubkey
let sha2_res = hasher.sha_256(&self.public_key.serialize_vec(secp, true)[..]);
// do RIPEMD160
let ripemd_res = hasher.ripemd_160(&sha2_res);
// Return
ripemd_res
hasher.ripemd_160(&sha2_res)
}
/// Returns the first four bytes of the identifier

View file

@@ -166,14 +166,14 @@ impl Keychain for ExtKeychain {
let keys = blind_sum
.positive_blinding_factors
.iter()
.filter_map(|b| b.secret_key(&self.secp).ok().clone())
.filter_map(|b| b.secret_key(&self.secp).ok())
.collect::<Vec<SecretKey>>();
pos_keys.extend(keys);
let keys = blind_sum
.negative_blinding_factors
.iter()
.filter_map(|b| b.secret_key(&self.secp).ok().clone())
.filter_map(|b| b.secret_key(&self.secp).ok())
.collect::<Vec<SecretKey>>();
neg_keys.extend(keys);
@@ -265,7 +265,7 @@ mod test {
// adding secret keys 1 and 2 to give secret key 3
let mut skey3 = skey1.clone();
let _ = skey3.add_assign(&keychain.secp, &skey2).unwrap();
skey3.add_assign(&keychain.secp, &skey2).unwrap();
// create commitments for secret keys 1, 2 and 3
// all committing to the value 0 (which is what we do for tx_kernels)
@@ -276,7 +276,7 @@ mod test {
// now sum commitments for keys 1 and 2
let sum = keychain
.secp
.commit_sum(vec![commit_1.clone(), commit_2.clone()], vec![])
.commit_sum(vec![commit_1, commit_2], vec![])
.unwrap();
// confirm the commitment for key 3 matches the sum of the commitments 1 and 2

View file

@@ -112,7 +112,7 @@ pub fn to_entropy(mnemonic: &str) -> Result<Vec<u8>, Error> {
}
/// Converts entropy to a mnemonic
pub fn from_entropy(entropy: &Vec<u8>) -> Result<String, Error> {
pub fn from_entropy(entropy: &[u8]) -> Result<String, Error> {
let sizes: [usize; 5] = [16, 20, 24, 28, 32];
let length = entropy.len();
if !sizes.contains(&length) {
@@ -124,7 +124,7 @@ pub fn from_entropy(entropy: &Vec<u8>) -> Result<String, Error> {
let mut hash = [0; 32];
let mut sha2sum = Sha256::default();
sha2sum.input(&entropy.clone());
sha2sum.input(entropy);
hash.copy_from_slice(sha2sum.result().as_slice());
let checksum = (hash[0] >> 8 - checksum_bits) & mask;
@@ -149,7 +149,7 @@ pub fn from_entropy(entropy: &Vec<u8>) -> Result<String, Error> {
let words: Vec<String> = indexes.iter().map(|x| WORDS[*x as usize].clone()).collect();
let mnemonic = words.join(" ");
Ok(mnemonic.to_owned())
Ok(mnemonic)
}
/// Converts a mnemonic and a passphrase into a seed

View file

@@ -163,7 +163,7 @@ impl Identifier {
let mut p = ExtKeychainPath::from_identifier(&self);
if p.depth > 0 {
p.path[p.depth as usize - 1] = ChildNumber::from(0);
p.depth = p.depth - 1;
p.depth -= 1;
}
Identifier::from_path(&p)
}
@@ -176,7 +176,7 @@ impl Identifier {
}
pub fn to_bytes(&self) -> [u8; IDENTIFIER_SIZE] {
self.0.clone()
self.0
}
pub fn from_pubkey(secp: &Secp256k1, pubkey: &PublicKey) -> Identifier {
@@ -308,7 +308,7 @@ impl BlindingFactor {
// and secp lib checks this
Ok(secp::key::ZERO_KEY)
} else {
secp::key::SecretKey::from_slice(secp, &self.0).map_err(|e| Error::Secp(e))
secp::key::SecretKey::from_slice(secp, &self.0).map_err(Error::Secp)
}
}
@@ -579,7 +579,7 @@ mod test {
// split a key, sum the split keys and confirm the sum matches the original key
let mut skey_sum = split.blind_1.secret_key(&secp).unwrap();
let skey_2 = split.blind_2.secret_key(&secp).unwrap();
let _ = skey_sum.add_assign(&secp, &skey_2).unwrap();
skey_sum.add_assign(&secp, &skey_2).unwrap();
assert_eq!(skey_in, skey_sum);
}
@@ -592,7 +592,7 @@ mod test {
let skey_zero = ZERO_KEY;
let mut skey_out = skey_in.clone();
let _ = skey_out.add_assign(&secp, &skey_zero).unwrap();
skey_out.add_assign(&secp, &skey_zero).unwrap();
assert_eq!(skey_in, skey_out);
}

View file

@@ -123,7 +123,7 @@ impl ViewKey {
{
let (secret_key, chain_code) = self.ckd_pub_tweak(secp, hasher, i)?;
let mut public_key = self.public_key.clone();
let mut public_key = self.public_key;
public_key.add_exp_assign(secp, &secret_key)?;
let switch_public_key = match &self.switch_public_key {

View file

@@ -40,7 +40,7 @@ pub fn from_hex(hex_str: String) -> Result<Vec<u8>, num::ParseIntError> {
let hex_trim = if &hex_str[..2] == "0x" {
hex_str[2..].to_owned()
} else {
hex_str.clone()
hex_str
};
split_n(&hex_trim.trim()[..], 2)
.iter()

View file

@@ -12,8 +12,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::convert::TryInto;
/// Utility to track the rate of data transfers
use std::time::{Duration, SystemTime};
use std::time::SystemTime;
struct Entry {
bytes: u64,
@@ -103,8 +104,8 @@ impl RateCounter {
// turns out getting the millisecs since epoch in Rust isn't as easy as it
// could be
fn millis_since_epoch() -> u64 {
let since_epoch = SystemTime::now()
SystemTime::now()
.duration_since(SystemTime::UNIX_EPOCH)
.unwrap_or(Duration::new(0, 0));
since_epoch.as_secs() * 1000 + u64::from(since_epoch.subsec_millis())
.map(|since_epoch| since_epoch.as_millis().try_into().unwrap_or(0))
.unwrap_or(0)
}
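The rewritten helper in self-contained, runnable form: `Duration::as_millis` returns `u128`, so `TryInto` converts back to `u64`, falling back to 0 both for a pre-epoch clock and for an overflow that cannot occur in practice:

```rust
use std::convert::TryInto;
use std::time::SystemTime;

fn millis_since_epoch() -> u64 {
    SystemTime::now()
        .duration_since(SystemTime::UNIX_EPOCH) // Err if the clock is pre-epoch
        .map(|since_epoch| since_epoch.as_millis().try_into().unwrap_or(0))
        .unwrap_or(0)
}
```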

View file

@@ -52,7 +52,7 @@ pub fn create_zip(dst_file: &File, src_dir: &Path, files: Vec<PathBuf>) -> io::R
/// Extract a set of files from the provided zip archive.
pub fn extract_files(from_archive: File, dest: &Path, files: Vec<PathBuf>) -> io::Result<()> {
let dest: PathBuf = PathBuf::from(dest);
let files: Vec<_> = files.iter().cloned().collect();
let files: Vec<_> = files.to_vec();
let res = thread::spawn(move || {
let mut archive = zip_rs::ZipArchive::new(from_archive).expect("archive file exists");
for x in files {
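For a slice or `Vec` of `Clone` items, `to_vec()` replaces the longer `iter().cloned().collect()` chain:

```rust
use std::path::PathBuf;

// to_vec() says "clone everything into a new Vec" in one call.
fn copy_paths(files: &[PathBuf]) -> Vec<PathBuf> {
    files.to_vec() // before: files.iter().cloned().collect()
}
```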

View file

@@ -39,7 +39,7 @@ fn write_files(dir_name: String, root: &Path) -> io::Result<()> {
file.write_all(b"Hello, world!")?;
let mut file = File::create(root.join(dir_name.clone() + "/bar.txt"))?;
file.write_all(b"Goodbye, world!")?;
let mut file = File::create(root.join(dir_name.clone() + "/sub/lorem"))?;
let mut file = File::create(root.join(dir_name + "/sub/lorem"))?;
file.write_all(b"Lorem ipsum dolor sit amet, consectetur adipiscing elit")?;
Ok(())
}