Mirror of https://github.com/mimblewimble/grin.git
Synced 2025-01-20 19:11:08 +03:00

Less cloning and additional pattern simplifications (#3223)

* API Cleanup
* Chain Cleanup
* Core Cleanup
* Keychain Cleanup
* P2P Cleanup
* Pool Cleanup
* Store Cleanup
* Util Cleanup
* Cleanup clone_from_slice
* Address jasper comments

This commit is contained in:
parent c4e69717ab
commit 04a0123752

43 changed files with 267 additions and 326 deletions
@@ -128,7 +128,7 @@ impl Handler for BasicAuthURIMiddleware {
 				unauthorized_response(&self.basic_realm)
 			}
 		} else {
-			return next_handler.call(req, handlers);
+			next_handler.call(req, handlers)
 		}
 	}
 }

@@ -34,7 +34,7 @@ pub type ClientResponseFuture<T> = Box<dyn Future<Item = T, Error = Error> + Sen
 /// Helper function to easily issue a HTTP GET request against a given URL that
 /// returns a JSON object. Handles request building, JSON deserialization and
 /// response code checking.
-pub fn get<'a, T>(url: &'a str, api_secret: Option<String>) -> Result<T, Error>
+pub fn get<T>(url: &str, api_secret: Option<String>) -> Result<T, Error>
 where
 	for<'de> T: Deserialize<'de>,
 {
@@ -44,7 +44,7 @@ where
 /// Helper function to easily issue an async HTTP GET request against a given
 /// URL that returns a future. Handles request building, JSON deserialization
 /// and response code checking.
-pub fn get_async<'a, T>(url: &'a str, api_secret: Option<String>) -> ClientResponseFuture<T>
+pub fn get_async<T>(url: &str, api_secret: Option<String>) -> ClientResponseFuture<T>
 where
 	for<'de> T: Deserialize<'de> + Send + 'static,
 {

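The two hunks above drop a named lifetime that Rust's elision rules already supply. A minimal standalone sketch of the equivalence (illustrative, not code from this commit):

```rust
// With a single `&str` parameter, the explicit `<'a>` annotation is
// redundant: both signatures mean exactly the same thing, so the
// shorter elided form is preferred.
fn describe_explicit<'a>(url: &'a str) -> String {
    format!("GET {}", url)
}

fn describe_elided(url: &str) -> String {
    format!("GET {}", url)
}

fn main() {
    assert_eq!(describe_explicit("/v1/status"), describe_elided("/v1/status"));
}
```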
@@ -191,14 +191,10 @@ impl OwnerAPIHandlerV2 {

 impl crate::router::Handler for OwnerAPIHandlerV2 {
 	fn post(&self, req: Request<Body>) -> ResponseFuture {
-		Box::new(
-			self.handle_post_request(req)
-				.and_then(|r| ok(r))
-				.or_else(|e| {
-					error!("Request Error: {:?}", e);
-					ok(create_error_response(e))
-				}),
-		)
+		Box::new(self.handle_post_request(req).and_then(ok).or_else(|e| {
+			error!("Request Error: {:?}", e);
+			ok(create_error_response(e))
+		}))
 	}

 	fn options(&self, _req: Request<Body>) -> ResponseFuture {
@@ -260,14 +256,10 @@ impl ForeignAPIHandlerV2 {

 impl crate::router::Handler for ForeignAPIHandlerV2 {
 	fn post(&self, req: Request<Body>) -> ResponseFuture {
-		Box::new(
-			self.handle_post_request(req)
-				.and_then(|r| ok(r))
-				.or_else(|e| {
-					error!("Request Error: {:?}", e);
-					ok(create_error_response(e))
-				}),
-		)
+		Box::new(self.handle_post_request(req).and_then(ok).or_else(|e| {
+			error!("Request Error: {:?}", e);
+			ok(create_error_response(e))
+		}))
 	}

 	fn options(&self, _req: Request<Body>) -> ResponseFuture {

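The `.and_then(|r| ok(r))` to `.and_then(ok)` change is eta-reduction: a closure that only forwards its argument can be replaced by the function itself. A sketch of the same idea on `Option` (the commit applies it to the futures 0.1 `ok` combinator):

```rust
// A closure that just forwards its argument to a function can be replaced
// by naming the function directly.
fn double(x: i32) -> i32 {
    x * 2
}

fn main() {
    let verbose: Option<i32> = Some(21).map(|x| double(x));
    let concise: Option<i32> = Some(21).map(double);
    assert_eq!(verbose, concise);
}
```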
@@ -43,7 +43,7 @@ impl HeaderHandler {
 		if let Ok(height) = input.parse() {
 			match w(&self.chain)?.get_header_by_height(height) {
 				Ok(header) => return Ok(BlockHeaderPrintable::from_header(&header)),
-				Err(_) => return Err(ErrorKind::NotFound)?,
+				Err(_) => return Err(ErrorKind::NotFound.into()),
 			}
 		}
 		check_block_param(&input)?;
@@ -60,14 +60,14 @@ impl HeaderHandler {
 		let oid = get_output(&self.chain, &commit_id)?.1;
 		match w(&self.chain)?.get_header_for_output(&oid) {
 			Ok(header) => Ok(BlockHeaderPrintable::from_header(&header)),
-			Err(_) => Err(ErrorKind::NotFound)?,
+			Err(_) => Err(ErrorKind::NotFound.into()),
 		}
 	}

 	pub fn get_header_v2(&self, h: &Hash) -> Result<BlockHeaderPrintable, Error> {
 		let chain = w(&self.chain)?;
 		let header = chain.get_block_header(h).context(ErrorKind::NotFound)?;
-		return Ok(BlockHeaderPrintable::from_header(&header));
+		Ok(BlockHeaderPrintable::from_header(&header))
 	}

 	// Try to get hash from height, hash or output commit
@@ -80,7 +80,7 @@ impl HeaderHandler {
 		if let Some(height) = height {
 			match w(&self.chain)?.get_header_by_height(height) {
 				Ok(header) => return Ok(header.hash()),
-				Err(_) => return Err(ErrorKind::NotFound)?,
+				Err(_) => return Err(ErrorKind::NotFound.into()),
 			}
 		}
 		if let Some(hash) = hash {
@@ -90,12 +90,10 @@ impl HeaderHandler {
 			let oid = get_output_v2(&self.chain, &commit, false, false)?.1;
 			match w(&self.chain)?.get_header_for_output(&oid) {
 				Ok(header) => return Ok(header.hash()),
-				Err(_) => return Err(ErrorKind::NotFound)?,
+				Err(_) => return Err(ErrorKind::NotFound.into()),
 			}
 		}
-		return Err(ErrorKind::Argument(
-			"not a valid hash, height or output commit".to_owned(),
-		))?;
+		Err(ErrorKind::Argument("not a valid hash, height or output commit".to_owned()).into())
 	}
 }

@@ -145,7 +143,7 @@ impl BlockHandler {
 		if let Ok(height) = input.parse() {
 			match w(&self.chain)?.get_header_by_height(height) {
 				Ok(header) => return Ok(header.hash()),
-				Err(_) => return Err(ErrorKind::NotFound)?,
+				Err(_) => return Err(ErrorKind::NotFound.into()),
 			}
 		}
 		check_block_param(&input)?;
@@ -164,7 +162,7 @@ impl BlockHandler {
 		if let Some(height) = height {
 			match w(&self.chain)?.get_header_by_height(height) {
 				Ok(header) => return Ok(header.hash()),
-				Err(_) => return Err(ErrorKind::NotFound)?,
+				Err(_) => return Err(ErrorKind::NotFound.into()),
 			}
 		}
 		if let Some(hash) = hash {
@@ -174,23 +172,19 @@ impl BlockHandler {
 			let oid = get_output_v2(&self.chain, &commit, false, false)?.1;
 			match w(&self.chain)?.get_header_for_output(&oid) {
 				Ok(header) => return Ok(header.hash()),
-				Err(_) => return Err(ErrorKind::NotFound)?,
+				Err(_) => return Err(ErrorKind::NotFound.into()),
 			}
 		}
-		return Err(ErrorKind::Argument(
-			"not a valid hash, height or output commit".to_owned(),
-		))?;
+		Err(ErrorKind::Argument("not a valid hash, height or output commit".to_owned()).into())
 	}
 }

-fn check_block_param(input: &String) -> Result<(), Error> {
+fn check_block_param(input: &str) -> Result<(), Error> {
 	lazy_static! {
 		static ref RE: Regex = Regex::new(r"[0-9a-fA-F]{64}").unwrap();
 	}
 	if !RE.is_match(&input) {
-		return Err(ErrorKind::Argument(
-			"Not a valid hash or height.".to_owned(),
-		))?;
+		return Err(ErrorKind::Argument("Not a valid hash or height.".to_owned()).into());
 	}
 	Ok(())
 }

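The recurring rewrite in this commit replaces `Err(ErrorKind::X)?` with `Err(ErrorKind::X.into())`. Both rely on a `From<ErrorKind> for Error` impl, but applying `?` to a value already known to be an `Err` is indirect (clippy's `try_err` lint flags it). A minimal sketch with hypothetical stand-in types, not grin's actual error module:

```rust
// Minimal stand-ins for a failure-style Error/ErrorKind pair
// (hypothetical types, just to show the conversion).
#[derive(Debug)]
enum ErrorKind {
    NotFound,
}

#[derive(Debug)]
struct Error {
    kind: ErrorKind,
}

impl From<ErrorKind> for Error {
    fn from(kind: ErrorKind) -> Error {
        Error { kind }
    }
}

// Old style: `?` on a freshly built Err performs the From conversion
// implicitly while pretending to ask a question.
fn lookup_old(found: bool) -> Result<u64, Error> {
    if !found {
        return Err(ErrorKind::NotFound)?;
    }
    Ok(42)
}

// New style: the conversion is explicit and the control flow is plain.
fn lookup_new(found: bool) -> Result<u64, Error> {
    if !found {
        return Err(ErrorKind::NotFound.into());
    }
    Ok(42)
}

fn main() {
    println!("{:?}", lookup_old(false).unwrap_err());
    println!("{:?}", lookup_new(false).unwrap_err());
}
```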
@@ -170,7 +170,7 @@ impl OutputHandler {
 				outputs = [&outputs[..], &block_output_batch[..]].concat();
 			}
 		}
-		return Ok(outputs);
+		Ok(outputs)
 	}

 	// allows traversal of utxo set
@@ -327,7 +327,7 @@ impl OutputHandler {
 		let mut return_vec = vec![];
 		for i in (start_height..=end_height).rev() {
 			if let Ok(res) = self.outputs_at_height(i, commitments.clone(), include_rp) {
-				if res.outputs.len() > 0 {
+				if !res.outputs.is_empty() {
 					return_vec.push(res);
 				}
 			}
@@ -359,7 +359,7 @@ impl OutputHandler {
 				include_rproof,
 				include_merkle_proof,
 			) {
-				if res.len() > 0 {
+				if !res.is_empty() {
 					return_vec = [&return_vec[..], &res[..]].concat();
 				}
 			}
@@ -394,7 +394,7 @@ impl KernelHandler {
 			.trim_end_matches('/')
 			.rsplit('/')
 			.next()
-			.ok_or(ErrorKind::RequestError("missing excess".into()))?;
+			.ok_or_else(|| ErrorKind::RequestError("missing excess".into()))?;
 		let excess = util::from_hex(excess.to_owned())
 			.map_err(|_| ErrorKind::RequestError("invalid excess hex".into()))?;
 		if excess.len() != 33 {
@@ -447,7 +447,7 @@ impl KernelHandler {
 		min_height: Option<u64>,
 		max_height: Option<u64>,
 	) -> Result<LocatedTxKernel, Error> {
-		let excess = util::from_hex(excess.to_owned())
+		let excess = util::from_hex(excess)
 			.map_err(|_| ErrorKind::RequestError("invalid excess hex".into()))?;
 		if excess.len() != 33 {
 			return Err(ErrorKind::RequestError("invalid excess length".into()).into());

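The `ok_or` to `ok_or_else` change above is about laziness: `ok_or`'s argument is built even when the `Option` is `Some` and the error is never used. A standalone sketch (illustrative, not from the commit):

```rust
// `ok_or(expensive())` evaluates its argument unconditionally;
// `ok_or_else` defers the work to a closure that only runs in the
// None case.
fn main() {
    let excess: Option<&str> = Some("09abc");

    // Eager: the error String is allocated and then thrown away.
    let eager: Result<&str, String> = excess.ok_or("missing excess".to_string());

    // Lazy: the allocation happens only if `excess` is None.
    let lazy: Result<&str, String> = excess.ok_or_else(|| "missing excess".to_string());

    assert_eq!(eager, lazy);
}
```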
@@ -84,10 +84,10 @@ impl PoolHandler {
 			.blockchain
 			.chain_head()
 			.context(ErrorKind::Internal("Failed to get chain head".to_owned()))?;
-		let res = tx_pool
+		tx_pool
 			.add_to_pool(source, tx, !fluff.unwrap_or(false), &header)
 			.context(ErrorKind::Internal("Failed to update pool".to_owned()))?;
-		Ok(res)
+		Ok(())
 	}
 }
 /// Dummy wrapper for the hex-encoded serialized transaction.
@@ -141,10 +141,10 @@ impl PoolPushHandler {
 				.blockchain
 				.chain_head()
 				.context(ErrorKind::Internal("Failed to get chain head".to_owned()))?;
-			let res = tx_pool
+			tx_pool
 				.add_to_pool(source, tx, !fluff, &header)
 				.context(ErrorKind::Internal("Failed to update pool".to_owned()))?;
-			Ok(res)
+			Ok(())
 		}),
 	)
 }

@@ -54,7 +54,7 @@ impl Handler for KernelDownloadHandler {
 		} else {
 			response(
 				StatusCode::INTERNAL_SERVER_ERROR,
-				format!("requesting kernel data from peer failed (no peers)"),
+				"requesting kernel data from peer failed (no peers)".to_string(),
 			)
 		}
 	}

@@ -70,7 +70,7 @@ pub fn get_output(
 			}
 		}
 	}
-	Err(ErrorKind::NotFound)?
+	Err(ErrorKind::NotFound.into())
 }

 /// Retrieves an output from the chain given a commit id (a tiny bit iteratively)
@@ -102,10 +102,11 @@ pub fn get_output_v2(
 	match res {
 		Ok(output_pos) => match chain.get_unspent_output_at(output_pos.position) {
 			Ok(output) => {
-				let mut header = None;
-				if include_merkle_proof && output.is_coinbase() {
-					header = chain.get_header_by_height(output_pos.height).ok();
-				}
+				let header = if include_merkle_proof && output.is_coinbase() {
+					chain.get_header_by_height(output_pos.height).ok()
+				} else {
+					None
+				};
 				match OutputPrintable::from_output(
 					&output,
 					chain.clone(),
@@ -124,7 +125,7 @@ pub fn get_output_v2(
 				}
 			}
-			Err(_) => return Err(ErrorKind::NotFound)?,
+			Err(_) => return Err(ErrorKind::NotFound.into()),
 		},
 		Err(e) => {
 			trace!(
@@ -136,5 +137,5 @@ pub fn get_output_v2(
 		}
 	}
-	Err(ErrorKind::NotFound)?
+	Err(ErrorKind::NotFound.into())
 }

@@ -21,7 +21,7 @@ use crate::web::*;
 use hyper::{Body, Request};
 use std::sync::Weak;

-const CRATE_VERSION: &'static str = env!("CARGO_PKG_VERSION");
+const CRATE_VERSION: &str = env!("CARGO_PKG_VERSION");

 /// Version handler. Get running node API version
 /// GET /v1/version

@@ -137,9 +137,7 @@ impl TLSConfig {
 		let keys = pemfile::pkcs8_private_keys(&mut reader)
 			.map_err(|_| ErrorKind::Internal("failed to load private key".to_string()))?;
 		if keys.len() != 1 {
-			return Err(ErrorKind::Internal(
-				"expected a single private key".to_string(),
-			))?;
+			return Err(ErrorKind::Internal("expected a single private key".to_string()).into());
 		}
 		Ok(keys[0].clone())
 	}
@@ -193,7 +191,8 @@ impl ApiServer {
 		if self.shutdown_sender.is_some() {
 			return Err(ErrorKind::Internal(
 				"Can't start HTTP API server, it's running already".to_string(),
-			))?;
+			)
+			.into());
 		}
 		let (tx, _rx) = oneshot::channel::<()>();
 		self.shutdown_sender = Some(tx);
@@ -222,7 +221,8 @@ impl ApiServer {
 		if self.shutdown_sender.is_some() {
 			return Err(ErrorKind::Internal(
 				"Can't start HTTPS API server, it's running already".to_string(),
-			))?;
+			)
+			.into());
 		}

 		let tls_conf = conf.build_server_config()?;

@@ -70,16 +70,16 @@ pub trait Handler {
 		req: Request<Body>,
 		mut _handlers: Box<dyn Iterator<Item = HandlerObj>>,
 	) -> ResponseFuture {
-		match req.method() {
-			&Method::GET => self.get(req),
-			&Method::POST => self.post(req),
-			&Method::PUT => self.put(req),
-			&Method::DELETE => self.delete(req),
-			&Method::PATCH => self.patch(req),
-			&Method::OPTIONS => self.options(req),
-			&Method::CONNECT => self.connect(req),
-			&Method::TRACE => self.trace(req),
-			&Method::HEAD => self.head(req),
+		match *req.method() {
+			Method::GET => self.get(req),
+			Method::POST => self.post(req),
+			Method::PUT => self.put(req),
+			Method::DELETE => self.delete(req),
+			Method::PATCH => self.patch(req),
+			Method::OPTIONS => self.options(req),
+			Method::CONNECT => self.connect(req),
+			Method::TRACE => self.trace(req),
+			Method::HEAD => self.head(req),
 			_ => not_found(),
 		}
 	}

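Matching on the dereferenced value instead of putting `&` in every arm is a pure readability change. A sketch with a stand-in enum (hyper's `Method` behaves the same way for its unit-like constants):

```rust
// Matching `*method` with plain variant patterns is equivalent to
// matching the reference with `&`-patterns in every arm. Unit-like
// variants bind nothing, so no move out of the reference occurs.
enum Method {
    Get,
    Post,
}

fn dispatch(method: &Method) -> &'static str {
    match *method {
        Method::Get => "get",
        Method::Post => "post",
    }
}

fn main() {
    assert_eq!(dispatch(&Method::Get), "get");
    assert_eq!(dispatch(&Method::Post), "post");
}
```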
@@ -191,9 +191,7 @@ pub struct Output {
 impl Output {
 	pub fn new(commit: &pedersen::Commitment, height: u64, mmr_index: u64) -> Output {
 		Output {
-			commit: PrintableCommitment {
-				commit: commit.clone(),
-			},
+			commit: PrintableCommitment { commit: *commit },
 			height: height,
 			mmr_index: mmr_index,
 		}
@@ -207,7 +205,7 @@ pub struct PrintableCommitment {

 impl PrintableCommitment {
 	pub fn commit(&self) -> pedersen::Commitment {
-		self.commit.clone()
+		self.commit
 	}

 	pub fn to_vec(&self) -> Vec<u8> {
@@ -330,17 +328,17 @@ impl OutputPrintable {
 	}

 	pub fn commit(&self) -> Result<pedersen::Commitment, ser::Error> {
-		Ok(self.commit.clone())
+		Ok(self.commit)
 	}

 	pub fn range_proof(&self) -> Result<pedersen::RangeProof, ser::Error> {
 		let proof_str = self
 			.proof
 			.clone()
-			.ok_or_else(|| ser::Error::HexError(format!("output range_proof missing")))?;
+			.ok_or_else(|| ser::Error::HexError("output range_proof missing".to_string()))?;

 		let p_vec = util::from_hex(proof_str)
-			.map_err(|_| ser::Error::HexError(format!("invalid output range_proof")))?;
+			.map_err(|_| ser::Error::HexError("invalid output range_proof".to_string()))?;
 		let mut p_bytes = [0; util::secp::constants::MAX_PROOF_SIZE];
 		for i in 0..p_bytes.len() {
 			p_bytes[i] = p_vec[i];
@@ -481,7 +479,7 @@ impl<'de> serde::de::Deserialize<'de> for OutputPrintable {
 		}
 	}

-	const FIELDS: &'static [&'static str] = &[
+	const FIELDS: &[&str] = &[
 		"output_type",
 		"commit",
 		"spent",
@@ -734,8 +732,7 @@ mod test {

 	#[test]
 	fn serialize_output_printable() {
-		let hex_output =
-			"{\
+		let hex_output = "{\
 			\"output_type\":\"Coinbase\",\
 			\"commit\":\"083eafae5d61a85ab07b12e1a51b3918d8e6de11fc6cde641d54af53608aa77b9f\",\
 			\"spent\":false,\
@@ -752,8 +749,7 @@ mod test {

 	#[test]
 	fn serialize_output() {
-		let hex_commit =
-			"{\
+		let hex_commit = "{\
 			\"commit\":\"083eafae5d61a85ab07b12e1a51b3918d8e6de11fc6cde641d54af53608aa77b9f\",\
 			\"height\":0,\
 			\"mmr_index\":0\

@@ -114,7 +114,7 @@ impl From<&str> for QueryParams {
 		let params = form_urlencoded::parse(query_string.as_bytes())
 			.into_owned()
 			.fold(HashMap::new(), |mut hm, (k, v)| {
-				hm.entry(k).or_insert(vec![]).push(v);
+				hm.entry(k).or_insert_with(|| vec![]).push(v);
 				hm
 			});
 		QueryParams { params }
@@ -152,7 +152,7 @@ macro_rules! must_get_query(
 	($req: expr) =>(
 		match $req.uri().query() {
 			Some(q) => q,
-			None => return Err(ErrorKind::RequestError("no query string".to_owned()))?,
+			None => return Err(ErrorKind::RequestError("no query string".to_owned()).into()),
 		}
 	));

@@ -163,7 +163,7 @@ macro_rules! parse_param(
 			None => $default,
 			Some(val) => match val.parse() {
 				Ok(val) => val,
-				Err(_) => return Err(ErrorKind::RequestError(format!("invalid value of parameter {}", $name)))?,
+				Err(_) => return Err(ErrorKind::RequestError(format!("invalid value of parameter {}", $name)).into()),
 			}
 		}
 	));

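The `or_insert` to `or_insert_with` change makes the default value lazy: `or_insert(v)` constructs `v` on every call, even when the key already exists. A standalone sketch (illustrative, not from the commit):

```rust
use std::collections::HashMap;

// `or_insert_with` only builds the default when the entry is vacant.
// For an empty Vec the saved work is tiny, but the pattern matters for
// expensive defaults, and clippy flags the eager form.
fn main() {
    let mut hm: HashMap<String, Vec<String>> = HashMap::new();
    hm.entry("q".to_string())
        .or_insert_with(Vec::new)
        .push("1".to_string());
    hm.entry("q".to_string())
        .or_insert_with(Vec::new) // key present: closure never runs
        .push("2".to_string());
    assert_eq!(hm["q"], vec!["1".to_string(), "2".to_string()]);
}
```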
@@ -90,7 +90,7 @@ impl OrphanBlockPool {
 		{
 			let height_hashes = height_idx
 				.entry(orphan.block.header.height)
-				.or_insert(vec![]);
+				.or_insert_with(|| vec![]);
 			height_hashes.push(orphan.block.hash());
 			orphans.insert(orphan.block.hash(), orphan);
 		}
@@ -125,11 +125,11 @@ impl OrphanBlockPool {

 	/// Get an orphan from the pool indexed by the hash of its parent, removing
 	/// it at the same time, preventing clone
-	fn remove_by_height(&self, height: &u64) -> Option<Vec<Orphan>> {
+	fn remove_by_height(&self, height: u64) -> Option<Vec<Orphan>> {
 		let mut orphans = self.orphans.write();
 		let mut height_idx = self.height_idx.write();
 		height_idx
-			.remove(height)
+			.remove(&height)
 			.map(|hs| hs.iter().filter_map(|h| orphans.remove(h)).collect())
 	}

@@ -452,7 +452,7 @@ impl Chain {
 		let mut orphan_accepted = false;
 		let mut height_accepted = height;

-		if let Some(orphans) = self.orphans.remove_by_height(&height) {
+		if let Some(orphans) = self.orphans.remove_by_height(height) {
 			let orphans_len = orphans.len();
 			for (i, orphan) in orphans.into_iter().enumerate() {
 				debug!(
@@ -1219,7 +1219,7 @@ impl Chain {
 	pub fn try_header_head(&self, timeout: Duration) -> Result<Option<Tip>, Error> {
 		self.header_pmmr
 			.try_read_for(timeout)
-			.map(|ref pmmr| self.read_header_head(pmmr).map(|x| Some(x)))
+			.map(|ref pmmr| self.read_header_head(pmmr).map(Some))
 			.unwrap_or(Ok(None))
 	}

@@ -1563,7 +1563,7 @@ fn setup_head(
 			batch.save_block(&genesis)?;
 			batch.save_body_head(&Tip::from_header(&genesis.header))?;

-			if genesis.kernels().len() > 0 {
+			if !genesis.kernels().is_empty() {
 				let (utxo_sum, kernel_sum) = (sums, genesis as &dyn Committed).verify_kernel_sums(
 					genesis.header.overage(),
 					genesis.header.total_kernel_offset(),
@@ -1582,7 +1582,7 @@ fn setup_head(

 			info!("init: saved genesis: {:?}", genesis.hash());
 		}
-		Err(e) => return Err(ErrorKind::StoreErr(e, "chain init load head".to_owned()))?,
+		Err(e) => return Err(ErrorKind::StoreErr(e, "chain init load head".to_owned()).into()),
 	};
 	batch.commit()?;
 	Ok(())

@@ -291,9 +291,7 @@ fn check_known_store(header: &BlockHeader, ctx: &mut BlockContext<'_>) -> Result
 			// Not yet processed this block, we can proceed.
 			Ok(())
 		}
-		Err(e) => {
-			return Err(ErrorKind::StoreErr(e, "pipe get this block".to_owned()).into());
-		}
+		Err(e) => Err(ErrorKind::StoreErr(e, "pipe get this block".to_owned()).into()),
 	}
 }

@@ -504,7 +502,7 @@ pub fn rewind_and_apply_header_fork(
 	for h in fork_hashes {
 		let header = batch
 			.get_block_header(&h)
-			.map_err(|e| ErrorKind::StoreErr(e, format!("getting forked headers")))?;
+			.map_err(|e| ErrorKind::StoreErr(e, "getting forked headers".to_string()))?;
 		ext.validate_root(&header)?;
 		ext.apply_header(&header)?;
 	}

@@ -28,14 +28,14 @@ use std::sync::Arc;

 const STORE_SUBPATH: &str = "chain";

-const BLOCK_HEADER_PREFIX: u8 = 'h' as u8;
-const BLOCK_PREFIX: u8 = 'b' as u8;
-const HEAD_PREFIX: u8 = 'H' as u8;
-const TAIL_PREFIX: u8 = 'T' as u8;
-const COMMIT_POS_PREFIX: u8 = 'c' as u8;
-const COMMIT_POS_HGT_PREFIX: u8 = 'p' as u8;
-const BLOCK_INPUT_BITMAP_PREFIX: u8 = 'B' as u8;
-const BLOCK_SUMS_PREFIX: u8 = 'M' as u8;
+const BLOCK_HEADER_PREFIX: u8 = b'h';
+const BLOCK_PREFIX: u8 = b'b';
+const HEAD_PREFIX: u8 = b'H';
+const TAIL_PREFIX: u8 = b'T';
+const COMMIT_POS_PREFIX: u8 = b'c';
+const COMMIT_POS_HGT_PREFIX: u8 = b'p';
+const BLOCK_INPUT_BITMAP_PREFIX: u8 = b'B';
+const BLOCK_SUMS_PREFIX: u8 = b'M';

 /// All chain-related database operations
 pub struct ChainStore {
@@ -45,7 +45,7 @@ pub struct ChainStore {
 impl ChainStore {
 	/// Create new chain store
 	pub fn new(db_root: &str) -> Result<ChainStore, Error> {
-		let db = store::Store::new(db_root, None, Some(STORE_SUBPATH.clone()), None)?;
+		let db = store::Store::new(db_root, None, Some(STORE_SUBPATH), None)?;
 		Ok(ChainStore { db })
 	}

@@ -64,12 +64,12 @@ impl ChainStore {
 impl ChainStore {
 	/// The current chain head.
 	pub fn head(&self) -> Result<Tip, Error> {
-		option_to_not_found(self.db.get_ser(&vec![HEAD_PREFIX]), || "HEAD".to_owned())
+		option_to_not_found(self.db.get_ser(&[HEAD_PREFIX]), || "HEAD".to_owned())
 	}

 	/// The current chain "tail" (earliest block in the store).
 	pub fn tail(&self) -> Result<Tip, Error> {
-		option_to_not_found(self.db.get_ser(&vec![TAIL_PREFIX]), || "TAIL".to_owned())
+		option_to_not_found(self.db.get_ser(&[TAIL_PREFIX]), || "TAIL".to_owned())
 	}

 	/// Header of the block at the head of the block chain (not the same thing as header_head).
@@ -169,12 +169,12 @@ pub struct Batch<'a> {
 impl<'a> Batch<'a> {
 	/// The head.
 	pub fn head(&self) -> Result<Tip, Error> {
-		option_to_not_found(self.db.get_ser(&vec![HEAD_PREFIX]), || "HEAD".to_owned())
+		option_to_not_found(self.db.get_ser(&[HEAD_PREFIX]), || "HEAD".to_owned())
 	}

 	/// The tail.
 	pub fn tail(&self) -> Result<Tip, Error> {
-		option_to_not_found(self.db.get_ser(&vec![TAIL_PREFIX]), || "TAIL".to_owned())
+		option_to_not_found(self.db.get_ser(&[TAIL_PREFIX]), || "TAIL".to_owned())
 	}

 	/// Header of the block at the head of the block chain (not the same thing as header_head).
@@ -184,12 +184,12 @@ impl<'a> Batch<'a> {

 	/// Save body head to db.
 	pub fn save_body_head(&self, t: &Tip) -> Result<(), Error> {
-		self.db.put_ser(&vec![HEAD_PREFIX], t)
+		self.db.put_ser(&[HEAD_PREFIX], t)
 	}

 	/// Save body "tail" to db.
 	pub fn save_body_tail(&self, t: &Tip) -> Result<(), Error> {
-		self.db.put_ser(&vec![TAIL_PREFIX], t)
+		self.db.put_ser(&[TAIL_PREFIX], t)
 	}

 	/// get block
@@ -478,13 +478,11 @@ impl<'a> Iterator for DifficultyIter<'a> {
 		self.header = if self.header.is_none() {
 			if let Some(ref batch) = self.batch {
 				batch.get_block_header(&self.start).ok()
-			} else {
-				if let Some(ref store) = self.store {
-					store.get_block_header(&self.start).ok()
-				} else {
-					None
-				}
+			} else if let Some(ref store) = self.store {
+				store.get_block_header(&self.start).ok()
+			} else {
+				None
 			}
 		} else {
 			self.prev_header.clone()
 		};
@@ -494,13 +492,11 @@ impl<'a> Iterator for DifficultyIter<'a> {
 		if let Some(header) = self.header.clone() {
 			if let Some(ref batch) = self.batch {
 				self.prev_header = batch.get_previous_header(&header).ok();
-			} else {
-				if let Some(ref store) = self.store {
-					self.prev_header = store.get_previous_header(&header).ok();
-				} else {
-					self.prev_header = None;
-				}
+			} else if let Some(ref store) = self.store {
+				self.prev_header = store.get_previous_header(&header).ok();
+			} else {
+				self.prev_header = None;
 			}
 		}

 		let prev_difficulty = self
 			.prev_header
@@ -517,7 +513,7 @@ impl<'a> Iterator for DifficultyIter<'a> {
 				header.pow.is_secondary(),
 			))
 		} else {
-			return None;
+			None
 		}
 	}
 }

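Two of the patterns from this file shown in isolation: `b'h'` is a clearer spelling of `'h' as u8`, and a fixed one-byte key can be a stack-allocated slice instead of a heap-allocated `Vec`. A minimal sketch (illustrative, not grin's store API):

```rust
// `b'h'` and `'h' as u8` are the same value; the byte literal states
// the intent directly.
const BLOCK_HEADER_PREFIX: u8 = b'h';

// A key parameter taken as &[u8] accepts a borrowed array with no
// allocation, where &vec![PREFIX] would allocate on every call.
fn key_len(key: &[u8]) -> usize {
    key.len()
}

fn main() {
    assert_eq!(BLOCK_HEADER_PREFIX, 'h' as u8);
    // &[u8; 1] coerces to &[u8]; no Vec needed.
    assert_eq!(key_len(&[BLOCK_HEADER_PREFIX]), 1);
}
```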
@@ -1423,7 +1423,7 @@ pub fn zip_read(root_dir: String, header: &BlockHeader) -> Result<File, Error> {
 	// But practically, these zip files are not small ones, we just keep the zips in last 24 hours
 	let data_dir = Path::new(&root_dir);
 	let pattern = format!("{}_", TXHASHSET_ZIP);
-	if let Ok(n) = clean_files_by_prefix(data_dir.clone(), &pattern, 24 * 60 * 60) {
+	if let Ok(n) = clean_files_by_prefix(data_dir, &pattern, 24 * 60 * 60) {
 		debug!(
 			"{} zip files have been clean up in folder: {:?}",
 			n, data_dir

@@ -114,7 +114,7 @@ impl<'a> UTXOView<'a> {
 	/// that have not sufficiently matured.
 	pub fn verify_coinbase_maturity(
 		&self,
-		inputs: &Vec<Input>,
+		inputs: &[Input],
 		height: u64,
 		batch: &Batch<'_>,
 	) -> Result<(), Error> {
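Taking `&[Input]` instead of `&Vec<Input>` is the standard slice-parameter idiom: the function accepts more caller types and drops a level of indirection. A sketch with a hypothetical stand-in type:

```rust
// `Input` here is a hypothetical stand-in for the chain type.
struct Input(u64);

// Accepts a Vec, an array, or any other slice.
fn sum_inputs(inputs: &[Input]) -> u64 {
    inputs.iter().map(|i| i.0).sum()
}

fn main() {
    let from_vec = vec![Input(1), Input(2)];
    let from_array = [Input(3)];
    // &Vec<Input> deref-coerces to &[Input] at the call site.
    assert_eq!(sum_inputs(&from_vec), 3);
    assert_eq!(sum_inputs(&from_array), 3);
}
```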
@@ -28,13 +28,13 @@ bitflags! {
 	/// Options for block validation
 	pub struct Options: u32 {
 		/// No flags
-		const NONE = 0b00000000;
+		const NONE = 0b0000_0000;
 		/// Runs without checking the Proof of Work, mostly to make testing easier.
-		const SKIP_POW = 0b00000001;
+		const SKIP_POW = 0b0000_0001;
 		/// Adds block while in syncing mode.
-		const SYNC = 0b00000010;
+		const SYNC = 0b0000_0010;
 		/// Block validation on a block we mined ourselves
-		const MINE = 0b00000100;
+		const MINE = 0b0000_0100;
 	}
 }

@@ -212,11 +212,10 @@ impl TxHashSetRoots {
 			self.output_roots.merged_root(header),
 		);

-		if header.output_root != self.output_root(header) {
-			Err(ErrorKind::InvalidRoot.into())
-		} else if header.range_proof_root != self.rproof_root {
-			Err(ErrorKind::InvalidRoot.into())
-		} else if header.kernel_root != self.kernel_root {
+		if header.output_root != self.output_root(header)
+			|| header.range_proof_root != self.rproof_root
+			|| header.kernel_root != self.kernel_root
+		{
 			Err(ErrorKind::InvalidRoot.into())
 		} else {
 			Ok(())

@@ -438,7 +438,7 @@ impl TxKernel {
 	}

 	/// Batch signature verification.
-	pub fn batch_sig_verify(tx_kernels: &Vec<TxKernel>) -> Result<(), Error> {
+	pub fn batch_sig_verify(tx_kernels: &[TxKernel]) -> Result<(), Error> {
 		let len = tx_kernels.len();
 		let mut sigs: Vec<secp::Signature> = Vec::with_capacity(len);
 		let mut pubkeys: Vec<secp::key::PublicKey> = Vec::with_capacity(len);
@@ -629,10 +629,9 @@ impl TransactionBody {
 	/// inputs, if any, are kept intact.
 	/// Sort order is maintained.
 	pub fn with_input(mut self, input: Input) -> TransactionBody {
-		self.inputs
-			.binary_search(&input)
-			.err()
-			.map(|e| self.inputs.insert(e, input));
+		if let Err(e) = self.inputs.binary_search(&input) {
+			self.inputs.insert(e, input)
+		};
 		self
 	}

@@ -640,10 +639,9 @@ impl TransactionBody {
 	/// outputs, if any, are kept intact.
 	/// Sort order is maintained.
 	pub fn with_output(mut self, output: Output) -> TransactionBody {
-		self.outputs
-			.binary_search(&output)
-			.err()
-			.map(|e| self.outputs.insert(e, output));
+		if let Err(e) = self.outputs.binary_search(&output) {
+			self.outputs.insert(e, output)
+		};
 		self
 	}

@@ -651,10 +649,9 @@ impl TransactionBody {
 	/// kernels, if any, are kept intact.
 	/// Sort order is maintained.
 	pub fn with_kernel(mut self, kernel: TxKernel) -> TransactionBody {
-		self.kernels
-			.binary_search(&kernel)
-			.err()
-			.map(|e| self.kernels.insert(e, kernel));
+		if let Err(e) = self.kernels.binary_search(&kernel) {
+			self.kernels.insert(e, kernel)
+		};
 		self
 	}

@@ -1441,13 +1438,13 @@ impl PMMRable for Output {

 impl OutputFeatures {
 	/// Is this a coinbase output?
-	pub fn is_coinbase(&self) -> bool {
-		*self == OutputFeatures::Coinbase
+	pub fn is_coinbase(self) -> bool {
+		self == OutputFeatures::Coinbase
 	}

 	/// Is this a plain output?
-	pub fn is_plain(&self) -> bool {
-		*self == OutputFeatures::Plain
+	pub fn is_plain(self) -> bool {
+		self == OutputFeatures::Plain
 	}
 }

@@ -1481,13 +1478,10 @@ impl Output {
 	}

 	/// Batch validates the range proofs using the commitments
-	pub fn batch_verify_proofs(
-		commits: &Vec<Commitment>,
-		proofs: &Vec<RangeProof>,
-	) -> Result<(), Error> {
+	pub fn batch_verify_proofs(commits: &[Commitment], proofs: &[RangeProof]) -> Result<(), Error> {
 		let secp = static_secp_instance();
 		secp.lock()
-			.verify_bullet_proof_multi(commits.clone(), proofs.clone(), None)?;
+			.verify_bullet_proof_multi(commits.to_vec(), proofs.to_vec(), None)?;
 		Ok(())
 	}
 }

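The `with_input`/`with_output`/`with_kernel` rewrite replaces a `.err().map(...)` chain used only for its side effect with an explicit `if let Err(e)`. `binary_search` returns `Err(idx)` with the insertion point that keeps the vector sorted, so this is a sorted insert-if-absent. A minimal sketch:

```rust
// Insert `x` into a sorted Vec only if it is not already present,
// preserving sort order.
fn insert_sorted_unique(v: &mut Vec<u64>, x: u64) {
    if let Err(e) = v.binary_search(&x) {
        v.insert(e, x);
    }
}

fn main() {
    let mut v = vec![10, 30];
    insert_sorted_unique(&mut v, 20);
    insert_sorted_unique(&mut v, 20); // duplicate: no-op
    assert_eq!(v, vec![10, 20, 30]);
}
```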
@@ -192,9 +192,7 @@ pub fn verify_partial_sig(
 		pubkey_sum,
 		true,
 	) {
-		Err(ErrorKind::Signature(
-			"Signature validation error".to_string(),
-		))?
+		return Err(ErrorKind::Signature("Signature validation error".to_string()).into());
 	}
 	Ok(())
 }
@@ -332,9 +330,7 @@ pub fn verify_single_from_commit(
 ) -> Result<(), Error> {
 	let pubkey = commit.to_pubkey(secp)?;
 	if !verify_single(secp, sig, msg, None, &pubkey, Some(&pubkey), false) {
-		Err(ErrorKind::Signature(
-			"Signature validation error".to_string(),
-		))?
+		return Err(ErrorKind::Signature("Signature validation error".to_string()).into());
 	}
 	Ok(())
 }
@@ -402,9 +398,7 @@ pub fn verify_completed_sig(
 	msg: &secp::Message,
 ) -> Result<(), Error> {
 	if !verify_single(secp, sig, msg, None, pubkey, pubkey_sum, true) {
-		Err(ErrorKind::Signature(
-			"Signature validation error".to_string(),
-		))?
+		return Err(ErrorKind::Signature("Signature validation error".to_string()).into());
 	}
 	Ok(())
 }

@@ -195,9 +195,7 @@ where
 	let mut msg = [0; 20];
 	msg[2] = switch as u8;
 	let id_bytes = id.to_bytes();
-	for i in 0..17 {
-		msg[i + 3] = id_bytes[i];
-	}
+	msg[3..20].clone_from_slice(&id_bytes[..17]);
 	Ok(ProofMessage::from_bytes(&msg))
 }

@@ -307,9 +305,7 @@ where
 ) -> Result<ProofMessage, Error> {
 	let mut msg = [0; 20];
 	let id_ser = id.serialize_path();
-	for i in 0..16 {
-		msg[i + 4] = id_ser[i];
-	}
+	msg[4..20].clone_from_slice(&id_ser[..16]);
 	Ok(ProofMessage::from_bytes(&msg))
 }

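The `clone_from_slice` hunks replace element-by-element index loops with a single slice copy. One caveat worth knowing: the two ranges must have equal lengths or the call panics. A minimal sketch:

```rust
fn main() {
    let id_bytes = [7u8; 17];
    let mut msg = [0u8; 20];

    // Old style:
    // for i in 0..17 {
    //     msg[i + 3] = id_bytes[i];
    // }
    // New style: one bulk copy; both ranges are 17 bytes long.
    msg[3..20].clone_from_slice(&id_bytes[..17]);

    assert_eq!(&msg[3..20], &id_bytes[..]);
    assert_eq!(&msg[..3], &[0, 0, 0]);
}
```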
@@ -81,13 +81,13 @@ pub mod option_sig_serde {
 		let static_secp = static_secp_instance();
 		let static_secp = static_secp.lock();
 		Option::<String>::deserialize(deserializer).and_then(|res| match res {
-			Some(string) => from_hex(string.to_string())
+			Some(string) => from_hex(string)
 				.map_err(|err| Error::custom(err.to_string()))
 				.and_then(|bytes: Vec<u8>| {
 					let mut b = [0u8; 64];
 					b.copy_from_slice(&bytes[0..64]);
 					secp::Signature::from_compact(&static_secp, &b)
-						.map(|val| Some(val))
+						.map(Some)
 						.map_err(|err| Error::custom(err.to_string()))
 				}),
 			None => Ok(None),
@@ -123,13 +123,13 @@ pub mod option_seckey_serde {
 		let static_secp = static_secp_instance();
 		let static_secp = static_secp.lock();
 		Option::<String>::deserialize(deserializer).and_then(|res| match res {
-			Some(string) => from_hex(string.to_string())
+			Some(string) => from_hex(string)
 				.map_err(|err| Error::custom(err.to_string()))
 				.and_then(|bytes: Vec<u8>| {
 					let mut b = [0u8; 32];
 					b.copy_from_slice(&bytes[0..32]);
 					secp::key::SecretKey::from_slice(&static_secp, &b)
-						.map(|val| Some(val))
+						.map(Some)
 						.map_err(|err| Error::custom(err.to_string()))
 				}),
 			None => Ok(None),
@@ -195,7 +195,7 @@ pub mod option_commitment_serde {
 	D: Deserializer<'de>,
 {
 	Option::<String>::deserialize(deserializer).and_then(|res| match res {
-		Some(string) => from_hex(string.to_string())
+		Some(string) => from_hex(string)
 			.map_err(|err| Error::custom(err.to_string()))
 			.and_then(|bytes: Vec<u8>| Ok(Some(Commitment::from_vec(bytes.to_vec())))),
 		None => Ok(None),

@@ -70,7 +70,7 @@ where

 	fn verify(&self, proof: &Proof) -> Result<(), Error> {
 		if proof.proof_size() != global::proofsize() {
-			return Err(ErrorKind::Verification("wrong cycle length".to_owned()))?;
+			return Err(ErrorKind::Verification("wrong cycle length".to_owned()).into());
 		}
 		let nonces = &proof.nonces;
 		let mut uvs = vec![0u64; 2 * proof.proof_size()];
@@ -79,10 +79,10 @@ where

 		for n in 0..proof.proof_size() {
 			if nonces[n] > to_u64!(self.params.edge_mask) {
-				return Err(ErrorKind::Verification("edge too big".to_owned()))?;
+				return Err(ErrorKind::Verification("edge too big".to_owned()).into());
 			}
 			if n > 0 && nonces[n] <= nonces[n - 1] {
-				return Err(ErrorKind::Verification("edges not ascending".to_owned()))?;
+				return Err(ErrorKind::Verification("edges not ascending".to_owned()).into());
 			}
 			// 21 is standard siphash rotation constant
 			let edge = to_edge!(
@@ -95,9 +95,7 @@ where
 			xor1 ^= uvs[2 * n + 1];
 		}
 		if xor0 | xor1 != 0 {
-			return Err(ErrorKind::Verification(
-				"endpoints don't match up".to_owned(),
-			))?;
+			return Err(ErrorKind::Verification("endpoints don't match up".to_owned()).into());
 		}
 		let mut n = 0;
 		let mut i = 0;
@@ -114,13 +112,13 @@ where
 				if uvs[k] == uvs[i] {
 					// find other edge endpoint matching one at i
 					if j != i {
-						return Err(ErrorKind::Verification("branch in cycle".to_owned()))?;
+						return Err(ErrorKind::Verification("branch in cycle".to_owned()).into());
 					}
 					j = k;
 				}
 			}
 			if j == i {
-				return Err(ErrorKind::Verification("cycle dead ends".to_owned()))?;
+				return Err(ErrorKind::Verification("cycle dead ends".to_owned()).into());
 			}
 			i = j ^ 1;
 			n += 1;
@@ -131,7 +129,7 @@ where
 		if n == self.params.proof_size {
 			Ok(())
 		} else {
-			Err(ErrorKind::Verification("cycle too short".to_owned()))?
+			Err(ErrorKind::Verification("cycle too short".to_owned()).into())
 		}
 	}
 }

@@ -69,7 +69,7 @@ where

 	fn verify(&self, proof: &Proof) -> Result<(), Error> {
 		if proof.proof_size() != global::proofsize() {
-			return Err(ErrorKind::Verification("wrong cycle length".to_owned()))?;
+			return Err(ErrorKind::Verification("wrong cycle length".to_owned()).into());
 		}
 		let nonces = &proof.nonces;
 		let mut uvs = vec![0u64; 2 * proof.proof_size()];
@@ -81,13 +81,13 @@ where
 		for n in 0..proof.proof_size() {
 			let dir = (nonces[n] & 1) as usize;
 			if ndir[dir] >= proof.proof_size() / 2 {
-				return Err(ErrorKind::Verification("edges not balanced".to_owned()))?;
+				return Err(ErrorKind::Verification("edges not balanced".to_owned()).into());
 			}
 			if nonces[n] > to_u64!(self.params.edge_mask) {
-				return Err(ErrorKind::Verification("edge too big".to_owned()))?;
+				return Err(ErrorKind::Verification("edge too big".to_owned()).into());
 			}
 			if n > 0 && nonces[n] <= nonces[n - 1] {
-				return Err(ErrorKind::Verification("edges not ascending".to_owned()))?;
+				return Err(ErrorKind::Verification("edges not ascending".to_owned()).into());
 			}
 			let edge = to_edge!(
 				T,
@@ -101,9 +101,7 @@ where
 			ndir[dir] += 1;
 		}
 		if xor0 | xor1 != 0 {
-			return Err(ErrorKind::Verification(
-				"endpoints don't match up".to_owned(),
-			))?;
+			return Err(ErrorKind::Verification("endpoints don't match up".to_owned()).into());
 		}
 		let mut n = 0;
 		let mut i = 0;
@@ -115,13 +113,13 @@ where
 				if uvs[k] == uvs[i] {
 					// find reverse edge endpoint identical to one at i
 					if j != i {
-						return Err(ErrorKind::Verification("branch in cycle".to_owned()))?;
+						return Err(ErrorKind::Verification("branch in cycle".to_owned()).into());
 					}
 					j = k;
 				}
 			}
 			if j == i {
-				return Err(ErrorKind::Verification("cycle dead ends".to_owned()))?;
+				return Err(ErrorKind::Verification("cycle dead ends".to_owned()).into());
 			}
 			i = j ^ 1;
 			n += 1;
@@ -132,7 +130,7 @@ where
 		if n == self.params.proof_size {
 			Ok(())
 		} else {
-			Err(ErrorKind::Verification("cycle too short".to_owned()))?
+			Err(ErrorKind::Verification("cycle too short".to_owned()).into())
 		}
 	}
 }

@@ -69,7 +69,7 @@ where
 	fn verify(&self, proof: &Proof) -> Result<(), Error> {
 		let proofsize = proof.proof_size();
 		if proofsize != global::proofsize() {
-			return Err(ErrorKind::Verification("wrong cycle length".to_owned()))?;
+			return Err(ErrorKind::Verification("wrong cycle length".to_owned()).into());
 		}
 		let nonces = &proof.nonces;
 		let mut from = vec![0u32; proofsize];
@@ -80,10 +80,10 @@ where

 		for n in 0..proofsize {
 			if nonces[n] > to_u64!(self.params.edge_mask) {
-				return Err(ErrorKind::Verification("edge too big".to_owned()))?;
+				return Err(ErrorKind::Verification("edge too big".to_owned()).into());
 			}
 			if n > 0 && nonces[n] <= nonces[n - 1] {
-				return Err(ErrorKind::Verification("edges not ascending".to_owned()))?;
+				return Err(ErrorKind::Verification("edges not ascending".to_owned()).into());
 			}
 			let edge = to_edge!(
 				T,
@@ -95,9 +95,7 @@ where
 			xor_to ^= to[n];
 		}
 		if xor_from != xor_to {
-			return Err(ErrorKind::Verification(
-				"endpoints don't match up".to_owned(),
-			))?;
+			return Err(ErrorKind::Verification("endpoints don't match up".to_owned()).into());
 		}
 		let mut visited = vec![false; proofsize];
 		let mut n = 0;
@@ -105,14 +103,14 @@ where
 		loop {
 			// follow cycle
 			if visited[i] {
-				return Err(ErrorKind::Verification("branch in cycle".to_owned()))?;
+				return Err(ErrorKind::Verification("branch in cycle".to_owned()).into());
 			}
 			visited[i] = true;
 			let mut nexti = 0;
 			while from[nexti] != to[i] {
 				nexti += 1;
 				if nexti == proofsize {
-					return Err(ErrorKind::Verification("cycle dead ends".to_owned()))?;
+					return Err(ErrorKind::Verification("cycle dead ends".to_owned()).into());
 				}
 			}
 			i = nexti;
@@ -125,7 +123,7 @@ where
 		if n == proofsize {
 			Ok(())
 		} else {
-			Err(ErrorKind::Verification("cycle too short".to_owned()))?
+			Err(ErrorKind::Verification("cycle too short".to_owned()).into())
 		}
 	}
 }

@@ -52,7 +52,7 @@ where
 	/// Create a new graph with given parameters
 	pub fn new(max_edges: T, max_sols: u32, proof_size: usize) -> Result<Graph<T>, Error> {
 		if to_u64!(max_edges) >= u64::max_value() / 2 {
-			return Err(ErrorKind::Verification(format!("graph is to big to build")))?;
+			return Err(ErrorKind::Verification("graph is to big to build".to_string()).into());
 		}
 		let max_nodes = 2 * to_u64!(max_edges);
 		Ok(Graph {
@@ -88,7 +88,7 @@ where
 	pub fn add_edge(&mut self, u: T, mut v: T) -> Result<(), Error> {
 		let max_nodes_t = to_edge!(T, self.max_nodes);
 		if u >= max_nodes_t || v >= max_nodes_t {
-			return Err(ErrorKind::EdgeAddition)?;
+			return Err(ErrorKind::EdgeAddition.into());
 		}
 		v = v + to_edge!(T, self.max_nodes);
 		let adj_u = self.adj_list[to_usize!(u ^ T::one())];
@@ -101,7 +101,7 @@ where
 		let ulink = self.links.len();
 		let vlink = self.links.len() + 1;
 		if to_edge!(T, vlink) == self.nil {
-			return Err(ErrorKind::EdgeAddition)?;
+			return Err(ErrorKind::EdgeAddition.into());
 		}
 		self.links.push(Link {
 			next: self.adj_list[to_usize!(u)],
@@ -272,7 +272,7 @@ where
 			self.verify_impl(&s)?;
 		}
 		if self.graph.solutions.is_empty() {
-			Err(ErrorKind::NoSolution)?
+			Err(ErrorKind::NoSolution.into())
 		} else {
 			Ok(self.graph.solutions.clone())
 		}
@@ -282,7 +282,7 @@ where
 	/// graph
 	pub fn verify_impl(&self, proof: &Proof) -> Result<(), Error> {
 		if proof.proof_size() != global::proofsize() {
-			return Err(ErrorKind::Verification("wrong cycle length".to_owned()))?;
+			return Err(ErrorKind::Verification("wrong cycle length".to_owned()).into());
 		}
 		let nonces = &proof.nonces;
 		let mut uvs = vec![0u64; 2 * proof.proof_size()];
@@ -291,10 +291,10 @@ where

 		for n in 0..proof.proof_size() {
 			if nonces[n] > to_u64!(self.params.edge_mask) {
-				return Err(ErrorKind::Verification("edge too big".to_owned()))?;
+				return Err(ErrorKind::Verification("edge too big".to_owned()).into());
 			}
 			if n > 0 && nonces[n] <= nonces[n - 1] {
-				return Err(ErrorKind::Verification("edges not ascending".to_owned()))?;
+				return Err(ErrorKind::Verification("edges not ascending".to_owned()).into());
 			}
 			uvs[2 * n] = to_u64!(self.sipnode(to_edge!(T, nonces[n]), 0)?);
 			uvs[2 * n + 1] = to_u64!(self.sipnode(to_edge!(T, nonces[n]), 1)?);
@@ -302,9 +302,7 @@ where
 			xor1 ^= uvs[2 * n + 1];
 		}
 		if xor0 | xor1 != 0 {
-			return Err(ErrorKind::Verification(
-				"endpoints don't match up".to_owned(),
-			))?;
+			return Err(ErrorKind::Verification("endpoints don't match up".to_owned()).into());
 		}
 		let mut n = 0;
 		let mut i = 0;
@@ -321,13 +319,13 @@ where
 				if uvs[k] >> 1 == uvs[i] >> 1 {
 					// find other edge endpoint matching one at i
 					if j != i {
-						return Err(ErrorKind::Verification("branch in cycle".to_owned()))?;
+						return Err(ErrorKind::Verification("branch in cycle".to_owned()).into());
 					}
 					j = k;
 				}
 			}
 			if j == i || uvs[j] == uvs[i] {
-				return Err(ErrorKind::Verification("cycle dead ends".to_owned()))?;
+				return Err(ErrorKind::Verification("cycle dead ends".to_owned()).into());
 			}
 			i = j ^ 1;
 			n += 1;
@@ -338,7 +336,7 @@ where
 		if n == self.params.proof_size {
 			Ok(())
 		} else {
-			Err(ErrorKind::Verification("cycle too short".to_owned()))?
+			Err(ErrorKind::Verification("cycle too short".to_owned()).into())
 		}
 	}
 }

@@ -292,7 +292,7 @@ impl ProtocolVersion {
 	pub const MAX: u32 = std::u32::MAX;

 	/// Protocol version as u32 to allow for convenient exhaustive matching on values.
-	pub fn value(&self) -> u32 {
+	pub fn value(self) -> u32 {
 		self.0
 	}

@@ -608,7 +608,7 @@ impl PMMRable for RangeProof {
 	type E = Self;

 	fn as_elmt(&self) -> Self::E {
-		self.clone()
+		*self
 	}

 	// Size is length prefix (8 bytes for u64) + MAX_PROOF_SIZE.
@@ -1255,7 +1255,7 @@ where
 			}
 		}
 	}
-	const VARIANTS: &'static [&str] = &[
+	const VARIANTS: &[&str] = &[
 		"NotFound",
 		"PermissionDenied",
 		"ConnectionRefused",

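Several hunks in this commit change `&self` receivers to `self` on small `Copy` types. Callers are unaffected because method calls copy the value, and the body loses its `*self` derefs. A minimal sketch:

```rust
// For a small Copy type, a by-value receiver is at least as cheap as a
// reference and reads more directly.
#[derive(Clone, Copy, PartialEq)]
struct ProtocolVersion(u32);

impl ProtocolVersion {
    fn value(self) -> u32 {
        self.0
    }
}

fn main() {
    let v = ProtocolVersion(2);
    assert_eq!(v.value(), 2);
    // `v` is still usable afterwards: it was copied, not moved.
    assert!(v == ProtocolVersion(2));
}
```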
@@ -105,7 +105,7 @@ impl error::Error for Error {
 	}
 }

-static BASE58_CHARS: &'static [u8] = b"123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz";
+static BASE58_CHARS: &[u8] = b"123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz";

 static BASE58_DIGITS: [Option<u8>; 128] = [
 	None,

@@ -231,15 +231,15 @@ impl ChildNumber {
 	/// Returns `true` if the child number is a [`Normal`] value.
 	///
 	/// [`Normal`]: #variant.Normal
-	pub fn is_normal(&self) -> bool {
+	pub fn is_normal(self) -> bool {
 		!self.is_hardened()
 	}

 	/// Returns `true` if the child number is a [`Hardened`] value.
 	///
 	/// [`Hardened`]: #variant.Hardened
-	pub fn is_hardened(&self) -> bool {
-		match *self {
+	pub fn is_hardened(self) -> bool {
+		match self {
 			ChildNumber::Hardened { .. } => true,
 			ChildNumber::Normal { .. } => false,
 		}
@@ -544,7 +544,7 @@ impl ExtendedPubKey {
 		H: BIP32Hasher,
 	{
 		let (sk, chain_code) = self.ckd_pub_tweak(secp, hasher, i)?;
-		let mut pk = self.public_key.clone();
+		let mut pk = self.public_key;
 		pk.add_exp_assign(secp, &sk).map_err(Error::Ecdsa)?;

 		Ok(ExtendedPubKey {

@@ -152,9 +152,7 @@ impl Identifier {
 	pub fn from_serialized_path(len: u8, p: &[u8]) -> Identifier {
 		let mut id = [0; IDENTIFIER_SIZE];
 		id[0] = len;
-		for i in 1..IDENTIFIER_SIZE {
-			id[i] = p[i - 1];
-		}
+		id[1..IDENTIFIER_SIZE].clone_from_slice(&p[0..(IDENTIFIER_SIZE - 1)]);
 		Identifier(id)
 	}

@@ -169,9 +167,8 @@ impl Identifier {
 	}
 	pub fn from_bytes(bytes: &[u8]) -> Identifier {
 		let mut identifier = [0; IDENTIFIER_SIZE];
-		for i in 0..min(IDENTIFIER_SIZE, bytes.len()) {
-			identifier[i] = bytes[i];
-		}
+		identifier[..min(IDENTIFIER_SIZE, bytes.len())]
+			.clone_from_slice(&bytes[..min(IDENTIFIER_SIZE, bytes.len())]);
 		Identifier(identifier)
 	}

@@ -282,9 +279,8 @@ impl BlindingFactor {

 	pub fn from_slice(data: &[u8]) -> BlindingFactor {
 		let mut blind = [0; SECRET_KEY_SIZE];
-		for i in 0..min(SECRET_KEY_SIZE, data.len()) {
-			blind[i] = data[i];
-		}
+		blind[..min(SECRET_KEY_SIZE, data.len())]
+			.clone_from_slice(&data[..min(SECRET_KEY_SIZE, data.len())]);
 		BlindingFactor(blind)
 	}

@@ -125,7 +125,7 @@ impl<'a> Message<'a> {
 			let read_len = cmp::min(8000, len - written);
 			let mut buf = vec![0u8; read_len];
 			self.stream.read_exact(&mut buf[..])?;
-			writer.write_all(&mut buf)?;
+			writer.write_all(&buf)?;
 			written += read_len;
 		}
 		Ok(written)
@@ -291,14 +291,14 @@ where
 	let reader_stopped = stopped.clone();

 	let reader_tracker = tracker.clone();
-	let writer_tracker = tracker.clone();
+	let writer_tracker = tracker;

 	let reader_thread = thread::Builder::new()
 		.name("peer_read".to_string())
 		.spawn(move || {
 			loop {
 				// check the read end
-				match try_header!(read_header(&mut reader, version), &mut reader) {
+				match try_header!(read_header(&mut reader, version), &reader) {
 					Some(MsgHeaderWrapper::Known(header)) => {
 						reader
 							.set_read_timeout(Some(BODY_IO_TIMEOUT))
@@ -347,7 +347,7 @@ where
 				reader
 					.peer_addr()
 					.map(|a| a.to_string())
-					.unwrap_or("?".to_owned())
+					.unwrap_or_else(|_| "?".to_owned())
 			);
 			let _ = reader.shutdown(Shutdown::Both);
 		})?;
@@ -380,7 +380,7 @@ where
 				writer
 					.peer_addr()
 					.map(|a| a.to_string())
-					.unwrap_or("?".to_owned())
+					.unwrap_or_else(|_| "?".to_owned())
 			);
 		})?;
 	Ok((reader_thread, writer_thread))

@@ -31,7 +31,7 @@ use std::io::{Read, Write};
 use std::sync::Arc;

 /// Grin's user agent with current version
-pub const USER_AGENT: &'static str = concat!("MW/Grin ", env!("CARGO_PKG_VERSION"));
+pub const USER_AGENT: &str = concat!("MW/Grin ", env!("CARGO_PKG_VERSION"));

 /// Magic numbers expected in the header of every message
 const OTHER_MAGIC: [u8; 2] = [73, 43];

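The repeated `&'static str` to `&str` edits rely on the rule that references in `const` and `static` items are implicitly `'static`, so the annotation is redundant (clippy's `redundant_static_lifetimes`). A minimal sketch with a placeholder version string:

```rust
// Both items have type &'static str; the second just omits the
// lifetime that the compiler infers anyway. "x.y.z" is a placeholder,
// not a real grin version.
const USER_AGENT_EXPLICIT: &'static str = "MW/Grin x.y.z";
const USER_AGENT_ELIDED: &str = "MW/Grin x.y.z";

fn main() {
    assert_eq!(USER_AGENT_EXPLICIT, USER_AGENT_ELIDED);
}
```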
@@ -73,7 +73,7 @@ impl Peers {
 		};
 		debug!("Saving newly connected peer {}.", peer_data.addr);
 		self.save_peer(&peer_data)?;
-		peers.insert(peer_data.addr, peer.clone());
+		peers.insert(peer_data.addr, peer);

 		Ok(())
 	}
@@ -149,7 +149,7 @@ impl Peers {
 				return None;
 			}
 		};
-		peers.get(&addr).map(|p| p.clone())
+		peers.get(&addr).cloned()
 	}

 	/// Number of peers currently connected to.
@@ -171,7 +171,7 @@ impl Peers {
 	// (total_difficulty) than we do.
 	pub fn more_work_peers(&self) -> Result<Vec<Arc<Peer>>, chain::Error> {
 		let peers = self.connected_peers();
-		if peers.len() == 0 {
+		if peers.is_empty() {
 			return Ok(vec![]);
 		}

@@ -190,7 +190,7 @@ impl Peers {
 	// (total_difficulty) than/as we do.
 	pub fn more_or_same_work_peers(&self) -> Result<usize, chain::Error> {
 		let peers = self.connected_peers();
-		if peers.len() == 0 {
+		if peers.is_empty() {
 			return Ok(0);
 		}

@@ -217,7 +217,7 @@ impl Peers {
 	/// branch, showing the highest total difficulty.
 	pub fn most_work_peers(&self) -> Vec<Arc<Peer>> {
 		let peers = self.connected_peers();
-		if peers.len() == 0 {
+		if peers.is_empty() {
 			return vec![];
 		}

@@ -265,7 +265,7 @@ impl Peers {
 				peers.remove(&peer.info.addr);
 				Ok(())
 			}
-			None => return Err(Error::PeerNotFound),
+			None => Err(Error::PeerNotFound),
 		}
 	}

@@ -275,9 +275,9 @@ impl Peers {
 		// check if peer exist
 		self.get_peer(peer_addr)?;
 		if self.is_banned(peer_addr) {
-			return self.update_state(peer_addr, State::Healthy);
+			self.update_state(peer_addr, State::Healthy)
 		} else {
-			return Err(Error::PeerNotBanned);
+			Err(Error::PeerNotBanned)
 		}
 	}

@@ -469,7 +469,7 @@ impl Peers {
 				.outgoing_connected_peers()
 				.iter()
 				.take(excess_outgoing_count)
-				.map(|x| x.info.addr.clone())
+				.map(|x| x.info.addr)
 				.collect::<Vec<_>>();
 			rm.append(&mut addrs);
 		}
@@ -482,7 +482,7 @@ impl Peers {
 				.incoming_connected_peers()
 				.iter()
 				.take(excess_incoming_count)
-				.map(|x| x.info.addr.clone())
+				.map(|x| x.info.addr)
 				.collect::<Vec<_>>();
 			rm.append(&mut addrs);
 		}

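Two of the smaller idioms above in isolation: `map.get(&k).map(|p| p.clone())` is exactly `map.get(&k).cloned()`, and `len() == 0` reads better as `is_empty()`. A standalone sketch (illustrative, not grin's peer types):

```rust
use std::collections::HashMap;

fn main() {
    let mut peers: HashMap<&str, String> = HashMap::new();
    peers.insert("127.0.0.1:3414", "peer-1".to_string());

    // `.cloned()` on Option<&String> clones the inner value.
    let cloned: Option<String> = peers.get("127.0.0.1:3414").cloned();
    assert_eq!(cloned.as_deref(), Some("peer-1"));

    let empty: Vec<String> = vec![];
    assert!(empty.is_empty()); // rather than `empty.len() == 0`
}
```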
@@ -23,10 +23,10 @@ use crate::core::ser::{self, Readable, Reader, Writeable, Writer};
 use crate::types::{Capabilities, PeerAddr, ReasonForBan};
 use grin_store::{self, option_to_not_found, to_key, Error};

-const DB_NAME: &'static str = "peer";
-const STORE_SUBPATH: &'static str = "peers";
+const DB_NAME: &str = "peer";
+const STORE_SUBPATH: &str = "peers";

-const PEER_PREFIX: u8 = 'P' as u8;
+const PEER_PREFIX: u8 = b'P';

 // Types of messages
 enum_from_primitive! {

@@ -333,17 +333,17 @@ bitflags! {
 	#[derive(Serialize, Deserialize)]
 	pub struct Capabilities: u32 {
 		/// We don't know (yet) what the peer can do.
-		const UNKNOWN = 0b00000000;
+		const UNKNOWN = 0b0000_0000;
 		/// Can provide full history of headers back to genesis
 		/// (for at least one arbitrary fork).
-		const HEADER_HIST = 0b00000001;
+		const HEADER_HIST = 0b0000_0001;
 		/// Can provide block headers and the TxHashSet for some recent-enough
 		/// height.
-		const TXHASHSET_HIST = 0b00000010;
+		const TXHASHSET_HIST = 0b0000_0010;
 		/// Can provide a list of healthy peers
-		const PEER_LIST = 0b00000100;
+		const PEER_LIST = 0b0000_0100;
 		/// Can broadcast and request txs by kernel hash.
-		const TX_KERNEL_HASH = 0b00001000;
+		const TX_KERNEL_HASH = 0b0000_1000;

 		/// All nodes right now are "full nodes".
 		/// Some nodes internally may maintain longer block histories (archival_mode)
@@ -470,11 +470,11 @@ pub struct PeerInfoDisplay {
 impl From<PeerInfo> for PeerInfoDisplay {
 	fn from(info: PeerInfo) -> PeerInfoDisplay {
 		PeerInfoDisplay {
-			capabilities: info.capabilities.clone(),
+			capabilities: info.capabilities,
 			user_agent: info.user_agent.clone(),
 			version: info.version,
-			addr: info.addr.clone(),
-			direction: info.direction.clone(),
+			addr: info.addr,
+			direction: info.direction,
 			total_difficulty: info.total_difficulty(),
 			height: info.height(),
 		}

@@ -108,7 +108,7 @@ impl Pool {
 		(
 			txs,
 			kern_ids
-				.into_iter()
+				.iter()
 				.filter(|id| !found_ids.contains(id))
 				.cloned()
 				.collect(),
@@ -412,7 +412,7 @@ impl Pool {
 		let mut found_txs = vec![];

 		// Gather all the kernels of the multi-kernel transaction in one set
-		let kernel_set = kernels.into_iter().collect::<HashSet<_>>();
+		let kernel_set = kernels.iter().collect::<HashSet<_>>();

 		// Check each transaction in the pool
 		for entry in &self.entries {
@@ -468,7 +468,7 @@ impl Bucket {
 	fn new(tx: Transaction, age_idx: usize) -> Bucket {
 		Bucket {
 			fee_to_weight: tx.fee_to_weight(),
-			raw_txs: vec![tx.clone()],
+			raw_txs: vec![tx],
 			age_idx,
 		}
 	}

@@ -192,19 +192,12 @@ impl TransactionPool {
 		let bucket_transactions = self.txpool.bucket_transactions(Weighting::NoLimit);

 		// Get last transaction and remove it
-		match bucket_transactions.last() {
-			Some(evictable_transaction) => {
-				// Remove transaction
-				self.txpool.entries = self
-					.txpool
-					.entries
-					.iter()
-					.filter(|x| x.tx != *evictable_transaction)
-					.map(|x| x.clone())
-					.collect::<Vec<_>>();
-			}
-			None => (),
-		}
+		if let Some(evictable_transaction) = bucket_transactions.last() {
+			// Remove transaction
+			self.txpool
+				.entries
+				.retain(|x| x.tx != *evictable_transaction);
+		};
 	}

 	// Old txs will "age out" after 30 mins.
@@ -277,9 +270,9 @@ impl TransactionPool {
 		}

 		// Check that the stempool can accept this transaction
-		if stem && self.stempool.size() > self.config.max_stempool_size {
-			return Err(PoolError::OverCapacity);
-		} else if self.total_size() > self.config.max_pool_size {
+		if stem && self.stempool.size() > self.config.max_stempool_size
+			|| self.total_size() > self.config.max_pool_size
+		{
 			return Err(PoolError::OverCapacity);
 		}

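The eviction rewrite above swaps a filter-clone-collect round trip for `Vec::retain`, which drops non-matching elements in place without cloning the survivors. A minimal sketch:

```rust
fn main() {
    let evictable = 3;
    let mut entries = vec![1, 2, 3, 4];

    // Old shape (allocates a new Vec and clones every kept element):
    // entries = entries.iter().filter(|x| **x != evictable).cloned().collect();

    // New shape (in place, no cloning):
    entries.retain(|x| *x != evictable);

    assert_eq!(entries, vec![1, 2, 4]);
}
```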
@@ -95,7 +95,7 @@ impl Store {
 			Some(n) => n.to_owned(),
 			None => "lmdb".to_owned(),
 		};
-		let full_path = [root_path.to_owned(), name.clone()].join("/");
+		let full_path = [root_path.to_owned(), name].join("/");
 		fs::create_dir_all(&full_path)
 			.expect("Unable to create directory 'db_root' to store chain_data");

@@ -174,7 +174,7 @@ impl<T: PMMRable> Backend<T> for PMMRBackend<T> {
 	fn data_as_temp_file(&self) -> Result<File, String> {
 		self.data_file
 			.as_temp_file()
-			.map_err(|_| format!("Failed to build temp data file"))
+			.map_err(|_| "Failed to build temp data file".to_string())
 	}

 	/// Rewind the PMMR backend to the given position.

@@ -155,7 +155,7 @@ where
 	/// Write the file out to disk, pruning removed elements.
 	pub fn save_prune(&mut self, prune_pos: &[u64]) -> io::Result<()> {
 		// Need to convert from 1-index to 0-index (don't ask).
-		let prune_idx: Vec<_> = prune_pos.into_iter().map(|x| x - 1).collect();
+		let prune_idx: Vec<_> = prune_pos.iter().map(|x| x - 1).collect();
 		self.file.save_prune(prune_idx.as_slice())
 	}
 }

@@ -37,7 +37,7 @@ pub fn copy_dir_to(src: &Path, dst: &Path) -> io::Result<u64> {
 	for entry_result in src.read_dir()? {
 		let entry = entry_result?;
 		let file_type = entry.file_type()?;
-		let count = copy_to(&entry.path(), &file_type, &dst.join(entry.file_name()))?;
+		let count = copy_to(&entry.path(), file_type, &dst.join(entry.file_name()))?;
 		counter += count;
 	}
 	Ok(counter)
@@ -55,7 +55,7 @@ pub fn list_files(path: &Path) -> Vec<PathBuf> {
 		.collect()
 }

-fn copy_to(src: &Path, src_type: &fs::FileType, dst: &Path) -> io::Result<u64> {
+fn copy_to(src: &Path, src_type: fs::FileType, dst: &Path) -> io::Result<u64> {
 	if src_type.is_file() {
 		fs::copy(src, dst)
 	} else if src_type.is_dir() {

@@ -340,7 +340,7 @@ fn send_panic_to_log() {
 		None => error!("thread '{}' panicked at '{}'{:?}", thread, msg, backtrace),
 	}
 	//also print to stderr
-	let tui_running = TUI_RUNNING.lock().clone();
+	let tui_running = *TUI_RUNNING.lock();
 	if !tui_running {
 		let config = LOGGING_CONFIG.lock();