From 8504efb796bda1bdf6b54ae4f32c4b758fdca19b Mon Sep 17 00:00:00 2001
From: Ignotus Peverell
Date: Fri, 29 Sep 2017 18:44:25 +0000
Subject: [PATCH] Cargo fmt all the things

---
 api/src/client.rs                     |   23 +-
 api/src/rest.rs                       |   94 +-
 api/src/types.rs                      |   11 +-
 chain/src/chain.rs                    |   37 +-
 chain/src/pipe.rs                     |   47 +-
 chain/src/store.rs                    |   89 +-
 chain/src/sumtree.rs                  |   92 +-
 chain/src/types.rs                    |    5 +-
 chain/tests/mine_simple_chain.rs      |   30 +-
 chain/tests/test_coinbase_maturity.rs |  142 +-
 core/benches/sumtree.rs               |   76 +-
 core/src/consensus.rs                 |  107 +-
 core/src/core/block.rs                |  303 +++--
 core/src/core/build.rs                |   55 +-
 core/src/core/hash.rs                 |    4 +-
 core/src/core/mod.rs                  |   36 +-
 core/src/core/pmmr.rs                 |  137 +-
 core/src/core/target.rs               |   22 +-
 core/src/core/transaction.rs          |   59 +-
 core/src/global.rs                    |  105 +-
 core/src/ser.rs                       |   51 +-
 grin/src/adapters.rs                  |   81 +-
 grin/src/lib.rs                       |    2 +-
 grin/src/miner.rs                     |    6 +-
 grin/src/seed.rs                      |  171 +--
 grin/src/server.rs                    |   67 +-
 grin/src/sync.rs                      |   25 +-
 grin/src/types.rs                     |    4 +-
 p2p/src/conn.rs                       |  118 +-
 p2p/src/handshake.rs                  |  142 +-
 p2p/src/msg.rs                        |  109 +-
 p2p/src/peer.rs                       |   70 +-
 p2p/src/protocol.rs                   |   86 +-
 p2p/src/rate_limit.rs                 |   24 +-
 p2p/src/server.rs                     |   50 +-
 p2p/src/store.rs                      |   34 +-
 p2p/src/types.rs                      |    6 +-
 p2p/tests/peer_handshake.rs           |   75 +-
 pool/src/blockchain.rs                |  226 ++--
 pool/src/graph.rs                     |  357 +++---
 pool/src/pool.rs                      | 1710 +++++++++++++------------
 pool/src/types.rs                     |  598 +++++----
 pow/src/cuckoo.rs                     |  272 +++-
 pow/src/lib.rs                        |   58 +-
 pow/src/plugin.rs                     |   16 +-
 pow/src/types.rs                      |   12 +-
 src/bin/grin.rs                       |    4 +-
 store/src/sumtree.rs                  |   90 +-
 store/tests/sumtree.rs                |    7 +-
 util/src/hex.rs                       |   41 +-
 util/src/lib.rs                       |   28 +-
 wallet/src/checker.rs                 |   33 +-
 wallet/src/extkey.rs                  |   45 +-
 wallet/src/info.rs                    |    7 +-
 wallet/src/receiver.rs                |   95 +-
 wallet/src/sender.rs                  |   25 +-
 wallet/src/types.rs                   |   59 +-
 57 files changed, 3678 insertions(+), 2600 deletions(-)

diff --git a/api/src/client.rs b/api/src/client.rs
index 80e8df75d..0b7dcb790 100644
--- a/api/src/client.rs
+++ b/api/src/client.rs
@@ -26,12 +26,14 @@ use rest::Error;
 /// returns a JSON object. Handles request building, JSON deserialization and
 /// response code checking.
 pub fn get<'a, T>(url: &'a str) -> Result<T, Error>
-    where for<'de> T: Deserialize<'de>
+where
+    for<'de> T: Deserialize<'de>,
 {
 	let client = hyper::Client::new();
 	let res = check_error(client.get(url).send())?;
-	serde_json::from_reader(res)
-		.map_err(|e| Error::Internal(format!("Server returned invalid JSON: {}", e)))
+	serde_json::from_reader(res).map_err(|e| {
+		Error::Internal(format!("Server returned invalid JSON: {}", e))
+	})
 }
 
 /// Helper function to easily issue a HTTP POST request with the provided JSON
@@ -39,15 +41,18 @@
 /// building, JSON serialization and deserialization, and response code
 /// checking.
 pub fn post<'a, IN, OUT>(url: &'a str, input: &IN) -> Result<OUT, Error>
-    where IN: Serialize,
-          for<'de> OUT: Deserialize<'de>
+where
+    IN: Serialize,
+    for<'de> OUT: Deserialize<'de>,
 {
-	let in_json = serde_json::to_string(input)
-		.map_err(|e| Error::Internal(format!("Could not serialize data to JSON: {}", e)))?;
+	let in_json = serde_json::to_string(input).map_err(|e| {
+		Error::Internal(format!("Could not serialize data to JSON: {}", e))
+	})?;
 	let client = hyper::Client::new();
 	let res = check_error(client.post(url).body(&mut in_json.as_bytes()).send())?;
-	serde_json::from_reader(res)
-		.map_err(|e| Error::Internal(format!("Server returned invalid JSON: {}", e)))
+	serde_json::from_reader(res).map_err(|e| {
+		Error::Internal(format!("Server returned invalid JSON: {}", e))
+	})
 }
 
 // convert hyper error and check for non success response codes
diff --git a/api/src/rest.rs b/api/src/rest.rs
index 90004e2d9..ef263399e 100644
--- a/api/src/rest.rs
+++ b/api/src/rest.rs
@@ -203,25 +203,35 @@ struct OpWrapper<E> {
 }
 
 impl<E> Handler for OpWrapper<E>
-    where E: ApiEndpoint
+where
+    E: ApiEndpoint,
 {
 	fn handle(&self, req: &mut Request) -> IronResult<Response> {
-		let t: E::OP_IN = serde_json::from_reader(req.body.by_ref())
-			.map_err(|e| IronError::new(e, status::BadRequest))?;
+		let t: E::OP_IN = serde_json::from_reader(req.body.by_ref()).map_err(|e| {
+			IronError::new(e, status::BadRequest)
+		})?;
 		let res = self.endpoint.operation(self.operation.clone(), t)?;
-		let res_json = serde_json::to_string(&res)
-			.map_err(|e| IronError::new(e, status::InternalServerError))?;
+		let res_json = serde_json::to_string(&res).map_err(|e| {
+			IronError::new(e, status::InternalServerError)
+		})?;
 		Ok(Response::with((status::Ok, res_json)))
 	}
 }
 
 fn extract_param<ID>(req: &mut Request, param: &'static str) -> IronResult<ID>
-    where ID: ToString + FromStr,
-          <ID as FromStr>::Err: Debug + Send + error::Error + 'static
+where
+    ID: ToString + FromStr,
+    <ID as FromStr>::Err: Debug + Send + error::Error + 'static,
 {
-	let id = req.extensions.get::<Router>().unwrap().find(param).unwrap_or("");
-	id.parse::<ID>().map_err(|e| IronError::new(e, status::BadRequest))
+	let id = req.extensions
+		.get::<Router>()
+		.unwrap()
+		.find(param)
+		.unwrap_or("");
+	id.parse::<ID>().map_err(
+		|e| IronError::new(e, status::BadRequest),
+	)
 }
 
 /// HTTP server allowing the registration of ApiEndpoint implementations.
@@ -229,7 +239,6 @@ pub struct ApiServer {
 	root: String,
 	router: Router,
 	server_listener: Option<Listening>,
-
 }
 
 impl ApiServer {
@@ -245,7 +254,7 @@
 
 	/// Starts the ApiServer at the provided address.
 	pub fn start<A: ToSocketAddrs>(&mut self, addr: A) -> Result<(), String> {
-		//replace this value to satisfy borrow checker
+		// replace this value to satisfy borrow checker
 		let r = mem::replace(&mut self.router, Router::new());
 		let result = Iron::new(r).http(addr);
 		let return_value = result.as_ref().map(|_| ()).map_err(|e| e.to_string());
@@ -254,7 +263,7 @@
 	}
 
 	/// Stops the API server
-	pub fn stop(&mut self){
+	pub fn stop(&mut self) {
 		let r = mem::replace(&mut self.server_listener, None);
 		r.unwrap().close().unwrap();
 	}
@@ -262,8 +271,9 @@
 
 	/// Register a new API endpoint, providing a relative URL for the new
 	/// endpoint.
pub fn register_endpoint(&mut self, subpath: String, endpoint: E) - where E: ApiEndpoint, - <::ID as FromStr>::Err: Debug + Send + error::Error + where + E: ApiEndpoint, + <::ID as FromStr>::Err: Debug + Send + error::Error, { assert_eq!(subpath.chars().nth(0).unwrap(), '/'); @@ -281,7 +291,12 @@ impl ApiServer { endpoint: endpoint.clone(), }; let full_path = format!("{}/{}", root.clone(), op_s.clone()); - self.router.route(op.to_method(), full_path.clone(), wrapper, route_name); + self.router.route( + op.to_method(), + full_path.clone(), + wrapper, + route_name, + ); info!("route: POST {}", full_path); } else { @@ -294,15 +309,21 @@ impl ApiServer { _ => panic!("unreachable"), }; let wrapper = ApiWrapper(endpoint.clone()); - self.router.route(op.to_method(), full_path.clone(), wrapper, route_name); + self.router.route( + op.to_method(), + full_path.clone(), + wrapper, + route_name, + ); info!("route: {} {}", op.to_method(), full_path); } } // support for the HTTP Options method by differentiating what's on the // root resource vs the id resource - let (root_opts, sub_opts) = - endpoint.operations().iter().fold((vec![], vec![]), |mut acc, op| { + let (root_opts, sub_opts) = endpoint.operations().iter().fold( + (vec![], vec![]), + |mut acc, op| { let m = op.to_method(); if m == Method::Post { acc.0.push(m); @@ -310,19 +331,26 @@ impl ApiServer { acc.1.push(m); } acc - }); - self.router.options(root.clone(), - move |_: &mut Request| { - Ok(Response::with((status::Ok, - Header(headers::Allow(root_opts.clone()))))) - }, - "option_".to_string() + route_postfix); - self.router.options(root.clone() + "/:id", - move |_: &mut Request| { - Ok(Response::with((status::Ok, - Header(headers::Allow(sub_opts.clone()))))) - }, - "option_id_".to_string() + route_postfix); + }, + ); + self.router.options( + root.clone(), + move |_: &mut Request| { + Ok(Response::with( + (status::Ok, Header(headers::Allow(root_opts.clone()))), + )) + }, + "option_".to_string() + route_postfix, + ); + self.router.options( + root.clone() + "/:id", + move |_: &mut Request| { + Ok(Response::with( + (status::Ok, Header(headers::Allow(sub_opts.clone()))), + )) + }, + "option_id_".to_string() + route_postfix, + ); } } @@ -344,8 +372,8 @@ mod test { impl ApiEndpoint for TestApi { type ID = String; type T = Animal; - type OP_IN = (); - type OP_OUT = (); + type OP_IN = (); + type OP_OUT = (); fn operations(&self) -> Vec { vec![Operation::Get] diff --git a/api/src/types.rs b/api/src/types.rs index 240d5c2f8..7fc12a02d 100644 --- a/api/src/types.rs +++ b/api/src/types.rs @@ -30,9 +30,7 @@ pub struct Tip { impl Tip { pub fn from_tip(tip: chain::Tip) -> Tip { - Tip { - height: tip.height, - } + Tip { height: tip.height } } } @@ -60,8 +58,11 @@ impl Output { pub fn from_output(output: &core::Output, block_header: &core::BlockHeader) -> Output { let (output_type, lock_height) = match output.features { x if x.contains(core::transaction::COINBASE_OUTPUT) => { - (OutputType::Coinbase, block_header.height + consensus::COINBASE_MATURITY) - }, + ( + OutputType::Coinbase, + block_header.height + consensus::COINBASE_MATURITY, + ) + } _ => (OutputType::Transaction, 0), }; Output { diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 541caeeb6..9dcefb78c 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -29,7 +29,7 @@ use store; use sumtree; use types::*; -use core::global::{MiningParameterMode,MINING_PARAMETER_MODE}; +use core::global::{MiningParameterMode, MINING_PARAMETER_MODE}; const MAX_ORPHANS: usize = 20; @@ -44,7 +44,7 @@ pub 
struct Chain { orphans: Arc>>, sumtrees: Arc>, - //POW verification function + // POW verification function pow_verifier: fn(&BlockHeader, u32) -> bool, } @@ -52,14 +52,13 @@ unsafe impl Sync for Chain {} unsafe impl Send for Chain {} impl Chain { - /// Check whether the chain exists. If not, the call to 'init' will /// expect an already mined genesis block. This keeps the chain free /// from needing to know about the mining implementation - pub fn chain_exists(db_root: String)->bool { + pub fn chain_exists(db_root: String) -> bool { let chain_store = store::ChainKVStore::new(db_root).unwrap(); match chain_store.head() { - Ok(_) => {true}, + Ok(_) => true, Err(NotFoundErr) => false, Err(_) => false, } @@ -138,7 +137,12 @@ impl Chain { orphans.truncate(MAX_ORPHANS); } Err(ref e) => { - info!("Rejected block {} at {} : {:?}", b.hash(), b.header.height, e); + info!( + "Rejected block {} at {} : {:?}", + b.hash(), + b.header.height, + e + ); } } @@ -161,7 +165,7 @@ impl Chain { fn ctx_from_head(&self, head: Tip, opts: Options) -> pipe::BlockContext { let opts_in = opts; - let param_ref=MINING_PARAMETER_MODE.read().unwrap(); + let param_ref = MINING_PARAMETER_MODE.read().unwrap(); let opts_in = match *param_ref { MiningParameterMode::AutomatedTesting => opts_in | EASY_POW, MiningParameterMode::UserTesting => opts_in | EASY_POW, @@ -178,7 +182,7 @@ impl Chain { } } - /// Pop orphans out of the queue and check if we can now accept them. + /// Pop orphans out of the queue and check if we can now accept them. fn check_orphans(&self) { // first check how many we have to retry, unfort. we can't extend the lock // in the loop as it needs to be freed before going in process_block @@ -209,7 +213,9 @@ impl Chain { let sumtrees = self.sumtrees.read().unwrap(); let is_unspent = sumtrees.is_unspent(output_ref)?; if is_unspent { - self.store.get_output_by_commit(output_ref).map_err(&Error::StoreErr) + self.store.get_output_by_commit(output_ref).map_err( + &Error::StoreErr, + ) } else { Err(Error::OutputNotFound) } @@ -219,7 +225,7 @@ impl Chain { /// current sumtree state. 
pub fn set_sumtree_roots(&self, b: &mut Block) -> Result<(), Error> { let mut sumtrees = self.sumtrees.write().unwrap(); - + let roots = sumtree::extending(&mut sumtrees, |mut extension| { // apply the block on the sumtrees and check the resulting root extension.apply_block(b)?; @@ -266,10 +272,13 @@ impl Chain { } /// Gets the block header by the provided output commitment - pub fn get_block_header_by_output_commit(&self, commit: &Commitment) -> Result { - self.store.get_block_header_by_output_commit(commit).map_err( - &Error::StoreErr, - ) + pub fn get_block_header_by_output_commit( + &self, + commit: &Commitment, + ) -> Result { + self.store + .get_block_header_by_output_commit(commit) + .map_err(&Error::StoreErr) } /// Get the tip of the header chain diff --git a/chain/src/pipe.rs b/chain/src/pipe.rs index c6d739178..d3898d1cd 100644 --- a/chain/src/pipe.rs +++ b/chain/src/pipe.rs @@ -139,7 +139,7 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), E if header.height != prev.height + 1 { return Err(Error::InvalidBlockHeight); } - if header.timestamp <= prev.timestamp && !global::is_automated_testing_mode(){ + if header.timestamp <= prev.timestamp && !global::is_automated_testing_mode() { // prevent time warp attacks and some timestamp manipulations by forcing strict // time progression (but not in CI mode) return Err(Error::InvalidBlockTime); @@ -182,11 +182,15 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), E } /// Fully validate the block content. -fn validate_block(b: &Block, ctx: &mut BlockContext, ext: &mut sumtree::Extension) -> Result<(), Error> { +fn validate_block( + b: &Block, + ctx: &mut BlockContext, + ext: &mut sumtree::Extension, +) -> Result<(), Error> { if b.header.height > ctx.head.height + 1 { return Err(Error::Orphan); } - + // main isolated block validation, checks all commitment sums and sigs let curve = secp::Secp256k1::with_caps(secp::ContextFlag::Commit); try!(b.validate(&curve).map_err(&Error::InvalidBlockProof)); @@ -194,10 +198,13 @@ fn validate_block(b: &Block, ctx: &mut BlockContext, ext: &mut sumtree::Extensio // check that all the outputs of the block are "new" - // that they do not clobber any existing unspent outputs (by their commitment) // - // TODO - do we need to do this here (and can we do this here if we need access to the chain) - // see check_duplicate_outputs in pool for the analogous operation on transaction outputs + // TODO - do we need to do this here (and can we do this here if we need access + // to the chain) + // see check_duplicate_outputs in pool for the analogous operation on + // transaction outputs // for output in &block.outputs { - // here we would check that the output is not a duplicate output based on the current chain + // here we would check that the output is not a duplicate output based on the + // current chain // }; @@ -206,7 +213,7 @@ fn validate_block(b: &Block, ctx: &mut BlockContext, ext: &mut sumtree::Extensio // standard head extension ext.apply_block(b)?; } else { - + // extending a fork, first identify the block where forking occurred // keeping the hashes of blocks along the fork let mut current = b.header.previous; @@ -228,7 +235,11 @@ fn validate_block(b: &Block, ctx: &mut BlockContext, ext: &mut sumtree::Extensio if forked_block.header.height > 0 { let last_output = &forked_block.outputs[forked_block.outputs.len() - 1]; let last_kernel = &forked_block.kernels[forked_block.kernels.len() - 1]; - ext.rewind(forked_block.header.height, last_output, 
last_kernel)?; + ext.rewind( + forked_block.header.height, + last_output, + last_kernel, + )?; } // apply all forked blocks, including this new one @@ -240,27 +251,33 @@ fn validate_block(b: &Block, ctx: &mut BlockContext, ext: &mut sumtree::Extensio } let (utxo_root, rproof_root, kernel_root) = ext.roots(); - if utxo_root.hash != b.header.utxo_root || - rproof_root.hash != b.header.range_proof_root || - kernel_root.hash != b.header.kernel_root { + if utxo_root.hash != b.header.utxo_root || rproof_root.hash != b.header.range_proof_root || + kernel_root.hash != b.header.kernel_root + { return Err(Error::InvalidRoot); } - // check that any coinbase outputs are spendable (that they have matured sufficiently) + // check that any coinbase outputs are spendable (that they have matured + // sufficiently) for input in &b.inputs { if let Ok(output) = ctx.store.get_output_by_commit(&input.commitment()) { if output.features.contains(transaction::COINBASE_OUTPUT) { - if let Ok(output_header) = ctx.store.get_block_header_by_output_commit(&input.commitment()) { + if let Ok(output_header) = + ctx.store.get_block_header_by_output_commit( + &input.commitment(), + ) + { - // TODO - make sure we are not off-by-1 here vs. the equivalent tansaction validation rule + // TODO - make sure we are not off-by-1 here vs. the equivalent tansaction + // validation rule if b.header.height <= output_header.height + consensus::COINBASE_MATURITY { return Err(Error::ImmatureCoinbase); } }; }; }; - }; + } Ok(()) } diff --git a/chain/src/store.rs b/chain/src/store.rs index 698c152ac..363e59266 100644 --- a/chain/src/store.rs +++ b/chain/src/store.rs @@ -85,7 +85,9 @@ impl ChainStore for ChainKVStore { } fn get_block_header(&self, h: &Hash) -> Result { - option_to_not_found(self.db.get_ser(&to_key(BLOCK_HEADER_PREFIX, &mut h.to_vec()))) + option_to_not_found(self.db.get_ser( + &to_key(BLOCK_HEADER_PREFIX, &mut h.to_vec()), + )) } fn check_block_exists(&self, h: &Hash) -> Result { @@ -97,13 +99,30 @@ impl ChainStore for ChainKVStore { let mut batch = self.db .batch() .put_ser(&to_key(BLOCK_PREFIX, &mut b.hash().to_vec())[..], b)? - .put_ser(&to_key(BLOCK_HEADER_PREFIX, &mut b.hash().to_vec())[..], &b.header)?; + .put_ser( + &to_key(BLOCK_HEADER_PREFIX, &mut b.hash().to_vec())[..], + &b.header, + )?; // saving the full output under its hash, as well as a commitment to hash index for out in &b.outputs { batch = batch - .put_ser(&to_key(OUTPUT_COMMIT_PREFIX, &mut out.commitment().as_ref().to_vec())[..], out)? - .put_ser(&to_key(HEADER_BY_OUTPUT_PREFIX, &mut out.commitment().as_ref().to_vec())[..], &b.hash())?; + .put_ser( + &to_key( + OUTPUT_COMMIT_PREFIX, + &mut out.commitment().as_ref().to_vec(), + ) + [..], + out, + )? + .put_ser( + &to_key( + HEADER_BY_OUTPUT_PREFIX, + &mut out.commitment().as_ref().to_vec(), + ) + [..], + &b.hash(), + )?; } batch.write() } @@ -111,11 +130,14 @@ impl ChainStore for ChainKVStore { // lookup the block header hash by output commitment // lookup the block header based on this hash // to check the chain is correct compare this block header to - // the block header currently indexed at the relevant block height (tbd if actually necessary) + // the block header currently indexed at the relevant block height (tbd if + // actually necessary) // // NOTE: This index is not exhaustive. - // This node may not have seen this full block, so may not have populated the index. - // Block headers older than some threshold (2 months?) 
will not necessarily be included + // This node may not have seen this full block, so may not have populated the + // index. + // Block headers older than some threshold (2 months?) will not necessarily be + // included // in this index. // fn get_block_header_by_output_commit(&self, commit: &Commitment) -> Result { @@ -133,13 +155,16 @@ impl ChainStore for ChainKVStore { } else { Err(Error::NotFoundErr) } - }, - None => Err(Error::NotFoundErr) + } + None => Err(Error::NotFoundErr), } } fn save_block_header(&self, bh: &BlockHeader) -> Result<(), Error> { - self.db.put_ser(&to_key(BLOCK_HEADER_PREFIX, &mut bh.hash().to_vec())[..], bh) + self.db.put_ser( + &to_key(BLOCK_HEADER_PREFIX, &mut bh.hash().to_vec())[..], + bh, + ) } fn get_header_by_height(&self, height: u64) -> Result { @@ -154,26 +179,44 @@ impl ChainStore for ChainKVStore { } fn save_output_pos(&self, commit: &Commitment, pos: u64) -> Result<(), Error> { - self.db.put_ser(&to_key(COMMIT_POS_PREFIX, &mut commit.as_ref().to_vec())[..], &pos) + self.db.put_ser( + &to_key(COMMIT_POS_PREFIX, &mut commit.as_ref().to_vec())[..], + &pos, + ) } fn get_output_pos(&self, commit: &Commitment) -> Result { - option_to_not_found(self.db.get_ser(&to_key(COMMIT_POS_PREFIX, &mut commit.as_ref().to_vec()))) + option_to_not_found(self.db.get_ser(&to_key( + COMMIT_POS_PREFIX, + &mut commit.as_ref().to_vec(), + ))) } fn save_kernel_pos(&self, excess: &Commitment, pos: u64) -> Result<(), Error> { - self.db.put_ser(&to_key(KERNEL_POS_PREFIX, &mut excess.as_ref().to_vec())[..], &pos) + self.db.put_ser( + &to_key(KERNEL_POS_PREFIX, &mut excess.as_ref().to_vec())[..], + &pos, + ) } fn get_kernel_pos(&self, excess: &Commitment) -> Result { - option_to_not_found(self.db.get_ser(&to_key(KERNEL_POS_PREFIX, &mut excess.as_ref().to_vec()))) + option_to_not_found(self.db.get_ser(&to_key( + KERNEL_POS_PREFIX, + &mut excess.as_ref().to_vec(), + ))) } - /// Maintain consistency of the "header_by_height" index by traversing back through the - /// current chain and updating "header_by_height" until we reach a block_header - /// that is consistent with its height (everything prior to this will be consistent) + /// Maintain consistency of the "header_by_height" index by traversing back + /// through the + /// current chain and updating "header_by_height" until we reach a + /// block_header + /// that is consistent with its height (everything prior to this will be + /// consistent) fn setup_height(&self, bh: &BlockHeader) -> Result<(), Error> { - self.db.put_ser(&u64_to_key(HEADER_HEIGHT_PREFIX, bh.height), bh)?; + self.db.put_ser( + &u64_to_key(HEADER_HEIGHT_PREFIX, bh.height), + bh, + )?; if bh.height == 0 { return Ok(()); } @@ -184,10 +227,12 @@ impl ChainStore for ChainKVStore { let prev = self.get_header_by_height(prev_height)?; if prev.hash() != prev_h { let real_prev = self.get_block_header(&prev_h)?; - self.db.put_ser( - &u64_to_key(HEADER_HEIGHT_PREFIX, real_prev.height), - &real_prev, - ).unwrap(); + self.db + .put_ser( + &u64_to_key(HEADER_HEIGHT_PREFIX, real_prev.height), + &real_prev, + ) + .unwrap(); prev_h = real_prev.previous; prev_height = real_prev.height - 1; } else { diff --git a/chain/src/sumtree.rs b/chain/src/sumtree.rs index a35fd42d6..f68025ddf 100644 --- a/chain/src/sumtree.rs +++ b/chain/src/sumtree.rs @@ -35,12 +35,18 @@ const UTXO_SUBDIR: &'static str = "utxo"; const RANGE_PROOF_SUBDIR: &'static str = "rangeproof"; const KERNEL_SUBDIR: &'static str = "kernel"; -struct PMMRHandle where T: Summable + Clone { +struct PMMRHandle +where + T: 
Summable + Clone, +{ backend: PMMRBackend, last_pos: u64, } -impl PMMRHandle where T: Summable + Clone { +impl PMMRHandle +where + T: Summable + Clone, +{ fn new(root_dir: String, file_name: &str) -> Result, Error> { let path = Path::new(&root_dir).join(SUMTREES_SUBDIR).join(file_name); fs::create_dir_all(path.clone())?; @@ -88,7 +94,7 @@ impl SumTrees { match rpos { Ok(pos) => Ok(self.output_pmmr_h.backend.get(pos).is_some()), Err(grin_store::Error::NotFoundErr) => Ok(false), - Err(e) => Err(Error::StoreErr(e)) + Err(e) => Err(Error::StoreErr(e)), } } } @@ -101,8 +107,10 @@ impl SumTrees { /// If the closure returns an error, modifications are canceled and the unit /// of work is abandoned. Otherwise, the unit of work is permanently applied. pub fn extending<'a, F, T>(trees: &'a mut SumTrees, inner: F) -> Result - where F: FnOnce(&mut Extension) -> Result { - +where + F: FnOnce(&mut Extension) -> Result, +{ + let sizes: (u64, u64, u64); let res: Result; let rollback: bool; @@ -153,17 +161,25 @@ pub struct Extension<'a> { commit_index: Arc, new_output_commits: HashMap, new_kernel_excesses: HashMap, - rollback: bool + rollback: bool, } impl<'a> Extension<'a> { - // constructor fn new(trees: &'a mut SumTrees, commit_index: Arc) -> Extension<'a> { Extension { - output_pmmr: PMMR::at(&mut trees.output_pmmr_h.backend, trees.output_pmmr_h.last_pos), - rproof_pmmr: PMMR::at(&mut trees.rproof_pmmr_h.backend, trees.rproof_pmmr_h.last_pos), - kernel_pmmr: PMMR::at(&mut trees.kernel_pmmr_h.backend, trees.kernel_pmmr_h.last_pos), + output_pmmr: PMMR::at( + &mut trees.output_pmmr_h.backend, + trees.output_pmmr_h.last_pos, + ), + rproof_pmmr: PMMR::at( + &mut trees.rproof_pmmr_h.backend, + trees.rproof_pmmr_h.last_pos, + ), + kernel_pmmr: PMMR::at( + &mut trees.kernel_pmmr_h.backend, + trees.kernel_pmmr_h.last_pos, + ), commit_index: commit_index, new_output_commits: HashMap::new(), new_kernel_excesses: HashMap::new(), @@ -184,14 +200,17 @@ impl<'a> Extension<'a> { if let Ok(pos) = pos_res { match self.output_pmmr.prune(pos, b.header.height as u32) { Ok(true) => { - self.rproof_pmmr.prune(pos, b.header.height as u32) + self.rproof_pmmr + .prune(pos, b.header.height as u32) .map_err(|s| Error::SumTreeErr(s))?; - }, + } Ok(false) => return Err(Error::AlreadySpent), Err(s) => return Err(Error::SumTreeErr(s)), } } else { - return Err(Error::SumTreeErr(format!("Missing index for {:?}", input.commitment()))); + return Err(Error::SumTreeErr( + format!("Missing index for {:?}", input.commitment()), + )); } } @@ -200,15 +219,19 @@ impl<'a> Extension<'a> { return Err(Error::DuplicateCommitment(out.commitment())); } // push new outputs commitments in their MMR and save them in the index - let pos = self.output_pmmr.push(SumCommit { - commit: out.commitment(), - secp: secp.clone(), - }).map_err(&Error::SumTreeErr)?; + let pos = self.output_pmmr + .push(SumCommit { + commit: out.commitment(), + secp: secp.clone(), + }) + .map_err(&Error::SumTreeErr)?; self.new_output_commits.insert(out.commitment(), pos); // push range proofs in their MMR - self.rproof_pmmr.push(NoSum(out.proof)).map_err(&Error::SumTreeErr)?; + self.rproof_pmmr.push(NoSum(out.proof)).map_err( + &Error::SumTreeErr, + )?; } for kernel in &b.kernels { @@ -216,7 +239,9 @@ impl<'a> Extension<'a> { return Err(Error::DuplicateKernel(kernel.excess.clone())); } // push kernels in their MMR - let pos = self.kernel_pmmr.push(NoSum(kernel.clone())).map_err(&Error::SumTreeErr)?; + let pos = self.kernel_pmmr.push(NoSum(kernel.clone())).map_err( + 
&Error::SumTreeErr, + )?; self.new_kernel_excesses.insert(kernel.excess, pos); } Ok(()) @@ -238,16 +263,28 @@ impl<'a> Extension<'a> { let out_pos_rew = self.commit_index.get_output_pos(&output.commitment())?; let kern_pos_rew = self.commit_index.get_kernel_pos(&kernel.excess)?; - self.output_pmmr.rewind(out_pos_rew, height as u32).map_err(&Error::SumTreeErr)?; - self.rproof_pmmr.rewind(out_pos_rew, height as u32).map_err(&Error::SumTreeErr)?; - self.kernel_pmmr.rewind(kern_pos_rew, height as u32).map_err(&Error::SumTreeErr)?; + self.output_pmmr + .rewind(out_pos_rew, height as u32) + .map_err(&Error::SumTreeErr)?; + self.rproof_pmmr + .rewind(out_pos_rew, height as u32) + .map_err(&Error::SumTreeErr)?; + self.kernel_pmmr + .rewind(kern_pos_rew, height as u32) + .map_err(&Error::SumTreeErr)?; Ok(()) } /// Current root hashes and sums (if applicable) for the UTXO, range proof /// and kernel sum trees. - pub fn roots(&self) -> (HashSum, HashSum>, HashSum>) { - (self.output_pmmr.root(), self.rproof_pmmr.root(), self.kernel_pmmr.root()) + pub fn roots( + &self, + ) -> (HashSum, HashSum>, HashSum>) { + ( + self.output_pmmr.root(), + self.rproof_pmmr.root(), + self.kernel_pmmr.root(), + ) } /// Force the rollback of this extension, no matter the result @@ -257,7 +294,10 @@ impl<'a> Extension<'a> { // Sizes of the sum trees, used by `extending` on rollback. fn sizes(&self) -> (u64, u64, u64) { - (self.output_pmmr.unpruned_size(), self.rproof_pmmr.unpruned_size(), - self.kernel_pmmr.unpruned_size()) + ( + self.output_pmmr.unpruned_size(), + self.rproof_pmmr.unpruned_size(), + self.kernel_pmmr.unpruned_size(), + ) } } diff --git a/chain/src/types.rs b/chain/src/types.rs index 0c22ec431..427e8698e 100644 --- a/chain/src/types.rs +++ b/chain/src/types.rs @@ -208,7 +208,10 @@ pub trait ChainStore: Send + Sync { fn get_output_by_commit(&self, commit: &Commitment) -> Result; /// Gets a block_header for the given input commit - fn get_block_header_by_output_commit(&self, commit: &Commitment) -> Result; + fn get_block_header_by_output_commit( + &self, + commit: &Commitment, + ) -> Result; /// Saves the position of an output, represented by its commitment, in the /// UTXO MMR. Used as an index for spending and pruning. 
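For reference, the `setup_height` logic reformatted in the store.rs hunks above does the following: after indexing a new header by its height, it walks back along the fork, re-pointing the height index at the fork's ancestors until it reaches a height whose indexed header already matches, at which point everything earlier is necessarily consistent. A minimal standalone sketch of that walk; `Header`, `HeaderStore` and the `u64` hashes are hypothetical stand-ins for grin's `BlockHeader`, `ChainStore` and `Hash` types:

#[derive(Clone)]
struct Header {
	hash: u64,     // stand-in for the real block hash type
	previous: u64, // hash of the previous header
	height: u64,
}

trait HeaderStore {
	fn header_at_height(&self, height: u64) -> Option<Header>;
	fn header_by_hash(&self, hash: u64) -> Option<Header>;
	fn index_at_height(&mut self, height: u64, header: &Header);
}

// Re-point the height index at the new ancestry, walking back until the
// header indexed at some height already agrees with it.
fn setup_height(store: &mut dyn HeaderStore, bh: &Header) {
	store.index_at_height(bh.height, bh);
	if bh.height == 0 {
		return;
	}
	let mut prev_hash = bh.previous;
	let mut prev_height = bh.height - 1;
	while let Some(indexed) = store.header_at_height(prev_height) {
		if indexed.hash == prev_hash {
			break; // index already consistent from here back
		}
		let real_prev = store.header_by_hash(prev_hash).expect("missing header");
		store.index_at_height(real_prev.height, &real_prev);
		if real_prev.height == 0 {
			break;
		}
		prev_hash = real_prev.previous;
		prev_height = real_prev.height - 1;
	}
}

The early exit on a matching hash is what keeps the cost of a reorg proportional to the fork depth rather than to the chain height.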
diff --git a/chain/tests/mine_simple_chain.rs b/chain/tests/mine_simple_chain.rs index 66c826cbd..f9996d597 100644 --- a/chain/tests/mine_simple_chain.rs +++ b/chain/tests/mine_simple_chain.rs @@ -35,7 +35,7 @@ use core::global::MiningParameterMode; use pow::{types, cuckoo, MiningWorker}; -fn clean_output_dir(dir_name:&str){ +fn clean_output_dir(dir_name: &str) { let _ = fs::remove_dir_all(dir_name); } @@ -44,11 +44,15 @@ fn setup(dir_name: &str) -> Chain { clean_output_dir(dir_name); global::set_mining_mode(MiningParameterMode::AutomatedTesting); let mut genesis_block = None; - if !chain::Chain::chain_exists(dir_name.to_string()){ - genesis_block=pow::mine_genesis_block(None); + if !chain::Chain::chain_exists(dir_name.to_string()) { + genesis_block = pow::mine_genesis_block(None); } - chain::Chain::init(dir_name.to_string(), Arc::new(NoopAdapter {}), - genesis_block, pow::verify_size).unwrap() + chain::Chain::init( + dir_name.to_string(), + Arc::new(NoopAdapter {}), + genesis_block, + pow::verify_size, + ).unwrap() } #[test] @@ -67,7 +71,10 @@ fn mine_empty_chain() { miner_config.cuckoo_miner_plugin_dir = Some(String::from("../target/debug/deps")); let mut cuckoo_miner = cuckoo::Miner::new( - consensus::EASINESS, global::sizeshift() as u32, global::proofsize()); + consensus::EASINESS, + global::sizeshift() as u32, + global::proofsize(), + ); for n in 1..4 { let prev = chain.head_header().unwrap(); let reward_key = secp::key::SecretKey::new(&secp, &mut rng); @@ -83,7 +90,7 @@ fn mine_empty_chain() { &mut b.header, difficulty, global::sizeshift() as u32, - ).unwrap(); + ).unwrap(); let bhash = b.hash(); chain.process_block(b, chain::EASY_POW).unwrap(); @@ -110,8 +117,9 @@ fn mine_empty_chain() { // now check the header output index let output = block.outputs[0]; - let header_by_output_commit = chain. 
- get_block_header_by_output_commit(&output.commitment()).unwrap(); + let header_by_output_commit = chain + .get_block_header_by_output_commit(&output.commitment()) + .unwrap(); assert_eq!(header_by_output_commit.hash(), bhash); } } @@ -141,7 +149,7 @@ fn mine_forks() { // checking our new head let head = chain.head().unwrap(); - assert_eq!(head.height, (n+1) as u64); + assert_eq!(head.height, (n + 1) as u64); assert_eq!(head.last_block_h, bhash); assert_eq!(head.prev_block_h, prev.hash()); @@ -151,7 +159,7 @@ fn mine_forks() { // checking head switch let head = chain.head().unwrap(); - assert_eq!(head.height, (n+1) as u64); + assert_eq!(head.height, (n + 1) as u64); assert_eq!(head.last_block_h, bhash); assert_eq!(head.prev_block_h, prev.hash()); } diff --git a/chain/tests/test_coinbase_maturity.rs b/chain/tests/test_coinbase_maturity.rs index 396c7d269..01da6b2c6 100644 --- a/chain/tests/test_coinbase_maturity.rs +++ b/chain/tests/test_coinbase_maturity.rs @@ -33,23 +33,27 @@ use core::global::MiningParameterMode; use pow::{types, cuckoo, MiningWorker}; -fn clean_output_dir(dir_name:&str){ - let _ = fs::remove_dir_all(dir_name); +fn clean_output_dir(dir_name: &str) { + let _ = fs::remove_dir_all(dir_name); } #[test] fn test_coinbase_maturity() { - let _ = env_logger::init(); + let _ = env_logger::init(); clean_output_dir(".grin"); - global::set_mining_mode(MiningParameterMode::AutomatedTesting); + global::set_mining_mode(MiningParameterMode::AutomatedTesting); let mut rng = OsRng::new().unwrap(); let mut genesis_block = None; - if !chain::Chain::chain_exists(".grin".to_string()){ - genesis_block=pow::mine_genesis_block(None); + if !chain::Chain::chain_exists(".grin".to_string()) { + genesis_block = pow::mine_genesis_block(None); } - let chain = chain::Chain::init(".grin".to_string(), Arc::new(NoopAdapter {}), - genesis_block, pow::verify_size).unwrap(); + let chain = chain::Chain::init( + ".grin".to_string(), + Arc::new(NoopAdapter {}), + genesis_block, + pow::verify_size, + ).unwrap(); let secp = secp::Secp256k1::with_caps(secp::ContextFlag::Commit); @@ -60,10 +64,14 @@ fn test_coinbase_maturity() { }; miner_config.cuckoo_miner_plugin_dir = Some(String::from("../target/debug/deps")); - let mut cuckoo_miner = cuckoo::Miner::new(consensus::EASINESS, global::sizeshift() as u32, global::proofsize()); + let mut cuckoo_miner = cuckoo::Miner::new( + consensus::EASINESS, + global::sizeshift() as u32, + global::proofsize(), + ); let prev = chain.head_header().unwrap(); - let reward_key = secp::key::SecretKey::new(&secp, &mut rng); + let reward_key = secp::key::SecretKey::new(&secp, &mut rng); let mut block = core::core::Block::new(&prev, vec![], reward_key).unwrap(); block.header.timestamp = prev.timestamp + time::Duration::seconds(60); @@ -78,21 +86,23 @@ fn test_coinbase_maturity() { global::sizeshift() as u32, ).unwrap(); - assert_eq!(block.outputs.len(), 1); - assert!(block.outputs[0].features.contains(transaction::COINBASE_OUTPUT)); + assert_eq!(block.outputs.len(), 1); + assert!(block.outputs[0].features.contains( + transaction::COINBASE_OUTPUT, + )); chain.process_block(block, chain::EASY_POW).unwrap(); - let prev = chain.head_header().unwrap(); + let prev = chain.head_header().unwrap(); - let amount = consensus::REWARD; - let (coinbase_txn, _) = build::transaction(vec![ - build::input(amount, reward_key), - build::output_rand(amount-1), - build::with_fee(1)] - ).unwrap(); + let amount = consensus::REWARD; + let (coinbase_txn, _) = build::transaction(vec![ + build::input(amount, 
reward_key), + build::output_rand(amount - 1), + build::with_fee(1), + ]).unwrap(); - let reward_key = secp::key::SecretKey::new(&secp, &mut rng); + let reward_key = secp::key::SecretKey::new(&secp, &mut rng); let mut block = core::core::Block::new(&prev, vec![&coinbase_txn], reward_key).unwrap(); block.header.timestamp = prev.timestamp + time::Duration::seconds(60); @@ -109,56 +119,56 @@ fn test_coinbase_maturity() { ).unwrap(); let result = chain.process_block(block, chain::EASY_POW); - match result { - Err(Error::ImmatureCoinbase) => (), - _ => panic!("expected ImmatureCoinbase error here"), - }; + match result { + Err(Error::ImmatureCoinbase) => (), + _ => panic!("expected ImmatureCoinbase error here"), + }; - // mine 10 blocks so we increase the height sufficiently - // coinbase will mature and be spendable in the block after these - for _ in 0..10 { - let prev = chain.head_header().unwrap(); + // mine 10 blocks so we increase the height sufficiently + // coinbase will mature and be spendable in the block after these + for _ in 0..10 { + let prev = chain.head_header().unwrap(); - let reward_key = secp::key::SecretKey::new(&secp, &mut rng); - let mut block = core::core::Block::new(&prev, vec![], reward_key).unwrap(); - block.header.timestamp = prev.timestamp + time::Duration::seconds(60); + let reward_key = secp::key::SecretKey::new(&secp, &mut rng); + let mut block = core::core::Block::new(&prev, vec![], reward_key).unwrap(); + block.header.timestamp = prev.timestamp + time::Duration::seconds(60); - let difficulty = consensus::next_difficulty(chain.difficulty_iter()).unwrap(); - block.header.difficulty = difficulty.clone(); - chain.set_sumtree_roots(&mut block).unwrap(); - - pow::pow_size( - &mut cuckoo_miner, - &mut block.header, - difficulty, - global::sizeshift() as u32, - ).unwrap(); - - chain.process_block(block, chain::EASY_POW).unwrap(); - }; - - let prev = chain.head_header().unwrap(); - - let reward_key = secp::key::SecretKey::new(&secp, &mut rng); - let mut block = core::core::Block::new(&prev, vec![&coinbase_txn], reward_key).unwrap(); - - block.header.timestamp = prev.timestamp + time::Duration::seconds(60); - - let difficulty = consensus::next_difficulty(chain.difficulty_iter()).unwrap(); - block.header.difficulty = difficulty.clone(); + let difficulty = consensus::next_difficulty(chain.difficulty_iter()).unwrap(); + block.header.difficulty = difficulty.clone(); chain.set_sumtree_roots(&mut block).unwrap(); - pow::pow_size( - &mut cuckoo_miner, - &mut block.header, - difficulty, - global::sizeshift() as u32, - ).unwrap(); + pow::pow_size( + &mut cuckoo_miner, + &mut block.header, + difficulty, + global::sizeshift() as u32, + ).unwrap(); - let result = chain.process_block(block, chain::EASY_POW); - match result { - Ok(_) => (), - Err(Error::ImmatureCoinbase) => panic!("we should not get an ImmatureCoinbase here"), - Err(_) => panic!("we did not expect an error here"), - }; + chain.process_block(block, chain::EASY_POW).unwrap(); + } + + let prev = chain.head_header().unwrap(); + + let reward_key = secp::key::SecretKey::new(&secp, &mut rng); + let mut block = core::core::Block::new(&prev, vec![&coinbase_txn], reward_key).unwrap(); + + block.header.timestamp = prev.timestamp + time::Duration::seconds(60); + + let difficulty = consensus::next_difficulty(chain.difficulty_iter()).unwrap(); + block.header.difficulty = difficulty.clone(); + chain.set_sumtree_roots(&mut block).unwrap(); + + pow::pow_size( + &mut cuckoo_miner, + &mut block.header, + difficulty, + global::sizeshift() 
as u32, + ).unwrap(); + + let result = chain.process_block(block, chain::EASY_POW); + match result { + Ok(_) => (), + Err(Error::ImmatureCoinbase) => panic!("we should not get an ImmatureCoinbase here"), + Err(_) => panic!("we did not expect an error here"), + }; } diff --git a/core/benches/sumtree.rs b/core/benches/sumtree.rs index e8abfb697..5b9e50506 100644 --- a/core/benches/sumtree.rs +++ b/core/benches/sumtree.rs @@ -26,50 +26,50 @@ use core::ser::{Writeable, Writer, Error}; #[derive(Copy, Clone, Debug)] struct TestElem([u32; 4]); impl Summable for TestElem { - type Sum = u64; - fn sum(&self) -> u64 { - // sums are not allowed to overflow, so we use this simple - // non-injective "sum" function that will still be homomorphic - self.0[0] as u64 * 0x1000 + self.0[1] as u64 * 0x100 + self.0[2] as u64 * 0x10 + - self.0[3] as u64 - } + type Sum = u64; + fn sum(&self) -> u64 { + // sums are not allowed to overflow, so we use this simple + // non-injective "sum" function that will still be homomorphic + self.0[0] as u64 * 0x1000 + self.0[1] as u64 * 0x100 + self.0[2] as u64 * 0x10 + + self.0[3] as u64 + } } impl Writeable for TestElem { - fn write(&self, writer: &mut W) -> Result<(), Error> { - try!(writer.write_u32(self.0[0])); - try!(writer.write_u32(self.0[1])); - try!(writer.write_u32(self.0[2])); - writer.write_u32(self.0[3]) - } + fn write(&self, writer: &mut W) -> Result<(), Error> { + try!(writer.write_u32(self.0[0])); + try!(writer.write_u32(self.0[1])); + try!(writer.write_u32(self.0[2])); + writer.write_u32(self.0[3]) + } } #[bench] fn bench_small_tree(b: &mut Bencher) { - let mut rng = rand::thread_rng(); - b.iter(|| { - let mut big_tree = SumTree::new(); - for i in 0..1000 { - // To avoid RNG overflow we generate random elements that are small. - // Though to avoid repeat elements they have to be reasonably big. - let new_elem; - let word1 = rng.gen::() as u32; - let word2 = rng.gen::() as u32; - if rng.gen() { - if rng.gen() { - new_elem = TestElem([word1, word2, 0, 0]); - } else { - new_elem = TestElem([word1, 0, word2, 0]); - } - } else { - if rng.gen() { - new_elem = TestElem([0, word1, 0, word2]); - } else { - new_elem = TestElem([0, 0, word1, word2]); - } - } + let mut rng = rand::thread_rng(); + b.iter(|| { + let mut big_tree = SumTree::new(); + for i in 0..1000 { + // To avoid RNG overflow we generate random elements that are small. + // Though to avoid repeat elements they have to be reasonably big. 
+ let new_elem; + let word1 = rng.gen::() as u32; + let word2 = rng.gen::() as u32; + if rng.gen() { + if rng.gen() { + new_elem = TestElem([word1, word2, 0, 0]); + } else { + new_elem = TestElem([word1, 0, word2, 0]); + } + } else { + if rng.gen() { + new_elem = TestElem([0, word1, 0, word2]); + } else { + new_elem = TestElem([0, 0, word1, word2]); + } + } - big_tree.push(new_elem); - } - }); + big_tree.push(new_elem); + } + }); } diff --git a/core/src/consensus.rs b/core/src/consensus.rs index b0772d405..543e3e61e 100644 --- a/core/src/consensus.rs +++ b/core/src/consensus.rs @@ -27,7 +27,8 @@ use core::target::Difficulty; pub const REWARD: u64 = 1_000_000_000; /// Number of blocks before a coinbase matures and can be spent -/// TODO - reduced this for testing - need to investigate if we can lower this in test env +/// TODO - reduced this for testing - need to investigate if we can lower this +/// in test env // pub const COINBASE_MATURITY: u64 = 1_000; pub const COINBASE_MATURITY: u64 = 3; @@ -99,7 +100,8 @@ impl fmt::Display for TargetError { /// difference between the median timestamps at the beginning and the end /// of the window. pub fn next_difficulty(cursor: T) -> Result - where T: IntoIterator> +where + T: IntoIterator>, { // Block times at the begining and end of the adjustment window, used to @@ -155,8 +157,9 @@ pub fn next_difficulty(cursor: T) -> Result ts_damp }; - Ok(diff_avg * Difficulty::from_num(BLOCK_TIME_WINDOW) / - Difficulty::from_num(adj_ts)) + Ok( + diff_avg * Difficulty::from_num(BLOCK_TIME_WINDOW) / Difficulty::from_num(adj_ts), + ) } #[cfg(test)] @@ -171,24 +174,25 @@ mod test { // Builds an iterator for next difficulty calculation with the provided // constant time interval, difficulty and total length. fn repeat(interval: u64, diff: u64, len: u64) -> Vec> { - //watch overflow here, length shouldn't be ridiculous anyhow + // watch overflow here, length shouldn't be ridiculous anyhow assert!(len < std::usize::MAX as u64); let diffs = vec![Difficulty::from_num(diff); len as usize]; let times = (0..(len as usize)).map(|n| n * interval as usize).rev(); let pairs = times.zip(diffs.iter()); - pairs.map(|(t, d)| Ok((t as u64, d.clone()))).collect::>() + pairs + .map(|(t, d)| Ok((t as u64, d.clone()))) + .collect::>() } - fn repeat_offs(from: u64, - interval: u64, - diff: u64, - len: u64) - -> Vec> { - map_vec!(repeat(interval, diff, len), |e| { - match e.clone() { - Err(e) => Err(e), - Ok((t, d)) => Ok((t + from, d)), - } + fn repeat_offs( + from: u64, + interval: u64, + diff: u64, + len: u64, + ) -> Vec> { + map_vec!(repeat(interval, diff, len), |e| match e.clone() { + Err(e) => Err(e), + Ok((t, d)) => Ok((t + from, d)), }) } @@ -196,19 +200,28 @@ mod test { #[test] fn next_target_adjustment() { // not enough data - assert_eq!(next_difficulty(vec![]).unwrap(), Difficulty::from_num(MINIMUM_DIFFICULTY)); + assert_eq!( + next_difficulty(vec![]).unwrap(), + Difficulty::from_num(MINIMUM_DIFFICULTY) + ); - assert_eq!(next_difficulty(vec![Ok((60, Difficulty::one()))]).unwrap(), - Difficulty::from_num(MINIMUM_DIFFICULTY)); + assert_eq!( + next_difficulty(vec![Ok((60, Difficulty::one()))]).unwrap(), + Difficulty::from_num(MINIMUM_DIFFICULTY) + ); - assert_eq!(next_difficulty(repeat(60, 10, DIFFICULTY_ADJUST_WINDOW)).unwrap(), - Difficulty::from_num(MINIMUM_DIFFICULTY)); + assert_eq!( + next_difficulty(repeat(60, 10, DIFFICULTY_ADJUST_WINDOW)).unwrap(), + Difficulty::from_num(MINIMUM_DIFFICULTY) + ); // just enough data, right interval, should stay constant let just_enough = 
DIFFICULTY_ADJUST_WINDOW + MEDIAN_TIME_WINDOW; - assert_eq!(next_difficulty(repeat(60, 1000, just_enough)).unwrap(), - Difficulty::from_num(1000)); + assert_eq!( + next_difficulty(repeat(60, 1000, just_enough)).unwrap(), + Difficulty::from_num(1000) + ); // checking averaging works, window length is odd so need to compensate a little let sec = DIFFICULTY_ADJUST_WINDOW / 2 + 1 + MEDIAN_TIME_WINDOW; @@ -218,28 +231,44 @@ mod test { assert_eq!(next_difficulty(s2).unwrap(), Difficulty::from_num(999)); // too slow, diff goes down - assert_eq!(next_difficulty(repeat(90, 1000, just_enough)).unwrap(), - Difficulty::from_num(889)); - assert_eq!(next_difficulty(repeat(120, 1000, just_enough)).unwrap(), - Difficulty::from_num(800)); + assert_eq!( + next_difficulty(repeat(90, 1000, just_enough)).unwrap(), + Difficulty::from_num(889) + ); + assert_eq!( + next_difficulty(repeat(120, 1000, just_enough)).unwrap(), + Difficulty::from_num(800) + ); // too fast, diff goes up - assert_eq!(next_difficulty(repeat(55, 1000, just_enough)).unwrap(), - Difficulty::from_num(1021)); - assert_eq!(next_difficulty(repeat(45, 1000, just_enough)).unwrap(), - Difficulty::from_num(1067)); + assert_eq!( + next_difficulty(repeat(55, 1000, just_enough)).unwrap(), + Difficulty::from_num(1021) + ); + assert_eq!( + next_difficulty(repeat(45, 1000, just_enough)).unwrap(), + Difficulty::from_num(1067) + ); // hitting lower time bound, should always get the same result below - assert_eq!(next_difficulty(repeat(20, 1000, just_enough)).unwrap(), - Difficulty::from_num(1200)); - assert_eq!(next_difficulty(repeat(10, 1000, just_enough)).unwrap(), - Difficulty::from_num(1200)); + assert_eq!( + next_difficulty(repeat(20, 1000, just_enough)).unwrap(), + Difficulty::from_num(1200) + ); + assert_eq!( + next_difficulty(repeat(10, 1000, just_enough)).unwrap(), + Difficulty::from_num(1200) + ); // hitting higher time bound, should always get the same result above - assert_eq!(next_difficulty(repeat(160, 1000, just_enough)).unwrap(), - Difficulty::from_num(750)); - assert_eq!(next_difficulty(repeat(200, 1000, just_enough)).unwrap(), - Difficulty::from_num(750)); + assert_eq!( + next_difficulty(repeat(160, 1000, just_enough)).unwrap(), + Difficulty::from_num(750) + ); + assert_eq!( + next_difficulty(repeat(200, 1000, just_enough)).unwrap(), + Difficulty::from_num(750) + ); } } diff --git a/core/src/core/block.rs b/core/src/core/block.rs index dd4d0bfe2..cc5351cc3 100644 --- a/core/src/core/block.rs +++ b/core/src/core/block.rs @@ -85,14 +85,16 @@ impl Default for BlockHeader { /// Serialization of a block header impl Writeable for BlockHeader { fn write(&self, writer: &mut W) -> Result<(), ser::Error> { - ser_multiwrite!(writer, - [write_u64, self.height], - [write_fixed_bytes, &self.previous], - [write_i64, self.timestamp.to_timespec().sec], - [write_fixed_bytes, &self.utxo_root], - [write_fixed_bytes, &self.range_proof_root], - [write_fixed_bytes, &self.kernel_root], - [write_u8, self.features.bits()]); + ser_multiwrite!( + writer, + [write_u64, self.height], + [write_fixed_bytes, &self.previous], + [write_i64, self.timestamp.to_timespec().sec], + [write_fixed_bytes, &self.utxo_root], + [write_fixed_bytes, &self.range_proof_root], + [write_fixed_bytes, &self.kernel_root], + [write_u8, self.features.bits()] + ); try!(writer.write_u64(self.nonce)); try!(self.difficulty.write(writer)); @@ -129,7 +131,9 @@ impl Readable for BlockHeader { utxo_root: utxo_root, range_proof_root: rproof_root, kernel_root: kernel_root, - features: 
BlockFeatures::from_bits(features).ok_or(ser::Error::CorruptedData)?, + features: BlockFeatures::from_bits(features).ok_or( + ser::Error::CorruptedData, + )?, pow: pow, nonce: nonce, difficulty: difficulty, @@ -162,10 +166,12 @@ impl Writeable for Block { try!(self.header.write(writer)); if writer.serialization_mode() != ser::SerializationMode::Hash { - ser_multiwrite!(writer, - [write_u64, self.inputs.len() as u64], - [write_u64, self.outputs.len() as u64], - [write_u64, self.kernels.len() as u64]); + ser_multiwrite!( + writer, + [write_u64, self.inputs.len() as u64], + [write_u64, self.outputs.len() as u64], + [write_u64, self.kernels.len() as u64] + ); for inp in &self.inputs { try!(inp.write(writer)); @@ -234,10 +240,11 @@ impl Block { /// Builds a new block from the header of the previous block, a vector of /// transactions and the private key that will receive the reward. Checks /// that all transactions are valid and calculates the Merkle tree. - pub fn new(prev: &BlockHeader, - txs: Vec<&Transaction>, - reward_key: SecretKey) - -> Result { + pub fn new( + prev: &BlockHeader, + txs: Vec<&Transaction>, + reward_key: SecretKey, + ) -> Result { let secp = Secp256k1::with_caps(secp::ContextFlag::Commit); let (reward_out, reward_proof) = try!(Block::reward_output(reward_key, &secp)); @@ -248,11 +255,12 @@ impl Block { /// Builds a new block ready to mine from the header of the previous block, /// a vector of transactions and the reward information. Checks /// that all transactions are valid and calculates the Merkle tree. - pub fn with_reward(prev: &BlockHeader, - txs: Vec<&Transaction>, - reward_out: Output, - reward_kern: TxKernel) - -> Result { + pub fn with_reward( + prev: &BlockHeader, + txs: Vec<&Transaction>, + reward_out: Output, + reward_kern: TxKernel, + ) -> Result { // note: the following reads easily but may not be the most efficient due to // repeated iterations, revisit if a problem let secp = Secp256k1::with_caps(secp::ContextFlag::Commit); @@ -264,18 +272,16 @@ impl Block { // build vectors with all inputs and all outputs, ordering them by hash // needs to be a fold so we don't end up with a vector of vectors and we // want to fully own the refs (not just a pointer like flat_map). 
- let mut inputs = txs.iter() - .fold(vec![], |mut acc, ref tx| { - let mut inputs = tx.inputs.clone(); - acc.append(&mut inputs); - acc - }); - let mut outputs = txs.iter() - .fold(vec![], |mut acc, ref tx| { - let mut outputs = tx.outputs.clone(); - acc.append(&mut outputs); - acc - }); + let mut inputs = txs.iter().fold(vec![], |mut acc, ref tx| { + let mut inputs = tx.inputs.clone(); + acc.append(&mut inputs); + acc + }); + let mut outputs = txs.iter().fold(vec![], |mut acc, ref tx| { + let mut outputs = tx.outputs.clone(); + acc.append(&mut outputs); + acc + }); outputs.push(reward_out); inputs.sort_by_key(|inp| inp.hash()); @@ -283,19 +289,24 @@ impl Block { // calculate the overall Merkle tree and fees - Ok(Block { + Ok( + Block { header: BlockHeader { height: prev.height + 1, - timestamp: time::Tm { tm_nsec: 0, ..time::now_utc() }, + timestamp: time::Tm { + tm_nsec: 0, + ..time::now_utc() + }, previous: prev.hash(), - total_difficulty: prev.pow.clone().to_difficulty() + prev.total_difficulty.clone(), + total_difficulty: prev.pow.clone().to_difficulty() + + prev.total_difficulty.clone(), ..Default::default() }, inputs: inputs, outputs: outputs, kernels: kernels, - } - .compact()) + }.compact(), + ) } @@ -312,37 +323,37 @@ impl Block { /// Matches any output with a potential spending input, eliminating them /// from the block. Provides a simple way to compact the block. The /// elimination is stable with respect to inputs and outputs order. - /// - /// NOTE: exclude coinbase from compaction process - /// if a block contains a new coinbase output and - /// is a transaction spending a previous coinbase - /// we do not want to compact these away - /// + /// + /// NOTE: exclude coinbase from compaction process + /// if a block contains a new coinbase output and + /// is a transaction spending a previous coinbase + /// we do not want to compact these away + /// pub fn compact(&self) -> Block { - let in_set = self.inputs - .iter() - .map(|inp| inp.commitment()) - .collect::>(); + let in_set = self.inputs + .iter() + .map(|inp| inp.commitment()) + .collect::>(); - let out_set = self.outputs - .iter() - .filter(|out| !out.features.contains(COINBASE_OUTPUT)) - .map(|out| out.commitment()) - .collect::>(); + let out_set = self.outputs + .iter() + .filter(|out| !out.features.contains(COINBASE_OUTPUT)) + .map(|out| out.commitment()) + .collect::>(); - let commitments_to_compact = in_set.intersection(&out_set).collect::>(); + let commitments_to_compact = in_set.intersection(&out_set).collect::>(); - let new_inputs = self.inputs - .iter() - .filter(|inp| !commitments_to_compact.contains(&inp.commitment())) - .map(|&inp| inp) - .collect::>(); + let new_inputs = self.inputs + .iter() + .filter(|inp| !commitments_to_compact.contains(&inp.commitment())) + .map(|&inp| inp) + .collect::>(); - let new_outputs = self.outputs - .iter() - .filter(|out| !commitments_to_compact.contains(&out.commitment())) - .map(|&out| out) - .collect::>(); + let new_outputs = self.outputs + .iter() + .filter(|out| !commitments_to_compact.contains(&out.commitment())) + .map(|&out| out) + .collect::>(); Block { header: BlockHeader { @@ -374,18 +385,17 @@ impl Block { all_outputs.sort_by_key(|out| out.hash()); Block { - // compact will fix the merkle tree - header: BlockHeader { - pow: self.header.pow.clone(), - difficulty: self.header.difficulty.clone(), - total_difficulty: self.header.total_difficulty.clone(), - ..self.header - }, - inputs: all_inputs, - outputs: all_outputs, - kernels: all_kernels, - } - .compact() + // 
compact will fix the merkle tree + header: BlockHeader { + pow: self.header.pow.clone(), + difficulty: self.header.difficulty.clone(), + total_difficulty: self.header.total_difficulty.clone(), + ..self.header + }, + inputs: all_inputs, + outputs: all_outputs, + kernels: all_kernels, + }.compact() } /// Validates all the elements in a block that can be checked without @@ -394,7 +404,7 @@ impl Block { pub fn validate(&self, secp: &Secp256k1) -> Result<(), secp::Error> { self.verify_coinbase(secp)?; self.verify_kernels(secp)?; - Ok(()) + Ok(()) } /// Validate the sum of input/output commitments match the sum in kernels @@ -441,23 +451,25 @@ impl Block { // verifying the kernels on a block composed of just the coinbase outputs // and kernels checks all we need Block { - header: BlockHeader::default(), - inputs: vec![], - outputs: cb_outs, - kernels: cb_kerns, - } - .verify_kernels(secp) + header: BlockHeader::default(), + inputs: vec![], + outputs: cb_outs, + kernels: cb_kerns, + }.verify_kernels(secp) } /// Builds the blinded output and related signature proof for the block /// reward. - pub fn reward_output(skey: secp::key::SecretKey, - secp: &Secp256k1) - -> Result<(Output, TxKernel), secp::Error> { - let msg = try!(secp::Message::from_slice(&[0; secp::constants::MESSAGE_SIZE])); + pub fn reward_output( + skey: secp::key::SecretKey, + secp: &Secp256k1, + ) -> Result<(Output, TxKernel), secp::Error> { + let msg = try!(secp::Message::from_slice( + &[0; secp::constants::MESSAGE_SIZE], + )); let sig = try!(secp.sign(&msg, &skey)); let commit = secp.commit(REWARD, skey).unwrap(); - //let switch_commit = secp.switch_commit(skey).unwrap(); + // let switch_commit = secp.switch_commit(skey).unwrap(); let nonce = secp.nonce(); let rproof = secp.range_proof(0, REWARD, skey, commit, nonce); @@ -560,78 +572,85 @@ mod test { assert_eq!(b3.outputs.len(), 4); } - #[test] - fn empty_block_with_coinbase_is_valid() { - let ref secp = new_secp(); - let b = new_block(vec![], secp); + #[test] + fn empty_block_with_coinbase_is_valid() { + let ref secp = new_secp(); + let b = new_block(vec![], secp); - assert_eq!(b.inputs.len(), 0); - assert_eq!(b.outputs.len(), 1); - assert_eq!(b.kernels.len(), 1); + assert_eq!(b.inputs.len(), 0); + assert_eq!(b.outputs.len(), 1); + assert_eq!(b.kernels.len(), 1); - let coinbase_outputs = b.outputs + let coinbase_outputs = b.outputs .iter() .filter(|out| out.features.contains(COINBASE_OUTPUT)) - .map(|o| o.clone()) + .map(|o| o.clone()) .collect::>(); - assert_eq!(coinbase_outputs.len(), 1); + assert_eq!(coinbase_outputs.len(), 1); - let coinbase_kernels = b.kernels + let coinbase_kernels = b.kernels .iter() .filter(|out| out.features.contains(COINBASE_KERNEL)) - .map(|o| o.clone()) + .map(|o| o.clone()) .collect::>(); - assert_eq!(coinbase_kernels.len(), 1); + assert_eq!(coinbase_kernels.len(), 1); - // the block should be valid here (single coinbase output with corresponding txn kernel) - assert_eq!(b.validate(&secp), Ok(())); - } + // the block should be valid here (single coinbase output with corresponding + // txn kernel) + assert_eq!(b.validate(&secp), Ok(())); + } - #[test] - // test that flipping the COINBASE_OUTPUT flag on the output features - // invalidates the block and specifically it causes verify_coinbase to fail - // additionally verifying the merkle_inputs_outputs also fails - fn remove_coinbase_output_flag() { - let ref secp = new_secp(); - let mut b = new_block(vec![], secp); + #[test] + // test that flipping the COINBASE_OUTPUT flag on the output features + // 
invalidates the block and specifically it causes verify_coinbase to fail + // additionally verifying the merkle_inputs_outputs also fails + fn remove_coinbase_output_flag() { + let ref secp = new_secp(); + let mut b = new_block(vec![], secp); - assert!(b.outputs[0].features.contains(COINBASE_OUTPUT)); - b.outputs[0].features.remove(COINBASE_OUTPUT); + assert!(b.outputs[0].features.contains(COINBASE_OUTPUT)); + b.outputs[0].features.remove(COINBASE_OUTPUT); - assert_eq!(b.verify_coinbase(&secp), Err(secp::Error::IncorrectCommitSum)); - assert_eq!(b.verify_kernels(&secp), Ok(())); + assert_eq!( + b.verify_coinbase(&secp), + Err(secp::Error::IncorrectCommitSum) + ); + assert_eq!(b.verify_kernels(&secp), Ok(())); - assert_eq!(b.validate(&secp), Err(secp::Error::IncorrectCommitSum)); - } + assert_eq!(b.validate(&secp), Err(secp::Error::IncorrectCommitSum)); + } - #[test] - // test that flipping the COINBASE_KERNEL flag on the kernel features - // invalidates the block and specifically it causes verify_coinbase to fail - fn remove_coinbase_kernel_flag() { - let ref secp = new_secp(); - let mut b = new_block(vec![], secp); + #[test] + // test that flipping the COINBASE_KERNEL flag on the kernel features + // invalidates the block and specifically it causes verify_coinbase to fail + fn remove_coinbase_kernel_flag() { + let ref secp = new_secp(); + let mut b = new_block(vec![], secp); - assert!(b.kernels[0].features.contains(COINBASE_KERNEL)); - b.kernels[0].features.remove(COINBASE_KERNEL); + assert!(b.kernels[0].features.contains(COINBASE_KERNEL)); + b.kernels[0].features.remove(COINBASE_KERNEL); - assert_eq!(b.verify_coinbase(&secp), Err(secp::Error::IncorrectCommitSum)); - assert_eq!(b.verify_kernels(&secp), Ok(())); + assert_eq!( + b.verify_coinbase(&secp), + Err(secp::Error::IncorrectCommitSum) + ); + assert_eq!(b.verify_kernels(&secp), Ok(())); - assert_eq!(b.validate(&secp), Err(secp::Error::IncorrectCommitSum)); - } + assert_eq!(b.validate(&secp), Err(secp::Error::IncorrectCommitSum)); + } - #[test] - fn serialize_deserialize_block() { - let ref secp = new_secp(); - let b = new_block(vec![], secp); + #[test] + fn serialize_deserialize_block() { + let ref secp = new_secp(); + let b = new_block(vec![], secp); - let mut vec = Vec::new(); - ser::serialize(&mut vec, &b).expect("serialization failed"); - let b2: Block = ser::deserialize(&mut &vec[..]).unwrap(); + let mut vec = Vec::new(); + ser::serialize(&mut vec, &b).expect("serialization failed"); + let b2: Block = ser::deserialize(&mut &vec[..]).unwrap(); - assert_eq!(b.inputs, b2.inputs); - assert_eq!(b.outputs, b2.outputs); - assert_eq!(b.kernels, b2.kernels); - assert_eq!(b.header, b2.header); - } + assert_eq!(b.inputs, b2.inputs); + assert_eq!(b.outputs, b2.outputs); + assert_eq!(b.kernels, b2.kernels); + assert_eq!(b.header, b2.header); + } } diff --git a/core/src/core/build.rs b/core/src/core/build.rs index 060053152..0415cc0f3 100644 --- a/core/src/core/build.rs +++ b/core/src/core/build.rs @@ -112,12 +112,14 @@ pub fn output(value: u64, blinding: SecretKey) -> Box { let commit = build.secp.commit(value, blinding).unwrap(); let nonce = build.secp.nonce(); let rproof = build.secp.range_proof(0, value, blinding, commit, nonce); - (tx.with_output(Output { - features: DEFAULT_OUTPUT, - commit: commit, - proof: rproof, - }), - sum.add(blinding)) + ( + tx.with_output(Output { + features: DEFAULT_OUTPUT, + commit: commit, + proof: rproof, + }), + sum.add(blinding), + ) }) } @@ -130,30 +132,38 @@ pub fn output_rand(value: u64) -> Box { let 
commit = build.secp.commit(value, blinding).unwrap(); let nonce = build.secp.nonce(); let rproof = build.secp.range_proof(0, value, blinding, commit, nonce); - (tx.with_output(Output { - features: DEFAULT_OUTPUT, - commit: commit, - proof: rproof, - }), - sum.add(blinding)) + ( + tx.with_output(Output { + features: DEFAULT_OUTPUT, + commit: commit, + proof: rproof, + }), + sum.add(blinding), + ) }) } /// Sets the fee on the transaction being built. pub fn with_fee(fee: u64) -> Box { - Box::new(move |_build, (tx, sum)| -> (Transaction, BlindSum) { (tx.with_fee(fee), sum) }) + Box::new(move |_build, (tx, sum)| -> (Transaction, BlindSum) { + (tx.with_fee(fee), sum) + }) } /// Sets a known excess value on the transaction being built. Usually used in /// combination with the initial_tx function when a new transaction is built /// by adding to a pre-existing one. pub fn with_excess(excess: SecretKey) -> Box { - Box::new(move |_build, (tx, sum)| -> (Transaction, BlindSum) { (tx, sum.add(excess)) }) + Box::new(move |_build, (tx, sum)| -> (Transaction, BlindSum) { + (tx, sum.add(excess)) + }) } /// Sets an initial transaction to add to when building a new transaction. pub fn initial_tx(tx: Transaction) -> Box { - Box::new(move |_build, (_, sum)| -> (Transaction, BlindSum) { (tx.clone(), sum) }) + Box::new(move |_build, (_, sum)| -> (Transaction, BlindSum) { + (tx.clone(), sum) + }) } /// Builds a new transaction by combining all the combinators provided in a @@ -171,8 +181,10 @@ pub fn transaction(elems: Vec>) -> Result<(Transaction, SecretKey), secp: Secp256k1::with_caps(secp::ContextFlag::Commit), rng: OsRng::new().unwrap(), }; - let (mut tx, sum) = elems.iter().fold((Transaction::empty(), BlindSum::new()), - |acc, elem| elem(&mut ctx, acc)); + let (mut tx, sum) = elems.iter().fold( + (Transaction::empty(), BlindSum::new()), + |acc, elem| elem(&mut ctx, acc), + ); let blind_sum = sum.sum(&ctx.secp)?; let msg = secp::Message::from_slice(&u64_to_32bytes(tx.fee))?; @@ -199,9 +211,12 @@ mod test { #[test] fn blind_simple_tx() { let secp = Secp256k1::with_caps(secp::ContextFlag::Commit); - let (tx, _) = - transaction(vec![input_rand(10), input_rand(11), output_rand(20), with_fee(1)]) - .unwrap(); + let (tx, _) = transaction(vec![ + input_rand(10), + input_rand(11), + output_rand(20), + with_fee(1), + ]).unwrap(); tx.verify_sig(&secp).unwrap(); } #[test] diff --git a/core/src/core/hash.rs b/core/src/core/hash.rs index 234553778..b02487095 100644 --- a/core/src/core/hash.rs +++ b/core/src/core/hash.rs @@ -143,9 +143,9 @@ impl HashWriter { /// Consume the `HashWriter`, outputting a `Hash` corresponding to its /// current state pub fn into_hash(self) -> Hash { - let mut res = [0; 32]; + let mut res = [0; 32]; (&mut res).copy_from_slice(self.state.finalize().as_bytes()); - Hash(res) + Hash(res) } } diff --git a/core/src/core/mod.rs b/core/src/core/mod.rs index 6086efad6..8be52840f 100644 --- a/core/src/core/mod.rs +++ b/core/src/core/mod.rs @@ -20,7 +20,7 @@ pub mod hash; pub mod pmmr; pub mod target; pub mod transaction; -//pub mod txoset; +// pub mod txoset; #[allow(dead_code)] use std::fmt; @@ -82,7 +82,7 @@ pub trait Committed { /// Proof of work pub struct Proof { /// The nonces - pub nonces:Vec, + pub nonces: Vec, /// The proof size pub proof_size: usize, @@ -125,9 +125,8 @@ impl Clone for Proof { } impl Proof { - /// Builds a proof with all bytes zeroed out - pub fn new(in_nonces:Vec) -> Proof { + pub fn new(in_nonces: Vec) -> Proof { Proof { proof_size: in_nonces.len(), nonces: in_nonces, @@ 
-135,10 +134,10 @@ impl Proof {
 	}

 	/// Builds a proof with all bytes zeroed out
-	pub fn zero(proof_size:usize) -> Proof {
+	pub fn zero(proof_size: usize) -> Proof {
 		Proof {
 			proof_size: proof_size,
-			nonces: vec![0;proof_size],
+			nonces: vec![0; proof_size],
 		}
 	}

@@ -251,9 +250,12 @@ mod test {

 	#[test]
 	fn hash_output() {
-		let (tx, _) =
-			build::transaction(vec![input_rand(75), output_rand(42), output_rand(32), with_fee(1)])
-				.unwrap();
+		let (tx, _) = build::transaction(vec![
+			input_rand(75),
+			output_rand(42),
+			output_rand(32),
+			with_fee(1),
+		]).unwrap();
 		let h = tx.outputs[0].hash();
 		assert!(h != ZERO_HASH);
 		let h2 = tx.outputs[1].hash();
@@ -309,9 +311,11 @@ mod test {
 		// From now on, Bob only has the obscured transaction and the sum of
 		// blinding factors. He adds his output, finalizes the transaction so it's
 		// ready for broadcast.
-		let (tx_final, _) =
-			build::transaction(vec![initial_tx(tx_alice), with_excess(blind_sum), output_rand(5)])
-				.unwrap();
+		let (tx_final, _) = build::transaction(vec![
+			initial_tx(tx_alice),
+			with_excess(blind_sum),
+			output_rand(5),
+		]).unwrap();

 		tx_final.validate(&secp).unwrap();
 	}

@@ -357,8 +361,12 @@ mod test {

 	// utility producing a transaction with 2 inputs and a single output
 	pub fn tx2i1o() -> Transaction {
-		build::transaction(vec![input_rand(10), input_rand(11), output_rand(20), with_fee(1)])
-			.map(|(tx, _)| tx)
+		build::transaction(vec![
+			input_rand(10),
+			input_rand(11),
+			output_rand(20),
+			with_fee(1),
+		]).map(|(tx, _)| tx)
 			.unwrap()
 	}

diff --git a/core/src/core/pmmr.rs b/core/src/core/pmmr.rs
index db2263f70..520271285 100644
--- a/core/src/core/pmmr.rs
+++ b/core/src/core/pmmr.rs
@@ -15,7 +15,7 @@
 //! Persistent and prunable Merkle Mountain Range implementation. For a high
 //! level description of MMRs, see:
 //!
 //! https://github.com/opentimestamps/opentimestamps-server/blob/master/doc/merkle-mountain-range.md
 //!
 //! This implementation is built in two major parts:
 //!
@@ -91,7 +95,10 @@ impl<T> Summable for NoSum<T> {
 		return 0;
 	}
 }
-impl<T> Writeable for NoSum<T> where T: Writeable {
+impl<T> Writeable for NoSum<T>
+where
+	T: Writeable,
+{
 	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
 		self.0.write(writer)
 	}
@@ -100,14 +107,20 @@ impl<T> Writeable for NoSum<T> where T: Writeable {

 /// A utility type to handle (Hash, Sum) pairs more conveniently. The addition
 /// of two HashSums is the (Hash(h1|h2), h1 + h2) HashSum.
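// An aside for illustration: the (Hash, Sum) pairing rule just described,
// sketched against the HashSum type defined below (a minimal sketch, not
// part of the patch itself):
fn combine_hashsums<T: Summable>(left: HashSum<T>, right: HashSum<T>) -> HashSum<T> {
	// the parent hashes the two child hashes together and adds the child sums
	left + right
}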
#[derive(Debug, Clone, PartialEq, Eq)]
-pub struct HashSum<T> where T: Summable {
+pub struct HashSum<T>
+where
+	T: Summable,
+{
 	/// The hash
 	pub hash: Hash,
 	/// The sum
 	pub sum: T::Sum,
 }

-impl<T> HashSum<T> where T: Summable + Hashed {
+impl<T> HashSum<T>
+where
+	T: Summable + Hashed,
+{
 	/// Create a hash sum from a summable
 	pub fn from_summable(idx: u64, elmt: &T) -> HashSum<T> {
 		let hash = elmt.hash();
@@ -120,7 +133,10 @@ impl<T> HashSum<T> where T: Summable + Hashed {
 	}
 }

-impl<T> Readable for HashSum<T> where T: Summable {
+impl<T> Readable for HashSum<T>
+where
+	T: Summable,
+{
 	fn read(r: &mut Reader) -> Result<HashSum<T>, ser::Error> {
 		Ok(HashSum {
 			hash: Hash::read(r)?,
@@ -129,14 +145,20 @@ impl<T> Readable for HashSum<T> where T: Summable {
 	}
 }

-impl<T> Writeable for HashSum<T> where T: Summable {
+impl<T> Writeable for HashSum<T>
+where
+	T: Summable,
+{
 	fn write<W: Writer>(&self, w: &mut W) -> Result<(), ser::Error> {
 		self.hash.write(w)?;
 		self.sum.write(w)
 	}
 }

-impl<T> ops::Add for HashSum<T> where T: Summable {
+impl<T> ops::Add for HashSum<T>
+where
+	T: Summable,
+{
 	type Output = HashSum<T>;
 	fn add(self, other: HashSum<T>) -> HashSum<T> {
 		HashSum {
@@ -150,8 +172,10 @@ impl<T> ops::Add for HashSum<T> where T: Summable {
 /// The PMMR itself does not need the Backend to be accurate on the existence
 /// of an element (i.e. remove could be a no-op) but layers above can
 /// depend on an accurate Backend to check existence.
-pub trait Backend<T> where T: Summable {
-
+pub trait Backend<T>
+where
+	T: Summable,
+{
 	/// Append the provided HashSums to the backend storage. The position of the
 	/// first element of the Vec in the MMR is provided to help the
 	/// implementation.
@@ -176,15 +200,22 @@ pub trait Backend<T> where T: Summable {
 /// Heavily relies on navigation operations within a binary tree. In particular,
 /// all the implementation needs to keep track of the MMR structure is how far
 /// we are in the sequence of nodes making up the MMR.
-pub struct PMMR<'a, T, B> where T: Summable, B: 'a + Backend<T> {
+pub struct PMMR<'a, T, B>
+where
+	T: Summable,
+	B: 'a + Backend<T>,
+{
 	last_pos: u64,
 	backend: &'a mut B,
 	// only needed for parameterizing Backend
 	summable: PhantomData<T>,
 }

-impl<'a, T, B> PMMR<'a, T, B> where T: Summable + Hashed + Clone, B: 'a + Backend<T> {
-
+impl<'a, T, B> PMMR<'a, T, B>
+where
+	T: Summable + Hashed + Clone,
+	B: 'a + Backend<T>,
+{
 	/// Build a new prunable Merkle Mountain Range using the provided backend.
 	pub fn new(backend: &'a mut B) -> PMMR<T, B> {
 		PMMR {
@@ -194,7 +225,8 @@ impl<'a, T, B> PMMR<'a, T, B> where T: Summable + Hashed + Clone, B: 'a + Backen
 	}
 }

-	/// Build a new prunable Merkle Mountain Range pre-initlialized until last_pos
+	/// Build a new prunable Merkle Mountain Range pre-initialized until
+	/// last_pos
 	/// with the provided backend.
 	pub fn at(backend: &'a mut B, last_pos: u64) -> PMMR<T, B> {
 		PMMR {
@@ -215,7 +247,7 @@ impl<'a, T, B> PMMR<'a, T, B> where T: Summable + Hashed + Clone, B: 'a + Backen
 		ret = match (ret, peak) {
 			(None, x) => x,
 			(Some(hsum), None) => Some(hsum),
-			(Some(lhsum), Some(rhsum)) => Some(lhsum + rhsum)
+			(Some(lhsum), Some(rhsum)) => Some(lhsum + rhsum),
 		}
 	}
 	ret.expect("no root, invalid tree")
@@ -234,10 +266,11 @@ impl<'a, T, B> PMMR<'a, T, B> where T: Summable + Hashed + Clone, B: 'a + Backen
 	// height it means we have to build a higher peak by summing with a previous
 	// sibling. we do it iteratively in case the new peak itself allows the
 	// creation of another parent.
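// A minimal sketch of driving the PMMR above, assuming the in-memory
// VecBackend defined further down this file and any element type meeting
// the Summable + Hashed + Clone bounds (as the tests below use); it expects
// at least one element:
fn sketch_pmmr<T>(elems: Vec<T>) -> HashSum<T>
where
	T: Summable + Hashed + Clone,
{
	let mut backend = VecBackend::new();
	let mut pmmr = PMMR::new(&mut backend);
	for elem in elems {
		// push appends the element plus any parent nodes it completes
		pmmr.push(elem).unwrap();
	}
	// root() combines the peak HashSums into a single (hash, sum) pair
	pmmr.root()
}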
-		while bintree_postorder_height(pos+1) > height {
+		while bintree_postorder_height(pos + 1) > height {
 			let left_sibling = bintree_jump_left_sibling(pos);
-			let left_hashsum = self.backend.get(left_sibling)
-				.expect("missing left sibling in tree, should not have been pruned");
+			let left_hashsum = self.backend.get(left_sibling).expect(
+				"missing left sibling in tree, should not have been pruned",
+			);
 			current_hashsum = left_hashsum + current_hashsum;

 			to_append.push(current_hashsum.clone());
@@ -259,7 +292,7 @@ impl<'a, T, B> PMMR<'a, T, B> where T: Summable + Hashed + Clone, B: 'a + Backen
 		// position is a leaf, which may have some parent that needs to exist
 		// afterward for the MMR to be valid
 		let mut pos = position;
-		while bintree_postorder_height(pos+1) > 0 {
+		while bintree_postorder_height(pos + 1) > 0 {
 			pos += 1;
 		}

@@ -268,13 +301,14 @@ impl<'a, T, B> PMMR<'a, T, B> where T: Summable + Hashed + Clone, B: 'a + Backen
 		Ok(())
 	}

-	/// Prune an element from the tree given its position. Note that to be able to
+	/// Prune an element from the tree given its position. Note that to be able
+	/// to
 	/// provide that position and prune, consumers of this API are expected to
 	/// keep an index of elements to positions in the tree. Prunes parent
 	/// nodes as well when they become childless.
 	pub fn prune(&mut self, position: u64, index: u32) -> Result<bool, String> {
 		if let None = self.backend.get(position) {
-			return Ok(false)
+			return Ok(false);
 		}
 		let prunable_height = bintree_postorder_height(position);
 		if prunable_height > 0 {
@@ -286,7 +320,7 @@ impl<'a, T, B> PMMR<'a, T, B> where T: Summable + Hashed + Clone, B: 'a + Backen
 		// the tree.
 		let mut to_prune = vec![];
 		let mut current = position;
-		while current+1 < self.last_pos {
+		while current + 1 < self.last_pos {
 			let (parent, sibling) = family(current);
 			if parent > self.last_pos {
 				// can't prune when our parent isn't here yet
@@ -330,7 +364,7 @@ impl<'a, T, B> PMMR<'a, T, B> where T: Summable + Hashed + Clone, B: 'a + Backen
 			print!("{:>8} ", n + 1);
 		}
 		println!("");
-		for n in 1..(sz+1) {
+		for n in 1..(sz + 1) {
 			let ohs = self.get(n);
 			match ohs {
 				Some(hs) => print!("{} ", hs.hash),
@@ -345,36 +379,45 @@ impl<'a, T, B> PMMR<'a, T, B> where T: Summable + Hashed + Clone, B: 'a + Backen
 /// compact the Vector itself but still frees the reference to the
 /// underlying HashSum.
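// Sketching the pruning contract described above: callers keep their own
// element-to-position index and pass positions to prune(); a parent is only
// swept once both of its children are gone. Positions 1 and 2 are the first
// two leaves and 3 their parent (a sketch, reusing the VecBackend below):
fn sketch_prune<T>(pmmr: &mut PMMR<T, VecBackend<T>>)
where
	T: Summable + Hashed + Clone,
{
	pmmr.prune(1, 0).unwrap(); // the parent at 3 stays: leaf 2 is still there
	pmmr.prune(2, 0).unwrap(); // now 3 is childless and gets swept as well
	assert!(pmmr.get(1).is_none());
}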
#[derive(Clone)] -pub struct VecBackend where T: Summable + Clone { +pub struct VecBackend +where + T: Summable + Clone, +{ pub elems: Vec>>, } -impl Backend for VecBackend where T: Summable + Clone { +impl Backend for VecBackend +where + T: Summable + Clone, +{ #[allow(unused_variables)] fn append(&mut self, position: u64, data: Vec>) -> Result<(), String> { self.elems.append(&mut map_vec!(data, |d| Some(d.clone()))); Ok(()) } fn get(&self, position: u64) -> Option> { - self.elems[(position-1) as usize].clone() + self.elems[(position - 1) as usize].clone() } fn remove(&mut self, positions: Vec, index: u32) -> Result<(), String> { for n in positions { - self.elems[(n-1) as usize] = None + self.elems[(n - 1) as usize] = None } Ok(()) } #[allow(unused_variables)] fn rewind(&mut self, position: u64, index: u32) -> Result<(), String> { - self.elems = self.elems[0..(position as usize)+1].to_vec(); + self.elems = self.elems[0..(position as usize) + 1].to_vec(); Ok(()) } } -impl VecBackend where T: Summable + Clone { +impl VecBackend +where + T: Summable + Clone, +{ /// Instantiates a new VecBackend pub fn new() -> VecBackend { - VecBackend{elems: vec![]} + VecBackend { elems: vec![] } } /// Current number of HashSum elements in the underlying Vec. @@ -418,7 +461,7 @@ pub struct PruneList { impl PruneList { /// Instantiate a new empty prune list pub fn new() -> PruneList { - PruneList{pruned_nodes: vec![]} + PruneList { pruned_nodes: vec![] } } /// Computes by how many positions a node at pos should be shifted given the @@ -501,7 +544,7 @@ fn peaks(num: u64) -> Vec { // detecting an invalid mountain range, when siblings exist but no parent // exists - if bintree_postorder_height(num+1) > bintree_postorder_height(num) { + if bintree_postorder_height(num + 1) > bintree_postorder_height(num) { return vec![]; } @@ -616,7 +659,7 @@ pub fn family(pos: u64) -> (u64, u64) { let parent: u64; let pos_height = bintree_postorder_height(pos); - let next_height = bintree_postorder_height(pos+1); + let next_height = bintree_postorder_height(pos + 1); if next_height > pos_height { sibling = bintree_jump_left_sibling(pos); parent = pos + 1; @@ -710,15 +753,19 @@ mod test { #[test] #[allow(unused_variables)] fn first_50_mmr_heights() { - let first_100_str = - "0 0 1 0 0 1 2 0 0 1 0 0 1 2 3 0 0 1 0 0 1 2 0 0 1 0 0 1 2 3 4 \ + let first_100_str = "0 0 1 0 0 1 2 0 0 1 0 0 1 2 3 0 0 1 0 0 1 2 0 0 1 0 0 1 2 3 4 \ 0 0 1 0 0 1 2 0 0 1 0 0 1 2 3 0 0 1 0 0 1 2 0 0 1 0 0 1 2 3 4 5 \ 0 0 1 0 0 1 2 0 0 1 0 0 1 2 3 0 0 1 0 0 1 2 0 0 1 0 0 1 2 3 4 0 0 1 0 0"; let first_100 = first_100_str.split(' ').map(|n| n.parse::().unwrap()); let mut count = 1; for n in first_100 { - assert_eq!(n, bintree_postorder_height(count), "expected {}, got {}", - n, bintree_postorder_height(count)); + assert_eq!( + n, + bintree_postorder_height(count), + "expected {}, got {}", + n, + bintree_postorder_height(count) + ); count += 1; } } @@ -785,7 +832,13 @@ mod test { let hash = Hashed::hash(&elems[0]); let sum = elems[0].sum(); let node_hash = (1 as u64, &sum, hash).hash(); - assert_eq!(pmmr.root(), HashSum{hash: node_hash, sum: sum}); + assert_eq!( + pmmr.root(), + HashSum { + hash: node_hash, + sum: sum, + } + ); assert_eq!(pmmr.unpruned_size(), 1); // two elements @@ -802,7 +855,8 @@ mod test { // four elements pmmr.push(elems[3]).unwrap(); - let sum4 = sum2 + (HashSum::from_summable(4, &elems[2]) + HashSum::from_summable(5, &elems[3])); + let sum4 = sum2 + + (HashSum::from_summable(4, &elems[2]) + HashSum::from_summable(5, &elems[3])); 
assert_eq!(pmmr.root(), sum4); assert_eq!(pmmr.unpruned_size(), 7); @@ -814,7 +868,8 @@ mod test { // six elements pmmr.push(elems[5]).unwrap(); - let sum6 = sum4.clone() + (HashSum::from_summable(8, &elems[4]) + HashSum::from_summable(9, &elems[5])); + let sum6 = sum4.clone() + + (HashSum::from_summable(8, &elems[4]) + HashSum::from_summable(9, &elems[5])); assert_eq!(pmmr.root(), sum6.clone()); assert_eq!(pmmr.unpruned_size(), 10); @@ -826,7 +881,9 @@ mod test { // eight elements pmmr.push(elems[7]).unwrap(); - let sum8 = sum4 + ((HashSum::from_summable(8, &elems[4]) + HashSum::from_summable(9, &elems[5])) + (HashSum::from_summable(11, &elems[6]) + HashSum::from_summable(12, &elems[7]))); + let sum8 = sum4 + + ((HashSum::from_summable(8, &elems[4]) + HashSum::from_summable(9, &elems[5])) + + (HashSum::from_summable(11, &elems[6]) + HashSum::from_summable(12, &elems[7]))); assert_eq!(pmmr.root(), sum8); assert_eq!(pmmr.unpruned_size(), 15); diff --git a/core/src/core/target.rs b/core/src/core/target.rs index b30cbfab9..8c6b34ac1 100644 --- a/core/src/core/target.rs +++ b/core/src/core/target.rs @@ -59,8 +59,8 @@ impl Difficulty { /// provided hash. pub fn from_hash(h: &Hash) -> Difficulty { let max_target = BigEndian::read_u64(&MAX_TARGET); - //Use the first 64 bits of the given hash - let mut in_vec=h.to_vec(); + // Use the first 64 bits of the given hash + let mut in_vec = h.to_vec(); in_vec.truncate(8); let num = BigEndian::read_u64(&in_vec); Difficulty { num: max_target / num } @@ -121,7 +121,8 @@ impl Readable for Difficulty { impl Serialize for Difficulty { fn serialize(&self, serializer: S) -> Result - where S: Serializer + where + S: Serializer, { serializer.serialize_u64(self.num) } @@ -129,7 +130,8 @@ impl Serialize for Difficulty { impl<'de> Deserialize<'de> for Difficulty { fn deserialize(deserializer: D) -> Result - where D: Deserializer<'de> + where + D: Deserializer<'de>, { deserializer.deserialize_u64(DiffVisitor) } @@ -145,12 +147,16 @@ impl<'de> de::Visitor<'de> for DiffVisitor { } fn visit_str(self, s: &str) -> Result - where E: de::Error + where + E: de::Error, { let num_in = s.parse::(); - if let Err(_)=num_in { - return Err(de::Error::invalid_value(de::Unexpected::Str(s), &"a value number")); - }; + if let Err(_) = num_in { + return Err(de::Error::invalid_value( + de::Unexpected::Str(s), + &"a value number", + )); + }; Ok(Difficulty { num: num_in.unwrap() }) } } diff --git a/core/src/core/transaction.rs b/core/src/core/transaction.rs index ef023c6e5..58fc1889f 100644 --- a/core/src/core/transaction.rs +++ b/core/src/core/transaction.rs @@ -54,11 +54,13 @@ pub struct TxKernel { impl Writeable for TxKernel { fn write(&self, writer: &mut W) -> Result<(), ser::Error> { - ser_multiwrite!(writer, - [write_u8, self.features.bits()], - [write_fixed_bytes, &self.excess], - [write_bytes, &self.excess_sig], - [write_u64, self.fee]); + ser_multiwrite!( + writer, + [write_u8, self.features.bits()], + [write_fixed_bytes, &self.excess], + [write_bytes, &self.excess_sig], + [write_u64, self.fee] + ); Ok(()) } } @@ -66,8 +68,9 @@ impl Writeable for TxKernel { impl Readable for TxKernel { fn read(reader: &mut Reader) -> Result { Ok(TxKernel { - features: - KernelFeatures::from_bits(reader.read_u8()?).ok_or(ser::Error::CorruptedData)?, + features: KernelFeatures::from_bits(reader.read_u8()?).ok_or( + ser::Error::CorruptedData, + )?, excess: Commitment::read(reader)?, excess_sig: reader.read_vec()?, fee: reader.read_u64()?, @@ -104,11 +107,13 @@ pub struct Transaction { /// write the 
transaction as binary. impl Writeable for Transaction { fn write(&self, writer: &mut W) -> Result<(), ser::Error> { - ser_multiwrite!(writer, - [write_u64, self.fee], - [write_bytes, &self.excess_sig], - [write_u64, self.inputs.len() as u64], - [write_u64, self.outputs.len() as u64]); + ser_multiwrite!( + writer, + [write_u64, self.fee], + [write_bytes, &self.excess_sig], + [write_u64, self.inputs.len() as u64], + [write_u64, self.outputs.len() as u64] + ); for inp in &self.inputs { try!(inp.write(writer)); } @@ -185,7 +190,10 @@ impl Transaction { pub fn with_input(self, input: Input) -> Transaction { let mut new_ins = self.inputs; new_ins.push(input); - Transaction { inputs: new_ins, ..self } + Transaction { + inputs: new_ins, + ..self + } } /// Builds a new transaction with the provided output added. Existing @@ -193,7 +201,10 @@ impl Transaction { pub fn with_output(self, output: Output) -> Transaction { let mut new_outs = self.outputs; new_outs.push(output); - Transaction { outputs: new_outs, ..self } + Transaction { + outputs: new_outs, + ..self + } } /// Builds a new transaction with the provided fee. @@ -304,9 +315,11 @@ pub struct Output { /// an Output as binary. impl Writeable for Output { fn write(&self, writer: &mut W) -> Result<(), ser::Error> { - ser_multiwrite!(writer, - [write_u8, self.features.bits()], - [write_fixed_bytes, &self.commit]); + ser_multiwrite!( + writer, + [write_u8, self.features.bits()], + [write_fixed_bytes, &self.commit] + ); // The hash of an output doesn't include the range proof if writer.serialization_mode() == ser::SerializationMode::Full { writer.write_bytes(&self.proof)? @@ -320,8 +333,9 @@ impl Writeable for Output { impl Readable for Output { fn read(reader: &mut Reader) -> Result { Ok(Output { - features: - OutputFeatures::from_bits(reader.read_u8()?).ok_or(ser::Error::CorruptedData)?, + features: OutputFeatures::from_bits(reader.read_u8()?).ok_or( + ser::Error::CorruptedData, + )?, commit: Commitment::read(reader)?, proof: RangeProof::read(reader)?, }) @@ -341,8 +355,6 @@ impl Output { /// Validates the range proof using the commitment pub fn verify_proof(&self, secp: &Secp256k1) -> Result<(), secp::Error> { - /// secp.verify_range_proof returns range if and only if both min_value and max_value less than 2^64 - /// since group order is much larger (~2^256) we can be sure overflow is not the case secp.verify_range_proof(self.commit, self.proof).map(|_| ()) } } @@ -392,7 +404,10 @@ impl ops::Add for SumCommit { type Output = SumCommit; fn add(self, other: SumCommit) -> SumCommit { - let sum = match self.secp.commit_sum(vec![self.commit.clone(), other.commit.clone()], vec![]) { + let sum = match self.secp.commit_sum( + vec![self.commit.clone(), other.commit.clone()], + vec![], + ) { Ok(s) => s, Err(_) => Commitment::from_vec(vec![1; 33]), }; diff --git a/core/src/global.rs b/core/src/global.rs index fb0d676a5..0a3448ddd 100644 --- a/core/src/global.rs +++ b/core/src/global.rs @@ -21,7 +21,7 @@ /// different sets of parameters for different purposes, /// e.g. 
CI, User testing, production values -use std::sync::{RwLock}; +use std::sync::RwLock; use consensus::PROOFSIZE; use consensus::DEFAULT_SIZESHIFT; @@ -29,16 +29,16 @@ use consensus::DEFAULT_SIZESHIFT; /// by users /// Automated testing sizeshift -pub const AUTOMATED_TESTING_SIZESHIFT:u8 = 10; +pub const AUTOMATED_TESTING_SIZESHIFT: u8 = 10; /// Automated testing proof size -pub const AUTOMATED_TESTING_PROOF_SIZE:usize = 4; +pub const AUTOMATED_TESTING_PROOF_SIZE: usize = 4; /// User testing sizeshift -pub const USER_TESTING_SIZESHIFT:u8 = 16; +pub const USER_TESTING_SIZESHIFT: u8 = 16; /// User testing proof size -pub const USER_TESTING_PROOF_SIZE:usize = 42; +pub const USER_TESTING_PROOF_SIZE: usize = 42; /// Mining parameter modes #[derive(Debug, Clone, Serialize, Deserialize)] @@ -55,18 +55,19 @@ pub enum MiningParameterMode { lazy_static!{ /// The mining parameter mode - pub static ref MINING_PARAMETER_MODE: RwLock = RwLock::new(MiningParameterMode::Production); + pub static ref MINING_PARAMETER_MODE: RwLock = + RwLock::new(MiningParameterMode::Production); } /// Set the mining mode -pub fn set_mining_mode(mode:MiningParameterMode){ - let mut param_ref=MINING_PARAMETER_MODE.write().unwrap(); - *param_ref=mode; +pub fn set_mining_mode(mode: MiningParameterMode) { + let mut param_ref = MINING_PARAMETER_MODE.write().unwrap(); + *param_ref = mode; } /// The sizeshift pub fn sizeshift() -> u8 { - let param_ref=MINING_PARAMETER_MODE.read().unwrap(); + let param_ref = MINING_PARAMETER_MODE.read().unwrap(); match *param_ref { MiningParameterMode::AutomatedTesting => AUTOMATED_TESTING_SIZESHIFT, MiningParameterMode::UserTesting => USER_TESTING_SIZESHIFT, @@ -76,7 +77,7 @@ pub fn sizeshift() -> u8 { /// The proofsize pub fn proofsize() -> usize { - let param_ref=MINING_PARAMETER_MODE.read().unwrap(); + let param_ref = MINING_PARAMETER_MODE.read().unwrap(); match *param_ref { MiningParameterMode::AutomatedTesting => AUTOMATED_TESTING_PROOF_SIZE, MiningParameterMode::UserTesting => USER_TESTING_PROOF_SIZE, @@ -86,8 +87,8 @@ pub fn proofsize() -> usize { /// Are we in automated testing mode? pub fn is_automated_testing_mode() -> bool { - let param_ref=MINING_PARAMETER_MODE.read().unwrap(); - if let MiningParameterMode::AutomatedTesting=*param_ref { + let param_ref = MINING_PARAMETER_MODE.read().unwrap(); + if let MiningParameterMode::AutomatedTesting = *param_ref { return true; } else { return false; @@ -96,8 +97,8 @@ pub fn is_automated_testing_mode() -> bool { /// Are we in production mode? pub fn is_production_mode() -> bool { - let param_ref=MINING_PARAMETER_MODE.read().unwrap(); - if let MiningParameterMode::Production=*param_ref { + let param_ref = MINING_PARAMETER_MODE.read().unwrap(); + if let MiningParameterMode::Production = *param_ref { return true; } else { return false; @@ -105,30 +106,72 @@ pub fn is_production_mode() -> bool { } -/// Helper function to get a nonce known to create a valid POW on +/// Helper function to get a nonce known to create a valid POW on /// the genesis block, to prevent it taking ages. 
Should be fine for now -/// as the genesis block POW solution turns out to be the same for every new block chain +/// as the genesis block POW solution turns out to be the same for every new +/// block chain /// at the moment pub fn get_genesis_nonce() -> u64 { - let param_ref=MINING_PARAMETER_MODE.read().unwrap(); + let param_ref = MINING_PARAMETER_MODE.read().unwrap(); match *param_ref { - MiningParameterMode::AutomatedTesting => 0, //won't make a difference - MiningParameterMode::UserTesting => 22141, //Magic nonce for current genesis block at cuckoo16 - MiningParameterMode::Production => 1429942738856787200, //Magic nonce for current genesis at cuckoo30 + // won't make a difference + MiningParameterMode::AutomatedTesting => 0, + // Magic nonce for current genesis block at cuckoo16 + MiningParameterMode::UserTesting => 22141, + // Magic nonce for current genesis at cuckoo30 + MiningParameterMode::Production => 1429942738856787200, } } -/// Returns the genesis POW for production, because it takes far too long to mine at production values +/// Returns the genesis POW for production, because it takes far too long to +/// mine at production values /// using the internal miner -pub fn get_genesis_pow() -> [u32;42]{ - //TODO: This is diff 26, probably just want a 10: mine one - [7444824, 11926557, 28520390, 30594072, 50854023, 52797085, 57882033, - 59816511, 61404804, 84947619, 87779345, 115270337, 162618676, - 166860710, 178656003, 178971372, 200454733, 209197630, 221231015, - 228598741, 241012783, 245401183, 279080304, 295848517, 327300943, - 329741709, 366394532, 382493153, 389329248, 404353381, 406012911, - 418813499, 426573907, 452566575, 456930760, 463021458, 474340589, - 476248039, 478197093, 487576917, 495653489, 501862896] +pub fn get_genesis_pow() -> [u32; 42] { + // TODO: This is diff 26, probably just want a 10: mine one + [ + 7444824, + 11926557, + 28520390, + 30594072, + 50854023, + 52797085, + 57882033, + 59816511, + 61404804, + 84947619, + 87779345, + 115270337, + 162618676, + 166860710, + 178656003, + 178971372, + 200454733, + 209197630, + 221231015, + 228598741, + 241012783, + 245401183, + 279080304, + 295848517, + 327300943, + 329741709, + 366394532, + 382493153, + 389329248, + 404353381, + 406012911, + 418813499, + 426573907, + 452566575, + 456930760, + 463021458, + 474340589, + 476248039, + 478197093, + 487576917, + 495653489, + 501862896, + ] } diff --git a/core/src/ser.rs b/core/src/ser.rs index 46db4e249..dbcd53a19 100644 --- a/core/src/ser.rs +++ b/core/src/ser.rs @@ -55,9 +55,10 @@ impl fmt::Display for Error { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { match *self { Error::IOErr(ref e) => write!(f, "{}", e), - Error::UnexpectedData { expected: ref e, received: ref r } => { - write!(f, "expected {:?}, got {:?}", e, r) - } + Error::UnexpectedData { + expected: ref e, + received: ref r, + } => write!(f, "expected {:?}, got {:?}", e, r), Error::CorruptedData => f.write_str("corrupted data"), Error::TooLargeReadErr => f.write_str("too large read"), } @@ -75,7 +76,10 @@ impl error::Error for Error { fn description(&self) -> &str { match *self { Error::IOErr(ref e) => error::Error::description(e), - Error::UnexpectedData { expected: _, received: _ } => "unexpected data", + Error::UnexpectedData { + expected: _, + received: _, + } => "unexpected data", Error::CorruptedData => "corrupted data", Error::TooLargeReadErr => "too large read", } @@ -180,7 +184,8 @@ pub trait Writeable { /// Reads directly to a Reader, a utility type thinly wrapping an /// underlying 
Read implementation. pub trait Readable - where Self: Sized +where + Self: Sized, { /// Reads the data necessary to this Readable from the provided reader fn read(reader: &mut Reader) -> Result; @@ -245,7 +250,9 @@ impl<'a> Reader for BinReader<'a> { return Err(Error::TooLargeReadErr); } let mut buf = vec![0; length]; - self.source.read_exact(&mut buf).map(move |_| buf).map_err(Error::IOErr) + self.source.read_exact(&mut buf).map(move |_| buf).map_err( + Error::IOErr, + ) } fn expect_u8(&mut self, val: u8) -> Result { @@ -338,14 +345,19 @@ impl_int!(u32, write_u32, read_u32); impl_int!(u64, write_u64, read_u64); impl_int!(i64, write_i64, read_i64); -impl Readable for Vec where T: Readable { +impl Readable for Vec +where + T: Readable, +{ fn read(reader: &mut Reader) -> Result, Error> { let mut buf = Vec::new(); loop { let elem = T::read(reader); match elem { Ok(e) => buf.push(e), - Err(Error::IOErr(ref ioerr)) if ioerr.kind() == io::ErrorKind::UnexpectedEof => break, + Err(Error::IOErr(ref ioerr)) if ioerr.kind() == io::ErrorKind::UnexpectedEof => { + break + } Err(e) => return Err(e), } } @@ -353,7 +365,10 @@ impl Readable for Vec where T: Readable { } } -impl Writeable for Vec where T: Writeable { +impl Writeable for Vec +where + T: Writeable, +{ fn write(&self, writer: &mut W) -> Result<(), Error> { for elmt in self { elmt.write(writer)?; @@ -400,18 +415,22 @@ impl Writeable for (A, B impl Readable for (A, B, C) { fn read(reader: &mut Reader) -> Result<(A, B, C), Error> { - Ok((try!(Readable::read(reader)), - try!(Readable::read(reader)), - try!(Readable::read(reader)))) + Ok(( + try!(Readable::read(reader)), + try!(Readable::read(reader)), + try!(Readable::read(reader)), + )) } } impl Readable for (A, B, C, D) { fn read(reader: &mut Reader) -> Result<(A, B, C, D), Error> { - Ok((try!(Readable::read(reader)), - try!(Readable::read(reader)), - try!(Readable::read(reader)), - try!(Readable::read(reader)))) + Ok(( + try!(Readable::read(reader)), + try!(Readable::read(reader)), + try!(Readable::read(reader)), + try!(Readable::read(reader)), + )) } } diff --git a/grin/src/adapters.rs b/grin/src/adapters.rs index 259a692cd..6eaed4d36 100644 --- a/grin/src/adapters.rs +++ b/grin/src/adapters.rs @@ -27,7 +27,7 @@ use secp::pedersen::Commitment; use util::OneTime; use store; use sync; -use core::global::{MiningParameterMode,MINING_PARAMETER_MODE}; +use core::global::{MiningParameterMode, MINING_PARAMETER_MODE}; /// Implementation of the NetAdapter for the blockchain. Gets notified when new /// blocks and transactions are received and forwards to the chain and pool @@ -56,7 +56,7 @@ impl NetAdapter for NetToChainAdapter { } fn block_received(&self, b: core::Block) { - let bhash = b.hash(); + let bhash = b.hash(); debug!("Received block {} from network, going to process.", bhash); // pushing the new block through the chain pipeline @@ -81,10 +81,12 @@ impl NetAdapter for NetToChainAdapter { added_hs.push(bh.hash()); } Err(chain::Error::Unfit(s)) => { - info!("Received unfit block header {} at {}: {}.", - bh.hash(), - bh.height, - s); + info!( + "Received unfit block header {} at {}: {}.", + bh.hash(), + bh.height, + s + ); } Err(chain::Error::StoreErr(e)) => { error!("Store error processing block header {}: {:?}", bh.hash(), e); @@ -150,7 +152,11 @@ impl NetAdapter for NetToChainAdapter { /// Find good peers we know with the provided capability and return their /// addresses. 
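// An aside on the Vec<T> Readable impl in the ser.rs hunk above: it keeps
// reading elements until the underlying reader hits EOF, so a serialized
// vector only round-trips cleanly when it is the last thing in the stream.
// A sketch of that round trip:
fn sketch_vec_roundtrip(nums: Vec<u64>) {
	let mut buf = Vec::new();
	ser::serialize(&mut buf, &nums).expect("serialization failed");
	let back: Vec<u64> = ser::deserialize(&mut &buf[..]).unwrap();
	assert_eq!(nums, back);
}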
fn find_peer_addrs(&self, capab: p2p::Capabilities) -> Vec { - let peers = self.peer_store.find_peers(State::Healthy, capab, p2p::MAX_PEER_ADDRS as usize); + let peers = self.peer_store.find_peers( + State::Healthy, + capab, + p2p::MAX_PEER_ADDRS as usize, + ); debug!("Got {} peer addrs to send.", peers.len()); map_vec!(peers, |p| p.addr) } @@ -192,10 +198,11 @@ impl NetAdapter for NetToChainAdapter { } impl NetToChainAdapter { - pub fn new(chain_ref: Arc, - tx_pool: Arc>>, - peer_store: Arc) - -> NetToChainAdapter { + pub fn new( + chain_ref: Arc, + tx_pool: Arc>>, + peer_store: Arc, + ) -> NetToChainAdapter { NetToChainAdapter { chain: chain_ref, peer_store: peer_store, @@ -209,13 +216,15 @@ impl NetToChainAdapter { pub fn start_sync(&self, sync: sync::Syncer) { let arc_sync = Arc::new(sync); self.syncer.init(arc_sync.clone()); - let spawn_result = thread::Builder::new().name("syncer".to_string()).spawn(move || { - let sync_run_result = arc_sync.run(); - match sync_run_result { - Ok(_) => {} - Err(_) => {} - } - }); + let spawn_result = thread::Builder::new().name("syncer".to_string()).spawn( + move || { + let sync_run_result = arc_sync.run(); + match sync_run_result { + Ok(_) => {} + Err(_) => {} + } + }, + ); match spawn_result { Ok(_) => {} Err(_) => {} @@ -229,7 +238,7 @@ impl NetToChainAdapter { } else { chain::NONE }; - let param_ref=MINING_PARAMETER_MODE.read().unwrap(); + let param_ref = MINING_PARAMETER_MODE.read().unwrap(); let opts = match *param_ref { MiningParameterMode::AutomatedTesting => opts | chain::EASY_POW, MiningParameterMode::UserTesting => opts | chain::EASY_POW, @@ -251,9 +260,11 @@ impl ChainAdapter for ChainToPoolAndNetAdapter { fn block_accepted(&self, b: &core::Block) { { if let Err(e) = self.tx_pool.write().unwrap().reconcile_block(b) { - error!("Pool could not update itself at block {}: {:?}", - b.hash(), - e); + error!( + "Pool could not update itself at block {}: {:?}", + b.hash(), + e + ); } } self.p2p.borrow().broadcast_block(b); @@ -261,8 +272,9 @@ impl ChainAdapter for ChainToPoolAndNetAdapter { } impl ChainToPoolAndNetAdapter { - pub fn new(tx_pool: Arc>>) - -> ChainToPoolAndNetAdapter { + pub fn new( + tx_pool: Arc>>, + ) -> ChainToPoolAndNetAdapter { ChainToPoolAndNetAdapter { tx_pool: tx_pool, p2p: OneTime::new(), @@ -294,21 +306,28 @@ impl PoolToChainAdapter { impl pool::BlockChain for PoolToChainAdapter { fn get_unspent(&self, output_ref: &Commitment) -> Result { - self.chain.borrow().get_unspent(output_ref) - .map_err(|e| match e { + self.chain.borrow().get_unspent(output_ref).map_err( + |e| match e { chain::types::Error::OutputNotFound => pool::PoolError::OutputNotFound, chain::types::Error::OutputSpent => pool::PoolError::OutputSpent, _ => pool::PoolError::GenericPoolError, - }) + }, + ) } - fn get_block_header_by_output_commit(&self, commit: &Commitment) -> Result { - self.chain.borrow().get_block_header_by_output_commit(commit) + fn get_block_header_by_output_commit( + &self, + commit: &Commitment, + ) -> Result { + self.chain + .borrow() + .get_block_header_by_output_commit(commit) .map_err(|_| pool::PoolError::GenericPoolError) } fn head_header(&self) -> Result { - self.chain.borrow().head_header() - .map_err(|_| pool::PoolError::GenericPoolError) + self.chain.borrow().head_header().map_err(|_| { + pool::PoolError::GenericPoolError + }) } } diff --git a/grin/src/lib.rs b/grin/src/lib.rs index 9f141450a..ae4783216 100644 --- a/grin/src/lib.rs +++ b/grin/src/lib.rs @@ -55,5 +55,5 @@ mod sync; mod types; mod miner; -pub use server::{Server}; 
+pub use server::Server; pub use types::{ServerConfig, Seeding, ServerStats}; diff --git a/grin/src/miner.rs b/grin/src/miner.rs index 595e8be00..11ad92224 100644 --- a/grin/src/miner.rs +++ b/grin/src/miner.rs @@ -231,7 +231,7 @@ impl Miner { next_stat_output = time::get_time().sec + stat_output_interval; } } - //avoid busy wait + // avoid busy wait let sleep_dur = std::time::Duration::from_millis(100); thread::sleep(sleep_dur); } @@ -540,7 +540,9 @@ impl Miner { b.header.nonce = rng.gen(); b.header.difficulty = difficulty; b.header.timestamp = time::at_utc(time::Timespec::new(now_sec, 0)); - self.chain.set_sumtree_roots(&mut b).expect("Error setting sum tree roots"); + self.chain.set_sumtree_roots(&mut b).expect( + "Error setting sum tree roots", + ); b } diff --git a/grin/src/seed.rs b/grin/src/seed.rs index bf09f9d4f..996a6d5a2 100644 --- a/grin/src/seed.rs +++ b/grin/src/seed.rs @@ -44,10 +44,11 @@ pub struct Seeder { } impl Seeder { - pub fn new(capabilities: p2p::Capabilities, - peer_store: Arc, - p2p: Arc) - -> Seeder { + pub fn new( + capabilities: p2p::Capabilities, + peer_store: Arc, + p2p: Arc, + ) -> Seeder { Seeder { peer_store: peer_store, p2p: p2p, @@ -55,17 +56,20 @@ impl Seeder { } } - pub fn connect_and_monitor(&self, - h: reactor::Handle, - seed_list: Box, Error = String>>) { + pub fn connect_and_monitor( + &self, + h: reactor::Handle, + seed_list: Box, Error = String>>, + ) { // open a channel with a listener that connects every peer address sent below // max peer count let (tx, rx) = futures::sync::mpsc::unbounded(); h.spawn(self.listen_for_addrs(h.clone(), rx)); // check seeds and start monitoring connections - let seeder = self.connect_to_seeds(tx.clone(), seed_list) - .join(self.monitor_peers(tx.clone())); + let seeder = self.connect_to_seeds(tx.clone(), seed_list).join( + self.monitor_peers(tx.clone()), + ); h.spawn(seeder.map(|_| ()).map_err(|e| { error!("Seeding or peer monitoring error: {}", e); @@ -73,9 +77,10 @@ impl Seeder { })); } - fn monitor_peers(&self, - tx: mpsc::UnboundedSender) - -> Box> { + fn monitor_peers( + &self, + tx: mpsc::UnboundedSender, + ) -> Box> { let peer_store = self.peer_store.clone(); let p2p_server = self.p2p.clone(); @@ -91,8 +96,8 @@ impl Seeder { for p in disconnected { if p.is_banned() { debug!("Marking peer {} as banned.", p.info.addr); - let update_result = peer_store.update_state( - p.info.addr, p2p::State::Banned); + let update_result = + peer_store.update_state(p.info.addr, p2p::State::Banned); match update_result { Ok(()) => {} Err(_) => {} @@ -102,9 +107,11 @@ impl Seeder { // we don't have enough peers, getting more from db if p2p_server.peer_count() < PEER_PREFERRED_COUNT { - let mut peers = peer_store.find_peers(p2p::State::Healthy, - p2p::UNKNOWN, - (2 * PEER_MAX_COUNT) as usize); + let mut peers = peer_store.find_peers( + p2p::State::Healthy, + p2p::UNKNOWN, + (2 * PEER_MAX_COUNT) as usize, + ); peers.retain(|p| !p2p_server.is_known(p.addr)); if peers.len() > 0 { debug!("Got {} more peers from db, trying to connect.", peers.len()); @@ -124,20 +131,24 @@ impl Seeder { // Check if we have any pre-existing peer in db. If so, start with those, // otherwise use the seeds provided. 
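// For illustration, the channel wiring connect_and_monitor() sets up above:
// every future that discovers a peer address pushes it into one unbounded
// mpsc channel, and listen_for_addrs drains it. A self-contained sketch of
// that pattern with the same futures primitives:
fn sketch_addr_channel() {
	use std::net::SocketAddr;
	use futures::{Future, Stream};

	let (tx, rx) = futures::sync::mpsc::unbounded::<SocketAddr>();
	tx.send("127.0.0.1:13414".parse().unwrap()).unwrap();
	drop(tx); // dropping the last sender terminates the stream
	let drained = rx.collect().wait().unwrap();
	assert_eq!(drained.len(), 1);
}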
- fn connect_to_seeds(&self, - tx: mpsc::UnboundedSender, - seed_list: Box, Error = String>>) - -> Box> { + fn connect_to_seeds( + &self, + tx: mpsc::UnboundedSender, + seed_list: Box, Error = String>>, + ) -> Box> { let peer_store = self.peer_store.clone(); // a thread pool is required so we don't block the event loop with a // db query let thread_pool = cpupool::CpuPool::new(1); - let seeder = thread_pool.spawn_fn(move || { + let seeder = thread_pool + .spawn_fn(move || { // check if we have some peers in db - let peers = peer_store.find_peers(p2p::State::Healthy, - p2p::FULL_HIST, - (2 * PEER_MAX_COUNT) as usize); + let peers = peer_store.find_peers( + p2p::State::Healthy, + p2p::FULL_HIST, + (2 * PEER_MAX_COUNT) as usize, + ); Ok(peers) }) .and_then(|mut peers| { @@ -168,10 +179,11 @@ impl Seeder { /// addresses to and initiate a connection if the max peer count isn't /// exceeded. A request for more peers is also automatically sent after /// connection. - fn listen_for_addrs(&self, - h: reactor::Handle, - rx: mpsc::UnboundedReceiver) - -> Box> { + fn listen_for_addrs( + &self, + h: reactor::Handle, + rx: mpsc::UnboundedReceiver, + ) -> Box> { let capab = self.capabilities; let p2p_store = self.peer_store.clone(); let p2p_server = self.p2p.clone(); @@ -180,11 +192,13 @@ impl Seeder { debug!("New peer address to connect to: {}.", peer_addr); let inner_h = h.clone(); if p2p_server.peer_count() < PEER_MAX_COUNT { - connect_and_req(capab, - p2p_store.clone(), - p2p_server.clone(), - inner_h, - peer_addr) + connect_and_req( + capab, + p2p_store.clone(), + p2p_server.clone(), + inner_h, + peer_addr, + ) } else { Box::new(future::ok(())) } @@ -201,7 +215,8 @@ pub fn web_seeds(h: reactor::Handle) -> Box, Error let client = hyper::Client::new(&h); // http get, filtering out non 200 results - client.get(url) + client + .get(url) .map_err(|e| e.to_string()) .and_then(|res| { if res.status() != hyper::Ok { @@ -211,14 +226,17 @@ pub fn web_seeds(h: reactor::Handle) -> Box, Error }) .and_then(|res| { // collect all chunks and split around whitespace to get a list of SocketAddr - res.body().collect().map_err(|e| e.to_string()).and_then(|chunks| { - let res = chunks.iter().fold("".to_string(), |acc, ref chunk| { - acc + str::from_utf8(&chunk[..]).unwrap() - }); - let addrs = - res.split_whitespace().map(|s| s.parse().unwrap()).collect::>(); - Ok(addrs) - }) + res.body().collect().map_err(|e| e.to_string()).and_then( + |chunks| { + let res = chunks.iter().fold("".to_string(), |acc, ref chunk| { + acc + str::from_utf8(&chunk[..]).unwrap() + }); + let addrs = res.split_whitespace() + .map(|s| s.parse().unwrap()) + .collect::>(); + Ok(addrs) + }, + ) }) }); Box::new(seeds) @@ -226,40 +244,47 @@ pub fn web_seeds(h: reactor::Handle) -> Box, Error /// Convenience function when the seed list is immediately known. Mostly used /// for tests. 
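// The parsing step inside web_seeds() above, isolated: the collected HTTP
// body is split on whitespace and each token parsed as a SocketAddr (a
// sketch, with an inline string standing in for the fetched body):
fn sketch_parse_seeds() {
	use std::net::SocketAddr;

	let body = "10.0.0.1:13414 10.0.0.2:13414\n";
	let addrs = body.split_whitespace()
		.map(|s| s.parse().unwrap())
		.collect::<Vec<SocketAddr>>();
	assert_eq!(addrs.len(), 2);
}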
-pub fn predefined_seeds(addrs_str: Vec) - -> Box, Error = String>> { - let seeds = future::ok(()) - .and_then(move |_| Ok(addrs_str.iter().map(|s| s.parse().unwrap()).collect::>())); +pub fn predefined_seeds( + addrs_str: Vec, +) -> Box, Error = String>> { + let seeds = future::ok(()).and_then(move |_| { + Ok( + addrs_str + .iter() + .map(|s| s.parse().unwrap()) + .collect::>(), + ) + }); Box::new(seeds) } -fn connect_and_req(capab: p2p::Capabilities, - peer_store: Arc, - p2p: Arc, - h: reactor::Handle, - addr: SocketAddr) - -> Box> { - let fut = p2p.connect_peer(addr, h) - .then(move |p| { - match p { - Ok(Some(p)) => { - let peer_result = p.send_peer_request(capab); - match peer_result { - Ok(()) => {} - Err(_) => {} - } +fn connect_and_req( + capab: p2p::Capabilities, + peer_store: Arc, + p2p: Arc, + h: reactor::Handle, + addr: SocketAddr, +) -> Box> { + let fut = p2p.connect_peer(addr, h).then(move |p| { + match p { + Ok(Some(p)) => { + let peer_result = p.send_peer_request(capab); + match peer_result { + Ok(()) => {} + Err(_) => {} } - Err(e) => { - error!("Peer request error: {:?}", e); - let update_result = peer_store.update_state(addr, p2p::State::Defunct); - match update_result { - Ok(()) => {} - Err(_) => {} - } - } - _ => {} } - Ok(()) - }); + Err(e) => { + error!("Peer request error: {:?}", e); + let update_result = peer_store.update_state(addr, p2p::State::Defunct); + match update_result { + Ok(()) => {} + Err(_) => {} + } + } + _ => {} + } + Ok(()) + }); Box::new(fut) } diff --git a/grin/src/server.rs b/grin/src/server.rs index 7950fdf75..a17edf582 100644 --- a/grin/src/server.rs +++ b/grin/src/server.rs @@ -79,35 +79,47 @@ impl Server { pub fn future(mut config: ServerConfig, evt_handle: &reactor::Handle) -> Result { let pool_adapter = Arc::new(PoolToChainAdapter::new()); - let tx_pool = Arc::new(RwLock::new(pool::TransactionPool::new(pool_adapter.clone()))); + let tx_pool = Arc::new(RwLock::new( + pool::TransactionPool::new(pool_adapter.clone()), + )); let chain_adapter = Arc::new(ChainToPoolAndNetAdapter::new(tx_pool.clone())); let mut genesis_block = None; - if !chain::Chain::chain_exists(config.db_root.clone()){ - genesis_block=pow::mine_genesis_block(config.mining_config.clone()); + if !chain::Chain::chain_exists(config.db_root.clone()) { + genesis_block = pow::mine_genesis_block(config.mining_config.clone()); } - let shared_chain = Arc::new(chain::Chain::init(config.db_root.clone(), - chain_adapter.clone(), - genesis_block, - pow::verify_size)?); - + let shared_chain = Arc::new(chain::Chain::init( + config.db_root.clone(), + chain_adapter.clone(), + genesis_block, + pow::verify_size, + )?); + pool_adapter.set_chain(shared_chain.clone()); let peer_store = Arc::new(p2p::PeerStore::new(config.db_root.clone())?); - let net_adapter = Arc::new(NetToChainAdapter::new(shared_chain.clone(), - tx_pool.clone(), - peer_store.clone())); - let p2p_server = - Arc::new(p2p::Server::new(config.capabilities, config.p2p_config.unwrap(), net_adapter.clone())); + let net_adapter = Arc::new(NetToChainAdapter::new( + shared_chain.clone(), + tx_pool.clone(), + peer_store.clone(), + )); + let p2p_server = Arc::new(p2p::Server::new( + config.capabilities, + config.p2p_config.unwrap(), + net_adapter.clone(), + )); chain_adapter.init(p2p_server.clone()); let seed = seed::Seeder::new(config.capabilities, peer_store.clone(), p2p_server.clone()); match config.seeding_type.clone() { Seeding::None => {} Seeding::List => { - seed.connect_and_monitor(evt_handle.clone(), 
seed::predefined_seeds(config.seeds.as_mut().unwrap().clone())); + seed.connect_and_monitor( + evt_handle.clone(), + seed::predefined_seeds(config.seeds.as_mut().unwrap().clone()), + ); } Seeding::WebStatic => { seed.connect_and_monitor(evt_handle.clone(), seed::web_seeds(evt_handle.clone())); @@ -121,9 +133,11 @@ impl Server { info!("Starting rest apis at: {}", &config.api_http_addr); - api::start_rest_apis(config.api_http_addr.clone(), - shared_chain.clone(), - tx_pool.clone()); + api::start_rest_apis( + config.api_http_addr.clone(), + shared_chain.clone(), + tx_pool.clone(), + ); warn!("Grin server started."); Ok(Server { @@ -138,7 +152,12 @@ impl Server { /// Asks the server to connect to a peer at the provided network address. pub fn connect_peer(&self, addr: SocketAddr) -> Result<(), Error> { let handle = self.evt_handle.clone(); - handle.spawn(self.p2p.connect_peer(addr, handle.clone()).map(|_| ()).map_err(|_| ())); + handle.spawn( + self.p2p + .connect_peer(addr, handle.clone()) + .map(|_| ()) + .map_err(|_| ()), + ); Ok(()) } @@ -154,7 +173,7 @@ impl Server { let proof_size = global::proofsize(); let mut miner = miner::Miner::new(config.clone(), self.chain.clone(), self.tx_pool.clone()); - miner.set_debug_output_id(format!("Port {}",self.config.p2p_config.unwrap().port)); + miner.set_debug_output_id(format!("Port {}", self.config.p2p_config.unwrap().port)); thread::spawn(move || { miner.run_loop(config.clone(), cuckoo_size as u32, proof_size); }); @@ -165,12 +184,14 @@ impl Server { self.chain.head().unwrap() } - /// Returns a set of stats about this server. This and the ServerStats structure - /// can be updated over time to include any information needed by tests or other + /// Returns a set of stats about this server. This and the ServerStats + /// structure + /// can be updated over time to include any information needed by tests or + /// other /// consumers - pub fn get_server_stats(&self) -> Result{ - Ok(ServerStats{ + pub fn get_server_stats(&self) -> Result { + Ok(ServerStats { peer_count: self.peer_count(), head: self.head(), }) diff --git a/grin/src/sync.rs b/grin/src/sync.rs index f59e53341..7bc582d49 100644 --- a/grin/src/sync.rs +++ b/grin/src/sync.rs @@ -129,8 +129,10 @@ impl Syncer { prev_h = header.previous; } - debug!("Added {} full block hashes to download.", - blocks_to_download.len()); + debug!( + "Added {} full block hashes to download.", + blocks_to_download.len() + ); Ok(()) } @@ -141,7 +143,8 @@ impl Syncer { if blocks_downloading.len() > MAX_BODY_DOWNLOADS { // clean up potentially dead downloads let twenty_sec_ago = Instant::now() - Duration::from_secs(20); - blocks_downloading.iter() + blocks_downloading + .iter() .position(|&h| h.1 < twenty_sec_ago) .map(|n| blocks_downloading.remove(n)); } else { @@ -158,8 +161,10 @@ impl Syncer { } blocks_downloading.push((h, Instant::now())); } - debug!("Requesting more full block hashes to download, total: {}.", - blocks_to_download.len()); + debug!( + "Requesting more full block hashes to download, total: {}.", + blocks_to_download.len() + ); } } @@ -181,10 +186,12 @@ impl Syncer { let peer = self.p2p.most_work_peer(); let locator = self.get_locator(&tip)?; if let Some(p) = peer { - debug!("Asking peer {} for more block headers starting from {} at {}.", - p.info.addr, - tip.last_block_h, - tip.height); + debug!( + "Asking peer {} for more block headers starting from {} at {}.", + p.info.addr, + tip.last_block_h, + tip.height + ); p.send_header_request(locator)?; } else { warn!("Could not get most worked peer to 
request headers."); diff --git a/grin/src/types.rs b/grin/src/types.rs index 296300617..6465ccb7b 100644 --- a/grin/src/types.rs +++ b/grin/src/types.rs @@ -119,12 +119,10 @@ impl Default for ServerConfig { /// /// /// - #[derive(Clone)] pub struct ServerStats { /// Number of peers - pub peer_count:u32, + pub peer_count: u32, /// Chain head pub head: chain::Tip, } - diff --git a/p2p/src/conn.rs b/p2p/src/conn.rs index cc69cb41e..555e5cdc1 100644 --- a/p2p/src/conn.rs +++ b/p2p/src/conn.rs @@ -42,22 +42,26 @@ pub trait Handler: Sync + Send { /// Handle function to implement to process incoming messages. A sender to /// reply immediately as well as the message header and its unparsed body /// are provided. - fn handle(&self, - sender: UnboundedSender>, - header: MsgHeader, - body: Vec) - -> Result, ser::Error>; + fn handle( + &self, + sender: UnboundedSender>, + header: MsgHeader, + body: Vec, + ) -> Result, ser::Error>; } impl Handler for F - where F: Fn(UnboundedSender>, MsgHeader, Vec) -> Result, ser::Error>, - F: Sync + Send +where + F: Fn(UnboundedSender>, MsgHeader, Vec) + -> Result, ser::Error>, + F: Sync + Send, { - fn handle(&self, - sender: UnboundedSender>, - header: MsgHeader, - body: Vec) - -> Result, ser::Error> { + fn handle( + &self, + sender: UnboundedSender>, + header: MsgHeader, + body: Vec, + ) -> Result, ser::Error> { self(sender, header, body) } } @@ -87,10 +91,12 @@ impl Connection { /// Start listening on the provided connection and wraps it. Does not hang /// the current thread, instead just returns a future and the Connection /// itself. - pub fn listen(conn: TcpStream, - handler: F) - -> (Connection, Box>) - where F: Handler + 'static + pub fn listen( + conn: TcpStream, + handler: F, + ) -> (Connection, Box>) + where + F: Handler + 'static, { let (reader, writer) = conn.split(); @@ -105,7 +111,9 @@ impl Connection { // same for closing the connection let (close_tx, close_rx) = futures::sync::mpsc::channel(1); - let close_conn = close_rx.for_each(|_| Ok(())).map_err(|_| Error::ConnectionClose); + let close_conn = close_rx.for_each(|_| Ok(())).map_err( + |_| Error::ConnectionClose, + ); let me = Connection { outbound_chan: tx.clone(), @@ -123,21 +131,25 @@ impl Connection { let write_msg = me.write_msg(rx, writer).map(|_| ()); // select between our different futures and return them - let fut = - Box::new(close_conn.select(read_msg.select(write_msg).map(|_| ()).map_err(|(e, _)| e)) + let fut = Box::new( + close_conn + .select(read_msg.select(write_msg).map(|_| ()).map_err(|(e, _)| e)) .map(|_| ()) - .map_err(|(e, _)| e)); + .map_err(|(e, _)| e), + ); (me, fut) } /// Prepares the future that gets message data produced by our system and /// sends it to the peer connection - fn write_msg(&self, - rx: UnboundedReceiver>, - writer: W) - -> Box> - where W: AsyncWrite + 'static + fn write_msg( + &self, + rx: UnboundedReceiver>, + writer: W, + ) -> Box> + where + W: AsyncWrite + 'static, { let sent_bytes = self.sent_bytes.clone(); @@ -158,13 +170,15 @@ impl Connection { /// Prepares the future reading from the peer connection, parsing each /// message and forwarding them appropriately based on their type - fn read_msg(&self, - sender: UnboundedSender>, - reader: R, - handler: F) - -> Box> - where F: Handler + 'static, - R: AsyncRead + 'static + fn read_msg( + &self, + sender: UnboundedSender>, + reader: R, + handler: F, + ) -> Box> + where + F: Handler + 'static, + R: AsyncRead + 'static, { // infinite iterator stream so we repeat the message reading logic until the @@ 
-218,10 +232,15 @@ impl Connection { let mut body_data = vec![]; try!(ser::serialize(&mut body_data, body)); let mut data = vec![]; - try!(ser::serialize(&mut data, &MsgHeader::new(t, body_data.len() as u64))); + try!(ser::serialize( + &mut data, + &MsgHeader::new(t, body_data.len() as u64), + )); data.append(&mut body_data); - self.outbound_chan.send(data).map_err(|_| Error::ConnectionClose) + self.outbound_chan.send(data).map_err( + |_| Error::ConnectionClose, + ) } /// Bytes sent and received by this peer to the remote peer. @@ -242,10 +261,12 @@ pub struct TimeoutConnection { impl TimeoutConnection { /// Same as Connection - pub fn listen(conn: TcpStream, - handler: F) - -> (TimeoutConnection, Box>) - where F: Handler + 'static + pub fn listen( + conn: TcpStream, + handler: F, + ) -> (TimeoutConnection, Box>) + where + F: Handler + 'static, { let expects = Arc::new(Mutex::new(vec![])); @@ -258,7 +279,8 @@ impl TimeoutConnection { let recv_h = try!(handler.handle(sender, header, data)); let mut expects = exp.lock().unwrap(); - let filtered = expects.iter() + let filtered = expects + .iter() .filter(|&&(typ, h, _): &&(Type, Option, Instant)| { msg_type != typ || h.is_some() && recv_h != h }) @@ -288,17 +310,21 @@ impl TimeoutConnection { underlying: conn, expected_responses: expects, }; - (me, Box::new(fut.select(timer).map(|_| ()).map_err(|(e1, _)| e1))) + ( + me, + Box::new(fut.select(timer).map(|_| ()).map_err(|(e1, _)| e1)), + ) } /// Sends a request and registers a timer on the provided message type and /// optionally the hash of the sent data. - pub fn send_request(&self, - t: Type, - rt: Type, - body: &W, - expect_h: Option<(Hash)>) - -> Result<(), Error> { + pub fn send_request( + &self, + t: Type, + rt: Type, + body: &W, + expect_h: Option<(Hash)>, + ) -> Result<(), Error> { let _sent = try!(self.underlying.send_msg(t, body)); let mut expects = self.expected_responses.lock().unwrap(); diff --git a/p2p/src/handshake.rs b/p2p/src/handshake.rs index dc4ee8cff..05e167c57 100644 --- a/p2p/src/handshake.rs +++ b/p2p/src/handshake.rs @@ -47,12 +47,13 @@ impl Handshake { } /// Handles connecting to a new remote peer, starting the version handshake. 
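// A sketch of the bookkeeping TimeoutConnection::send_request() does above:
// it sends a message of type t and records the expected response type rt
// (plus an optional hash) with a timestamp, so the read side can cancel the
// timeout when the matching response arrives. The GetHeaders/Headers types
// and the Locator struct are assumed here from p2p::msg, as the protocol
// code elsewhere in this patch uses them:
fn sketch_header_request(conn: &TimeoutConnection, locator: Vec<Hash>) -> Result<(), Error> {
	conn.send_request(Type::GetHeaders, Type::Headers, &Locator { hashes: locator }, None)
}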
-	pub fn connect(&self,
-	               capab: Capabilities,
-	               total_difficulty: Difficulty,
-	               self_addr: SocketAddr,
-	               conn: TcpStream)
-	               -> Box<Future<Item = (TcpStream, ProtocolV1, PeerInfo), Error = Error>> {
+	pub fn connect(
+		&self,
+		capab: Capabilities,
+		total_difficulty: Difficulty,
+		self_addr: SocketAddr,
+		conn: TcpStream,
+	) -> Box<Future<Item = (TcpStream, ProtocolV1, PeerInfo), Error = Error>> {
 		// prepare the first part of the handshake
 		let nonce = self.next_nonce();
 		let hand = Hand {
@@ -66,79 +67,84 @@ impl Handshake {
 		};

 		// write and read the handshake response
-		Box::new(write_msg(conn, hand, Type::Hand)
-			.and_then(|conn| read_msg::<Shake>(conn))
-			.and_then(|(conn, shake)| {
-				if shake.version != 1 {
-					Err(Error::Serialization(ser::Error::UnexpectedData {
-						expected: vec![PROTOCOL_VERSION as u8],
-						received: vec![shake.version as u8],
-					}))
-				} else {
-					let peer_info = PeerInfo {
-						capabilities: shake.capabilities,
-						user_agent: shake.user_agent,
-						addr: conn.peer_addr().unwrap(),
-						version: shake.version,
-						total_difficulty: shake.total_difficulty,
-					};
-
-					info!("Connected to peer {:?}", peer_info);
-					// when more than one protocol version is supported, choosing should go here
-					Ok((conn, ProtocolV1::new(), peer_info))
-				}
-			}))
+		Box::new(
+			write_msg(conn, hand, Type::Hand)
+				.and_then(|conn| read_msg::<Shake>(conn))
+				.and_then(|(conn, shake)| {
+					if shake.version != 1 {
+						Err(Error::Serialization(ser::Error::UnexpectedData {
+							expected: vec![PROTOCOL_VERSION as u8],
+							received: vec![shake.version as u8],
+						}))
+					} else {
+						let peer_info = PeerInfo {
+							capabilities: shake.capabilities,
+							user_agent: shake.user_agent,
+							addr: conn.peer_addr().unwrap(),
+							version: shake.version,
+							total_difficulty: shake.total_difficulty,
+						};
+
+						info!("Connected to peer {:?}", peer_info);
+						// when more than one protocol version is supported, choosing should go here
+						Ok((conn, ProtocolV1::new(), peer_info))
+					}
+				}),
+		)
 	}

 	/// Handles receiving a connection from a new remote peer that started the
 	/// version handshake.
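// The version check in connect() above reuses ser::Error::UnexpectedData to
// report a protocol version mismatch, carrying the expected and received
// versions as single bytes. A small sketch of building that same error:
fn sketch_version_error(received: u32) -> Error {
	Error::Serialization(ser::Error::UnexpectedData {
		expected: vec![PROTOCOL_VERSION as u8],
		received: vec![received as u8],
	})
}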
- pub fn handshake(&self, - capab: Capabilities, - total_difficulty: Difficulty, - conn: TcpStream) - -> Box> { + pub fn handshake( + &self, + capab: Capabilities, + total_difficulty: Difficulty, + conn: TcpStream, + ) -> Box> { let nonces = self.nonces.clone(); - Box::new(read_msg::(conn) - .and_then(move |(conn, hand)| { - if hand.version != 1 { - return Err(Error::Serialization(ser::Error::UnexpectedData { - expected: vec![PROTOCOL_VERSION as u8], - received: vec![hand.version as u8], - })); - } - { - // check the nonce to see if we could be trying to connect to ourselves - let nonces = nonces.read().unwrap(); - if nonces.contains(&hand.nonce) { + Box::new( + read_msg::(conn) + .and_then(move |(conn, hand)| { + if hand.version != 1 { return Err(Error::Serialization(ser::Error::UnexpectedData { - expected: vec![], - received: vec![], + expected: vec![PROTOCOL_VERSION as u8], + received: vec![hand.version as u8], })); } - } - // all good, keep peer info - let peer_info = PeerInfo { - capabilities: hand.capabilities, - user_agent: hand.user_agent, - addr: hand.sender_addr.0, - version: hand.version, - total_difficulty: hand.total_difficulty, - }; - // send our reply with our info - let shake = Shake { - version: PROTOCOL_VERSION, - capabilities: capab, - total_difficulty: total_difficulty, - user_agent: USER_AGENT.to_string(), - }; - Ok((conn, shake, peer_info)) - }) - .and_then(|(conn, shake, peer_info)| { - debug!("Success handshake with {}.", peer_info.addr); - write_msg(conn, shake, Type::Shake) + { + // check the nonce to see if we could be trying to connect to ourselves + let nonces = nonces.read().unwrap(); + if nonces.contains(&hand.nonce) { + return Err(Error::Serialization(ser::Error::UnexpectedData { + expected: vec![], + received: vec![], + })); + } + } + // all good, keep peer info + let peer_info = PeerInfo { + capabilities: hand.capabilities, + user_agent: hand.user_agent, + addr: hand.sender_addr.0, + version: hand.version, + total_difficulty: hand.total_difficulty, + }; + // send our reply with our info + let shake = Shake { + version: PROTOCOL_VERSION, + capabilities: capab, + total_difficulty: total_difficulty, + user_agent: USER_AGENT.to_string(), + }; + Ok((conn, shake, peer_info)) + }) + .and_then(|(conn, shake, peer_info)| { + debug!("Success handshake with {}.", peer_info.addr); + write_msg(conn, shake, Type::Shake) // when more than one protocol version is supported, choosing should go here .map(|conn| (conn, ProtocolV1::new(), peer_info)) - })) + }), + ) } /// Generate a new random nonce and store it in our ring buffer diff --git a/p2p/src/msg.rs b/p2p/src/msg.rs index 9ae6a5cb9..2641650e0 100644 --- a/p2p/src/msg.rs +++ b/p2p/src/msg.rs @@ -70,7 +70,8 @@ enum_from_primitive! { /// the header first, handles its validation and then reads the Readable body, /// allocating buffers of the right size. 
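// For illustration, the framing contract read_msg() and write_msg() below
// agree on: a fixed-size MsgHeader (two magic bytes, one type byte, a u64
// body length) goes first, then exactly msg_len bytes of body. A sketch of
// serializing just the header (assumes HEADER_LEN is the 11-byte total):
fn sketch_header_bytes(msg_type: Type, body_len: u64) -> Vec<u8> {
	let mut data = vec![];
	ser::serialize(&mut data, &MsgHeader::new(msg_type, body_len)).unwrap();
	assert_eq!(data.len(), HEADER_LEN as usize);
	data
}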
pub fn read_msg(conn: TcpStream) -> Box> - where T: Readable + 'static +where + T: Readable + 'static, { let read_header = read_exact(conn, vec![0u8; HEADER_LEN as usize]) .from_err() @@ -84,7 +85,8 @@ pub fn read_msg(conn: TcpStream) -> Box(conn: TcpStream) -> Box(conn: TcpStream, - msg: T, - msg_type: Type) - -> Box> - where T: Writeable + 'static +pub fn write_msg( + conn: TcpStream, + msg: T, + msg_type: Type, +) -> Box> +where + T: Writeable + 'static, { let write_msg = ok((conn)).and_then(move |conn| { // prepare the body first so we know its serialized length @@ -149,11 +153,13 @@ impl MsgHeader { impl Writeable for MsgHeader { fn write(&self, writer: &mut W) -> Result<(), ser::Error> { - ser_multiwrite!(writer, - [write_u8, self.magic[0]], - [write_u8, self.magic[1]], - [write_u8, self.msg_type as u8], - [write_u64, self.msg_len]); + ser_multiwrite!( + writer, + [write_u8, self.magic[0]], + [write_u8, self.magic[1]], + [write_u8, self.msg_type as u8], + [write_u64, self.msg_len] + ); Ok(()) } } @@ -199,10 +205,12 @@ pub struct Hand { impl Writeable for Hand { fn write(&self, writer: &mut W) -> Result<(), ser::Error> { - ser_multiwrite!(writer, - [write_u32, self.version], - [write_u32, self.capabilities.bits()], - [write_u64, self.nonce]); + ser_multiwrite!( + writer, + [write_u32, self.version], + [write_u32, self.capabilities.bits()], + [write_u64, self.nonce] + ); self.total_difficulty.write(writer).unwrap(); self.sender_addr.write(writer).unwrap(); self.receiver_addr.write(writer).unwrap(); @@ -218,7 +226,9 @@ impl Readable for Hand { let receiver_addr = try!(SockAddr::read(reader)); let ua = try!(reader.read_vec()); let user_agent = try!(String::from_utf8(ua).map_err(|_| ser::Error::CorruptedData)); - let capabilities = try!(Capabilities::from_bits(capab).ok_or(ser::Error::CorruptedData)); + let capabilities = try!(Capabilities::from_bits(capab).ok_or( + ser::Error::CorruptedData, + )); Ok(Hand { version: version, capabilities: capabilities, @@ -248,9 +258,11 @@ pub struct Shake { impl Writeable for Shake { fn write(&self, writer: &mut W) -> Result<(), ser::Error> { - ser_multiwrite!(writer, - [write_u32, self.version], - [write_u32, self.capabilities.bits()]); + ser_multiwrite!( + writer, + [write_u32, self.version], + [write_u32, self.capabilities.bits()] + ); self.total_difficulty.write(writer).unwrap(); writer.write_bytes(&self.user_agent).unwrap(); Ok(()) @@ -263,7 +275,9 @@ impl Readable for Shake { let total_diff = try!(Difficulty::read(reader)); let ua = try!(reader.read_vec()); let user_agent = try!(String::from_utf8(ua).map_err(|_| ser::Error::CorruptedData)); - let capabilities = try!(Capabilities::from_bits(capab).ok_or(ser::Error::CorruptedData)); + let capabilities = try!(Capabilities::from_bits(capab).ok_or( + ser::Error::CorruptedData, + )); Ok(Shake { version: version, capabilities: capabilities, @@ -288,7 +302,9 @@ impl Writeable for GetPeerAddrs { impl Readable for GetPeerAddrs { fn read(reader: &mut Reader) -> Result { let capab = try!(reader.read_u32()); - let capabilities = try!(Capabilities::from_bits(capab).ok_or(ser::Error::CorruptedData)); + let capabilities = try!(Capabilities::from_bits(capab).ok_or( + ser::Error::CorruptedData, + )); Ok(GetPeerAddrs { capabilities: capabilities }) } } @@ -345,7 +361,9 @@ impl Writeable for PeerError { impl Readable for PeerError { fn read(reader: &mut Reader) -> Result { let (code, msg) = ser_multiread!(reader, read_u32, read_vec); - let message = try!(String::from_utf8(msg).map_err(|_| ser::Error::CorruptedData)); 
+ let message = try!(String::from_utf8(msg).map_err( + |_| ser::Error::CorruptedData, + )); Ok(PeerError { code: code, message: message, @@ -362,10 +380,12 @@ impl Writeable for SockAddr { fn write(&self, writer: &mut W) -> Result<(), ser::Error> { match self.0 { SocketAddr::V4(sav4) => { - ser_multiwrite!(writer, - [write_u8, 0], - [write_fixed_bytes, &sav4.ip().octets().to_vec()], - [write_u16, sav4.port()]); + ser_multiwrite!( + writer, + [write_u8, 0], + [write_fixed_bytes, &sav4.ip().octets().to_vec()], + [write_u16, sav4.port()] + ); } SocketAddr::V6(sav6) => { try!(writer.write_u8(1)); @@ -385,25 +405,28 @@ impl Readable for SockAddr { if v4_or_v6 == 0 { let ip = try!(reader.read_fixed_bytes(4)); let port = try!(reader.read_u16()); - Ok(SockAddr(SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(ip[0], - ip[1], - ip[2], - ip[3]), - port)))) + Ok(SockAddr(SocketAddr::V4(SocketAddrV4::new( + Ipv4Addr::new(ip[0], ip[1], ip[2], ip[3]), + port, + )))) } else { let ip = try_map_vec!([0..8], |_| reader.read_u16()); let port = try!(reader.read_u16()); - Ok(SockAddr(SocketAddr::V6(SocketAddrV6::new(Ipv6Addr::new(ip[0], - ip[1], - ip[2], - ip[3], - ip[4], - ip[5], - ip[6], - ip[7]), - port, - 0, - 0)))) + Ok(SockAddr(SocketAddr::V6(SocketAddrV6::new( + Ipv6Addr::new( + ip[0], + ip[1], + ip[2], + ip[3], + ip[4], + ip[5], + ip[6], + ip[7], + ), + port, + 0, + 0, + )))) } } } diff --git a/p2p/src/peer.rs b/p2p/src/peer.rs index b0395da15..2e82684e0 100644 --- a/p2p/src/peer.rs +++ b/p2p/src/peer.rs @@ -42,48 +42,58 @@ unsafe impl Send for Peer {} impl Peer { /// Initiates the handshake with another peer. - pub fn connect(conn: TcpStream, - capab: Capabilities, - total_difficulty: Difficulty, - self_addr: SocketAddr, - hs: &Handshake) - -> Box> { + pub fn connect( + conn: TcpStream, + capab: Capabilities, + total_difficulty: Difficulty, + self_addr: SocketAddr, + hs: &Handshake, + ) -> Box> { let connect_peer = hs.connect(capab, total_difficulty, self_addr, conn) .and_then(|(conn, proto, info)| { - Ok((conn, - Peer { - info: info, - proto: Box::new(proto), - state: Arc::new(RwLock::new(State::Connected)), - })) + Ok(( + conn, + Peer { + info: info, + proto: Box::new(proto), + state: Arc::new(RwLock::new(State::Connected)), + }, + )) }); Box::new(connect_peer) } /// Accept a handshake initiated by another peer. - pub fn accept(conn: TcpStream, - capab: Capabilities, - total_difficulty: Difficulty, - hs: &Handshake) - -> Box> { - let hs_peer = hs.handshake(capab, total_difficulty, conn) - .and_then(|(conn, proto, info)| { - Ok((conn, - Peer { - info: info, - proto: Box::new(proto), - state: Arc::new(RwLock::new(State::Connected)), - })) - }); + pub fn accept( + conn: TcpStream, + capab: Capabilities, + total_difficulty: Difficulty, + hs: &Handshake, + ) -> Box> { + let hs_peer = hs.handshake(capab, total_difficulty, conn).and_then( + |(conn, + proto, + info)| { + Ok(( + conn, + Peer { + info: info, + proto: Box::new(proto), + state: Arc::new(RwLock::new(State::Connected)), + }, + )) + }, + ); Box::new(hs_peer) } /// Main peer loop listening for messages and forwarding to the rest of the /// system. 
- pub fn run(&self, - conn: TcpStream, - na: Arc) - -> Box> { + pub fn run( + &self, + conn: TcpStream, + na: Arc, + ) -> Box> { let addr = self.info.addr; let state = self.state.clone(); diff --git a/p2p/src/protocol.rs b/p2p/src/protocol.rs index 9cca056e9..69e19dfb1 100644 --- a/p2p/src/protocol.rs +++ b/p2p/src/protocol.rs @@ -44,10 +44,11 @@ impl ProtocolV1 { impl Protocol for ProtocolV1 { /// Sets up the protocol reading, writing and closing logic. - fn handle(&self, - conn: TcpStream, - adapter: Arc) - -> Box> { + fn handle( + &self, + conn: TcpStream, + adapter: Arc, + ) -> Box> { let (conn, listener) = TimeoutConnection::listen(conn, move |sender, header, data| { let adapt = adapter.as_ref(); @@ -81,10 +82,12 @@ impl Protocol for ProtocolV1 { } fn send_header_request(&self, locator: Vec) -> Result<(), Error> { - self.send_request(Type::GetHeaders, - Type::Headers, - &Locator { hashes: locator }, - None) + self.send_request( + Type::GetHeaders, + Type::Headers, + &Locator { hashes: locator }, + None, + ) } fn send_block_request(&self, h: Hash) -> Result<(), Error> { @@ -92,10 +95,12 @@ impl Protocol for ProtocolV1 { } fn send_peer_request(&self, capab: Capabilities) -> Result<(), Error> { - self.send_request(Type::GetPeerAddrs, - Type::PeerAddrs, - &GetPeerAddrs { capabilities: capab }, - None) + self.send_request( + Type::GetPeerAddrs, + Type::PeerAddrs, + &GetPeerAddrs { capabilities: capab }, + None, + ) } /// Close the connection to the remote peer @@ -109,21 +114,23 @@ impl ProtocolV1 { self.conn.borrow().send_msg(t, body) } - fn send_request(&self, - t: Type, - rt: Type, - body: &W, - expect_resp: Option) - -> Result<(), Error> { + fn send_request( + &self, + t: Type, + rt: Type, + body: &W, + expect_resp: Option, + ) -> Result<(), Error> { self.conn.borrow().send_request(t, rt, body, expect_resp) } } -fn handle_payload(adapter: &NetAdapter, - sender: UnboundedSender>, - header: MsgHeader, - buf: Vec) - -> Result, ser::Error> { +fn handle_payload( + adapter: &NetAdapter, + sender: UnboundedSender>, + header: MsgHeader, + buf: Vec, +) -> Result, ser::Error> { match header.msg_type { Type::Ping => { let data = ser::ser_vec(&MsgHeader::new(Type::Pong, 0))?; @@ -144,8 +151,10 @@ fn handle_payload(adapter: &NetAdapter, let mut body_data = vec![]; try!(ser::serialize(&mut body_data, &b)); let mut data = vec![]; - try!(ser::serialize(&mut data, - &MsgHeader::new(Type::Block, body_data.len() as u64))); + try!(ser::serialize( + &mut data, + &MsgHeader::new(Type::Block, body_data.len() as u64), + )); data.append(&mut body_data); sender.send(data).unwrap(); } @@ -164,10 +173,15 @@ fn handle_payload(adapter: &NetAdapter, // serialize and send all the headers over let mut body_data = vec![]; - try!(ser::serialize(&mut body_data, &Headers { headers: headers })); + try!(ser::serialize( + &mut body_data, + &Headers { headers: headers }, + )); let mut data = vec![]; - try!(ser::serialize(&mut data, - &MsgHeader::new(Type::Headers, body_data.len() as u64))); + try!(ser::serialize( + &mut data, + &MsgHeader::new(Type::Headers, body_data.len() as u64), + )); data.append(&mut body_data); sender.send(data).unwrap(); @@ -184,13 +198,17 @@ fn handle_payload(adapter: &NetAdapter, // serialize and send all the headers over let mut body_data = vec![]; - try!(ser::serialize(&mut body_data, - &PeerAddrs { - peers: peer_addrs.iter().map(|sa| SockAddr(*sa)).collect(), - })); + try!(ser::serialize( + &mut body_data, + &PeerAddrs { + peers: peer_addrs.iter().map(|sa| SockAddr(*sa)).collect(), + }, + )); let 
mut data = vec![];
- try!(ser::serialize(&mut data,
- &MsgHeader::new(Type::PeerAddrs, body_data.len() as u64)));
+ try!(ser::serialize(
+ &mut data,
+ &MsgHeader::new(Type::PeerAddrs, body_data.len() as u64),
+ ));
 data.append(&mut body_data);
 sender.send(data).unwrap();

diff --git a/p2p/src/rate_limit.rs b/p2p/src/rate_limit.rs
index 7e86d1d1c..88cae3614 100644
--- a/p2p/src/rate_limit.rs
+++ b/p2p/src/rate_limit.rs
@@ -77,11 +77,18 @@ impl<R: io::Read> io::Read for ThrottledReader<R> {
 // Check if Allowed
 if self.allowed < 1 {
- return Err(io::Error::new(io::ErrorKind::WouldBlock, "Reached Allowed Read Limit"))
+ return Err(io::Error::new(
+ io::ErrorKind::WouldBlock,
+ "Reached Allowed Read Limit",
+ ));
 }

 // Read Max Allowed
- let buf = if buf.len() > self.allowed { &mut buf[0..self.allowed]} else { buf };
+ let buf = if buf.len() > self.allowed {
+ &mut buf[0..self.allowed]
+ } else {
+ buf
+ };
 let res = self.reader.read(buf);

 // Decrement Allowed amount read
@@ -92,7 +99,7 @@ impl<R: io::Read> io::Read for ThrottledReader<R> {
 }
 }

-impl<R: AsyncRead> AsyncRead for ThrottledReader<R> { }
+impl<R: AsyncRead> AsyncRead for ThrottledReader<R> {}

/// A Rate Limited Writer
#[derive(Debug)]
@@ -151,11 +158,18 @@ impl<W: io::Write> io::Write for ThrottledWriter<W> {

 // Check if Allowed
 if self.allowed < 1 {
- return Err(io::Error::new(io::ErrorKind::WouldBlock, "Reached Allowed Write Limit"))
+ return Err(io::Error::new(
+ io::ErrorKind::WouldBlock,
+ "Reached Allowed Write Limit",
+ ));
 }

 // Write max allowed
- let buf = if buf.len() > self.allowed { &buf[0..self.allowed]} else { buf };
+ let buf = if buf.len() > self.allowed {
+ &buf[0..self.allowed]
+ } else {
+ buf
+ };
 let res = self.writer.write(buf);

 // Decrement Allowed amount written
diff --git a/p2p/src/server.rs b/p2p/src/server.rs
index af23bc004..29a820c5f 100644
--- a/p2p/src/server.rs
+++ b/p2p/src/server.rs
@@ -132,17 +132,22 @@ impl Server {
 let mut stop_mut = self.stop.borrow_mut();
 *stop_mut = Some(stop);
 }
- Box::new(server.select(stop_rx.map_err(|_| Error::ConnectionClose)).then(|res| match res {
- Ok((_, _)) => Ok(()),
- Err((e, _)) => Err(e),
- }))
+ Box::new(
+ server
+ .select(stop_rx.map_err(|_| Error::ConnectionClose))
+ .then(|res| match res {
+ Ok((_, _)) => Ok(()),
+ Err((e, _)) => Err(e),
+ }),
+ )
 }

 /// Asks the server to connect to a new peer.
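// An aside on the ThrottledReader/ThrottledWriter hunks above, before
// connect_peer below: both sides follow one pattern, namely clamp the
// caller's buffer to the remaining allowance, do the inner I/O, charge the
// allowance for what was actually transferred, and surface WouldBlock once
// the budget is spent. A self-contained sketch of the read side; the
// Throttled name and fixed allowance are illustrative, not the crate's API:

use std::io::{self, Read};

struct Throttled<R> {
    inner: R,
    allowed: usize, // bytes still permitted in the current window
}

impl<R: Read> Read for Throttled<R> {
    fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
        if self.allowed < 1 {
            // out of budget: report WouldBlock rather than stalling
            return Err(io::Error::new(io::ErrorKind::WouldBlock, "allowance spent"));
        }
        // never hand the inner reader more room than the allowance permits
        let cap = if buf.len() > self.allowed { self.allowed } else { buf.len() };
        let n = self.inner.read(&mut buf[..cap])?;
        self.allowed -= n; // charge for what was actually read
        Ok(n)
    }
}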
- pub fn connect_peer(&self, - addr: SocketAddr, - h: reactor::Handle) - -> Box>, Error = Error>> { + pub fn connect_peer( + &self, + addr: SocketAddr, + h: reactor::Handle, + ) -> Box>, Error = Error>> { if let Some(p) = self.get_peer(addr) { // if we're already connected to the addr, just return the peer return Box::new(future::ok(Some(p))); @@ -163,7 +168,8 @@ impl Server { let socket = TcpStream::connect(&addr, &h).map_err(|e| Error::Connection(e)); let h2 = h.clone(); - let request = socket.and_then(move |socket| { + let request = socket + .and_then(move |socket| { let peers = peers.clone(); let total_diff = adapter1.clone().total_difficulty(); @@ -280,11 +286,13 @@ impl Server { } // Adds the peer built by the provided future in the peers map -fn add_to_peers(peers: Arc>>>, - adapter: Arc, - peer_fut: A) - -> Box), ()>, Error = Error>> - where A: IntoFuture + 'static +fn add_to_peers( + peers: Arc>>>, + adapter: Arc, + peer_fut: A, +) -> Box), ()>, Error = Error>> +where + A: IntoFuture + 'static, { let peer_add = peer_fut.into_future().map(move |(conn, peer)| { adapter.peer_connected(&peer.info); @@ -297,15 +305,17 @@ fn add_to_peers(peers: Arc>>>, } // Adds a timeout to a future -fn with_timeout(fut: Box, Error = Error>>, - h: &reactor::Handle) - -> Box> { +fn with_timeout( + fut: Box, Error = Error>>, + h: &reactor::Handle, +) -> Box> { let timeout = reactor::Timeout::new(Duration::new(5, 0), h).unwrap(); - let timed = fut.select(timeout.map(Err).from_err()) - .then(|res| match res { + let timed = fut.select(timeout.map(Err).from_err()).then( + |res| match res { Ok((Ok(inner), _timeout)) => Ok(inner), Ok((_, _accept)) => Err(Error::Timeout), Err((e, _other)) => Err(e), - }); + }, + ); Box::new(timed) } diff --git a/p2p/src/store.rs b/p2p/src/store.rs index 57a66af4f..754aa0497 100644 --- a/p2p/src/store.rs +++ b/p2p/src/store.rs @@ -53,10 +53,12 @@ pub struct PeerData { impl Writeable for PeerData { fn write(&self, writer: &mut W) -> Result<(), ser::Error> { SockAddr(self.addr).write(writer)?; - ser_multiwrite!(writer, - [write_u32, self.capabilities.bits()], - [write_bytes, &self.user_agent], - [write_u8, self.flags as u8]); + ser_multiwrite!( + writer, + [write_u32, self.capabilities.bits()], + [write_bytes, &self.user_agent], + [write_u8, self.flags as u8] + ); Ok(()) } } @@ -66,7 +68,9 @@ impl Readable for PeerData { let addr = SockAddr::read(reader)?; let (capab, ua, fl) = ser_multiread!(reader, read_u32, read_vec, read_u8); let user_agent = String::from_utf8(ua).map_err(|_| ser::Error::CorruptedData)?; - let capabilities = Capabilities::from_bits(capab).ok_or(ser::Error::CorruptedData)?; + let capabilities = Capabilities::from_bits(capab).ok_or( + ser::Error::CorruptedData, + )?; match State::from_u8(fl) { Some(flags) => { Ok(PeerData { @@ -94,8 +98,10 @@ impl PeerStore { } pub fn save_peer(&self, p: &PeerData) -> Result<(), Error> { - self.db.put_ser(&to_key(PEER_PREFIX, &mut format!("{}", p.addr).into_bytes())[..], - p) + self.db.put_ser( + &to_key(PEER_PREFIX, &mut format!("{}", p.addr).into_bytes())[..], + p, + ) } fn get_peer(&self, peer_addr: SocketAddr) -> Result { @@ -103,16 +109,22 @@ impl PeerStore { } pub fn exists_peer(&self, peer_addr: SocketAddr) -> Result { - self.db.exists(&to_key(PEER_PREFIX, &mut format!("{}", peer_addr).into_bytes())[..]) + self.db.exists( + &to_key(PEER_PREFIX, &mut format!("{}", peer_addr).into_bytes())[..], + ) } pub fn delete_peer(&self, peer_addr: SocketAddr) -> Result<(), Error> { - self.db.delete(&to_key(PEER_PREFIX, &mut 
format!("{}", peer_addr).into_bytes())[..])
+ self.db.delete(
+ &to_key(PEER_PREFIX, &mut format!("{}", peer_addr).into_bytes())[..],
+ )
 }

 pub fn find_peers(&self, state: State, cap: Capabilities, count: usize) -> Vec<PeerData> {
- let peers_iter = self.db
- .iter::<PeerData>(&to_key(PEER_PREFIX, &mut "".to_string().into_bytes()));
+ let peers_iter = self.db.iter::<PeerData>(&to_key(
+ PEER_PREFIX,
+ &mut "".to_string().into_bytes(),
+ ));
 let mut peers = Vec::with_capacity(count);
 for p in peers_iter {
 if p.flags == state && p.capabilities.contains(cap) {
diff --git a/p2p/src/types.rs b/p2p/src/types.rs
index dd5337e4f..b803e2cfe 100644
--- a/p2p/src/types.rs
+++ b/p2p/src/types.rs
@@ -117,10 +117,8 @@ pub trait Protocol {
 /// be known already, usually passed during construction. Will typically
 /// block so needs to be called within a coroutine. Should also be called
 /// only once.
- fn handle(&self,
- conn: TcpStream,
- na: Arc<NetAdapter>)
- -> Box<Future<Item = (), Error = Error>>;
+ fn handle(&self, conn: TcpStream, na: Arc<NetAdapter>)
+ -> Box<Future<Item = (), Error = Error>>;

 /// Sends a ping message to the remote peer.
 fn send_ping(&self) -> Result<(), Error>;
diff --git a/p2p/tests/peer_handshake.rs b/p2p/tests/peer_handshake.rs
index 29718915a..60edaf92a 100644
--- a/p2p/tests/peer_handshake.rs
+++ b/p2p/tests/peer_handshake.rs
@@ -47,40 +47,47 @@ fn peer_handshake() {
 let rhandle = handle.clone();
 let timeout = reactor::Timeout::new(time::Duration::new(1, 0), &handle).unwrap();
 let timeout_send = reactor::Timeout::new(time::Duration::new(2, 0), &handle).unwrap();
- handle.spawn(timeout.from_err()
- .and_then(move |_| {
- let p2p_conf = p2p::P2PConfig::default();
- let addr = SocketAddr::new(p2p_conf.host, p2p_conf.port);
- let socket = TcpStream::connect(&addr, &phandle).map_err(|e| p2p::Error::Connection(e));
- socket.and_then(move |socket| {
- Peer::connect(socket,
- p2p::UNKNOWN,
- Difficulty::one(),
- my_addr,
- &p2p::handshake::Handshake::new())
- })
- .and_then(move |(socket, peer)| {
- rhandle.spawn(peer.run(socket, net_adapter.clone()).map_err(|e| {
- panic!("Client run failed: {:?}", e);
- }));
- peer.send_ping().unwrap();
- timeout_send.from_err().map(|_| peer)
- })
- .and_then(|peer| {
- let (sent, recv) = peer.transmitted_bytes();
- assert!(sent > 0);
- assert!(recv > 0);
- Ok(())
- })
- .and_then(|_| {
- assert!(server.peer_count() > 0);
- server.stop();
- Ok(())
- })
- })
- .map_err(|e| {
- panic!("Client connection failed: {:?}", e);
- }));
+ handle.spawn(
+ timeout
+ .from_err()
+ .and_then(move |_| {
+ let p2p_conf = p2p::P2PConfig::default();
+ let addr = SocketAddr::new(p2p_conf.host, p2p_conf.port);
+ let socket =
+ TcpStream::connect(&addr, &phandle).map_err(|e| p2p::Error::Connection(e));
+ socket
+ .and_then(move |socket| {
+ Peer::connect(
+ socket,
+ p2p::UNKNOWN,
+ Difficulty::one(),
+ my_addr,
+ &p2p::handshake::Handshake::new(),
+ )
+ })
+ .and_then(move |(socket, peer)| {
+ rhandle.spawn(peer.run(socket, net_adapter.clone()).map_err(|e| {
+ panic!("Client run failed: {:?}", e);
+ }));
+ peer.send_ping().unwrap();
+ timeout_send.from_err().map(|_| peer)
+ })
+ .and_then(|peer| {
+ let (sent, recv) = peer.transmitted_bytes();
+ assert!(sent > 0);
+ assert!(recv > 0);
+ Ok(())
+ })
+ .and_then(|_| {
+ assert!(server.peer_count() > 0);
+ server.stop();
+ Ok(())
+ })
+ })
+ .map_err(|e| {
+ panic!("Client connection failed: {:?}", e);
+ }),
+ );

 evtlp.run(run_server).unwrap();

diff --git a/pool/src/blockchain.rs b/pool/src/blockchain.rs
index 18972e6b2..7e25dd343 100644
--- a/pool/src/blockchain.rs
+++ b/pool/src/blockchain.rs
@@ -22,142 +22,162 @@ use
types::{BlockChain, PoolError}; #[derive(Debug)] pub struct DummyBlockHeaderIndex { - block_headers: HashMap + block_headers: HashMap, } impl DummyBlockHeaderIndex { - pub fn insert(&mut self, commit: Commitment, block_header: block::BlockHeader) { - self.block_headers.insert(commit, block_header); - } + pub fn insert(&mut self, commit: Commitment, block_header: block::BlockHeader) { + self.block_headers.insert(commit, block_header); + } - pub fn get_block_header_by_output_commit(&self, commit: Commitment) -> Result<&block::BlockHeader, PoolError> { - match self.block_headers.get(&commit) { - Some(h) => Ok(h), - None => Err(PoolError::GenericPoolError) - } - } + pub fn get_block_header_by_output_commit( + &self, + commit: Commitment, + ) -> Result<&block::BlockHeader, PoolError> { + match self.block_headers.get(&commit) { + Some(h) => Ok(h), + None => Err(PoolError::GenericPoolError), + } + } } /// A DummyUtxoSet for mocking up the chain pub struct DummyUtxoSet { - outputs : HashMap + outputs: HashMap, } #[allow(dead_code)] impl DummyUtxoSet { - pub fn empty() -> DummyUtxoSet{ - DummyUtxoSet{outputs: HashMap::new()} - } - pub fn root(&self) -> hash::Hash { - hash::ZERO_HASH - } - pub fn apply(&self, b: &block::Block) -> DummyUtxoSet { - let mut new_hashmap = self.outputs.clone(); - for input in &b.inputs { - new_hashmap.remove(&input.commitment()); - } - for output in &b.outputs { - new_hashmap.insert(output.commitment(), output.clone()); - } - DummyUtxoSet{outputs: new_hashmap} - } - pub fn with_block(&mut self, b: &block::Block) { - for input in &b.inputs { - self.outputs.remove(&input.commitment()); - } - for output in &b.outputs { - self.outputs.insert(output.commitment(), output.clone()); - } - } - pub fn rewind(&self, _: &block::Block) -> DummyUtxoSet { - DummyUtxoSet{outputs: HashMap::new()} - } - pub fn get_output(&self, output_ref: &Commitment) -> Option<&transaction::Output> { - self.outputs.get(output_ref) - } + pub fn empty() -> DummyUtxoSet { + DummyUtxoSet { outputs: HashMap::new() } + } + pub fn root(&self) -> hash::Hash { + hash::ZERO_HASH + } + pub fn apply(&self, b: &block::Block) -> DummyUtxoSet { + let mut new_hashmap = self.outputs.clone(); + for input in &b.inputs { + new_hashmap.remove(&input.commitment()); + } + for output in &b.outputs { + new_hashmap.insert(output.commitment(), output.clone()); + } + DummyUtxoSet { outputs: new_hashmap } + } + pub fn with_block(&mut self, b: &block::Block) { + for input in &b.inputs { + self.outputs.remove(&input.commitment()); + } + for output in &b.outputs { + self.outputs.insert(output.commitment(), output.clone()); + } + } + pub fn rewind(&self, _: &block::Block) -> DummyUtxoSet { + DummyUtxoSet { outputs: HashMap::new() } + } + pub fn get_output(&self, output_ref: &Commitment) -> Option<&transaction::Output> { + self.outputs.get(output_ref) + } - fn clone(&self) -> DummyUtxoSet { - DummyUtxoSet{outputs: self.outputs.clone()} - } + fn clone(&self) -> DummyUtxoSet { + DummyUtxoSet { outputs: self.outputs.clone() } + } - // only for testing: add an output to the map - pub fn add_output(&mut self, output: transaction::Output) { - self.outputs.insert(output.commitment(), output); - } - // like above, but doesn't modify in-place so no mut ref needed - pub fn with_output(&self, output: transaction::Output) -> DummyUtxoSet { - let mut new_map = self.outputs.clone(); - new_map.insert(output.commitment(), output); - DummyUtxoSet{outputs: new_map} - } + // only for testing: add an output to the map + pub fn add_output(&mut self, output: 
transaction::Output) { + self.outputs.insert(output.commitment(), output); + } + // like above, but doesn't modify in-place so no mut ref needed + pub fn with_output(&self, output: transaction::Output) -> DummyUtxoSet { + let mut new_map = self.outputs.clone(); + new_map.insert(output.commitment(), output); + DummyUtxoSet { outputs: new_map } + } } /// A DummyChain is the mocked chain for playing with what methods we would /// need #[allow(dead_code)] pub struct DummyChainImpl { - utxo: RwLock, - block_headers: RwLock, - head_header: RwLock>, + utxo: RwLock, + block_headers: RwLock, + head_header: RwLock>, } #[allow(dead_code)] impl DummyChainImpl { - pub fn new() -> DummyChainImpl { - DummyChainImpl{ - utxo: RwLock::new(DummyUtxoSet{outputs: HashMap::new()}), - block_headers: RwLock::new(DummyBlockHeaderIndex{block_headers: HashMap::new()}), - head_header: RwLock::new(vec![]), - } - } + pub fn new() -> DummyChainImpl { + DummyChainImpl { + utxo: RwLock::new(DummyUtxoSet { outputs: HashMap::new() }), + block_headers: RwLock::new(DummyBlockHeaderIndex { block_headers: HashMap::new() }), + head_header: RwLock::new(vec![]), + } + } } impl BlockChain for DummyChainImpl { - fn get_unspent(&self, commitment: &Commitment) -> Result { - let output = self.utxo.read().unwrap().get_output(commitment).cloned(); - match output { - Some(o) => Ok(o), - None => Err(PoolError::GenericPoolError), - } - } + fn get_unspent(&self, commitment: &Commitment) -> Result { + let output = self.utxo.read().unwrap().get_output(commitment).cloned(); + match output { + Some(o) => Ok(o), + None => Err(PoolError::GenericPoolError), + } + } - fn get_block_header_by_output_commit(&self, commit: &Commitment) -> Result { - match self.block_headers.read().unwrap().get_block_header_by_output_commit(*commit) { - Ok(h) => Ok(h.clone()), - Err(e) => Err(e), - } - } + fn get_block_header_by_output_commit( + &self, + commit: &Commitment, + ) -> Result { + match self.block_headers + .read() + .unwrap() + .get_block_header_by_output_commit(*commit) { + Ok(h) => Ok(h.clone()), + Err(e) => Err(e), + } + } - fn head_header(&self) -> Result { - let headers = self.head_header.read().unwrap(); - if headers.len() > 0 { - Ok(headers[0].clone()) - } else { - Err(PoolError::GenericPoolError) - } - } + fn head_header(&self) -> Result { + let headers = self.head_header.read().unwrap(); + if headers.len() > 0 { + Ok(headers[0].clone()) + } else { + Err(PoolError::GenericPoolError) + } + } } impl DummyChain for DummyChainImpl { - fn update_utxo_set(&mut self, new_utxo: DummyUtxoSet) { - self.utxo = RwLock::new(new_utxo); - } - fn apply_block(&self, b: &block::Block) { - self.utxo.write().unwrap().with_block(b); - } - fn store_header_by_output_commitment(&self, commitment: Commitment, block_header: &block::BlockHeader) { - self.block_headers.write().unwrap().insert(commitment, block_header.clone()); - } - fn store_head_header(&self, block_header: &block::BlockHeader) { - let mut h = self.head_header.write().unwrap(); - h.clear(); - h.insert(0, block_header.clone()); - } + fn update_utxo_set(&mut self, new_utxo: DummyUtxoSet) { + self.utxo = RwLock::new(new_utxo); + } + fn apply_block(&self, b: &block::Block) { + self.utxo.write().unwrap().with_block(b); + } + fn store_header_by_output_commitment( + &self, + commitment: Commitment, + block_header: &block::BlockHeader, + ) { + self.block_headers.write().unwrap().insert( + commitment, + block_header.clone(), + ); + } + fn store_head_header(&self, block_header: &block::BlockHeader) { + let mut h = 
self.head_header.write().unwrap(); + h.clear(); + h.insert(0, block_header.clone()); + } } pub trait DummyChain: BlockChain { - fn update_utxo_set(&mut self, new_utxo: DummyUtxoSet); - fn apply_block(&self, b: &block::Block); - fn store_header_by_output_commitment(&self, commitment: Commitment, block_header: &block::BlockHeader); - fn store_head_header(&self, block_header: &block::BlockHeader); + fn update_utxo_set(&mut self, new_utxo: DummyUtxoSet); + fn apply_block(&self, b: &block::Block); + fn store_header_by_output_commitment( + &self, + commitment: Commitment, + block_header: &block::BlockHeader, + ); + fn store_head_header(&self, block_header: &block::BlockHeader); } diff --git a/pool/src/graph.rs b/pool/src/graph.rs index b8d9c6e00..4e2587f1a 100644 --- a/pool/src/graph.rs +++ b/pool/src/graph.rs @@ -30,184 +30,210 @@ use core::core::hash::Hashed; /// An entry in the transaction pool. /// These are the vertices of both of the graph structures pub struct PoolEntry { - // Core data - /// Unique identifier of this pool entry and the corresponding transaction - pub transaction_hash: core::hash::Hash, + // Core data + /// Unique identifier of this pool entry and the corresponding transaction + pub transaction_hash: core::hash::Hash, - // Metadata - /// Size estimate - pub size_estimate: u64, - /// Receive timestamp - pub receive_ts: time::Tm, + // Metadata + /// Size estimate + pub size_estimate: u64, + /// Receive timestamp + pub receive_ts: time::Tm, } impl PoolEntry { - /// Create new transaction pool entry - pub fn new(tx: &core::transaction::Transaction) -> PoolEntry { - PoolEntry{ - transaction_hash: transaction_identifier(tx), - size_estimate : estimate_transaction_size(tx), - receive_ts: time::now_utc()} - } + /// Create new transaction pool entry + pub fn new(tx: &core::transaction::Transaction) -> PoolEntry { + PoolEntry { + transaction_hash: transaction_identifier(tx), + size_estimate: estimate_transaction_size(tx), + receive_ts: time::now_utc(), + } + } } /// TODO guessing this needs implementing fn estimate_transaction_size(_tx: &core::transaction::Transaction) -> u64 { - 0 + 0 } /// An edge connecting graph vertices. /// For various use cases, one of either the source or destination may be /// unpopulated pub struct Edge { - // Source and Destination are the vertex id's, the transaction (kernel) - // hash. - source: Option, - destination: Option, + // Source and Destination are the vertex id's, the transaction (kernel) + // hash. + source: Option, + destination: Option, - // Output is the output hash which this input/output pairing corresponds - // to. - output: Commitment, + // Output is the output hash which this input/output pairing corresponds + // to. 
+ output: Commitment, } -impl Edge{ - /// Create new edge - pub fn new(source: Option, destination: Option, output: Commitment) -> Edge { - Edge{source: source, destination: destination, output: output} - } +impl Edge { + /// Create new edge + pub fn new( + source: Option, + destination: Option, + output: Commitment, + ) -> Edge { + Edge { + source: source, + destination: destination, + output: output, + } + } - /// Create new edge with a source - pub fn with_source(&self, src: Option) -> Edge { - Edge{source: src, destination: self.destination, output: self.output} - } + /// Create new edge with a source + pub fn with_source(&self, src: Option) -> Edge { + Edge { + source: src, + destination: self.destination, + output: self.output, + } + } - /// Create new edge with destination - pub fn with_destination(&self, dst: Option) -> Edge { - Edge{source: self.source, destination: dst, output: self.output} - } + /// Create new edge with destination + pub fn with_destination(&self, dst: Option) -> Edge { + Edge { + source: self.source, + destination: dst, + output: self.output, + } + } - /// The output commitment of the edge - pub fn output_commitment(&self) -> Commitment { - self.output - } + /// The output commitment of the edge + pub fn output_commitment(&self) -> Commitment { + self.output + } - /// The destination hash of the edge - pub fn destination_hash(&self) -> Option { - self.destination - } + /// The destination hash of the edge + pub fn destination_hash(&self) -> Option { + self.destination + } - /// The source hash of the edge - pub fn source_hash(&self) -> Option { - self.source - } + /// The source hash of the edge + pub fn source_hash(&self) -> Option { + self.source + } } impl fmt::Debug for Edge { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "Edge {{source: {:?}, destination: {:?}, commitment: {:?}}}", - self.source, self.destination, self.output) - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!( + f, + "Edge {{source: {:?}, destination: {:?}, commitment: {:?}}}", + self.source, + self.destination, + self.output + ) + } } /// The generic graph container. Both graphs, the pool and orphans, embed this /// structure and add additional capability on top of it. pub struct DirectedGraph { - edges: HashMap, - vertices: Vec, + edges: HashMap, + vertices: Vec, - // A small optimization: keeping roots (vertices with in-degree 0) in a - // separate list makes topological sort a bit faster. (This is true for - // Kahn's, not sure about other implementations) - roots: Vec, + // A small optimization: keeping roots (vertices with in-degree 0) in a + // separate list makes topological sort a bit faster. 
(This is true for + // Kahn's, not sure about other implementations) + roots: Vec, } impl DirectedGraph { - /// Create an empty directed graph - pub fn empty() -> DirectedGraph { - DirectedGraph{ - edges: HashMap::new(), - vertices: Vec::new(), - roots: Vec::new(), - } - } + /// Create an empty directed graph + pub fn empty() -> DirectedGraph { + DirectedGraph { + edges: HashMap::new(), + vertices: Vec::new(), + roots: Vec::new(), + } + } - /// Get an edge by its commitment - pub fn get_edge_by_commitment(&self, output_commitment: &Commitment) -> Option<&Edge> { - self.edges.get(output_commitment) - } + /// Get an edge by its commitment + pub fn get_edge_by_commitment(&self, output_commitment: &Commitment) -> Option<&Edge> { + self.edges.get(output_commitment) + } - /// Remove an edge by its commitment - pub fn remove_edge_by_commitment(&mut self, output_commitment: &Commitment) -> Option { - self.edges.remove(output_commitment) - } + /// Remove an edge by its commitment + pub fn remove_edge_by_commitment(&mut self, output_commitment: &Commitment) -> Option { + self.edges.remove(output_commitment) + } - /// Remove a vertex by its hash - pub fn remove_vertex(&mut self, tx_hash: core::hash::Hash) -> Option { - match self.roots.iter().position(|x| x.transaction_hash == tx_hash) { - Some(i) => Some(self.roots.swap_remove(i)), - None => { - match self.vertices.iter().position(|x| x.transaction_hash == tx_hash) { - Some(i) => Some(self.vertices.swap_remove(i)), - None => None, - } - } - } - } + /// Remove a vertex by its hash + pub fn remove_vertex(&mut self, tx_hash: core::hash::Hash) -> Option { + match self.roots.iter().position( + |x| x.transaction_hash == tx_hash, + ) { + Some(i) => Some(self.roots.swap_remove(i)), + None => { + match self.vertices.iter().position( + |x| x.transaction_hash == tx_hash, + ) { + Some(i) => Some(self.vertices.swap_remove(i)), + None => None, + } + } + } + } - /// Adds a vertex and a set of incoming edges to the graph. - /// - /// The PoolEntry at vertex is added to the graph; depending on the - /// number of incoming edges, the vertex is either added to the vertices - /// or to the roots. - /// - /// Outgoing edges must not be included in edges; this method is designed - /// for adding vertices one at a time and only accepts incoming edges as - /// internal edges. - pub fn add_entry(&mut self, vertex: PoolEntry, mut edges: Vec) { - if edges.len() == 0 { - self.roots.push(vertex); - } else { - self.vertices.push(vertex); - for edge in edges.drain(..) { - self.edges.insert(edge.output_commitment(), edge); - } - } - } + /// Adds a vertex and a set of incoming edges to the graph. + /// + /// The PoolEntry at vertex is added to the graph; depending on the + /// number of incoming edges, the vertex is either added to the vertices + /// or to the roots. + /// + /// Outgoing edges must not be included in edges; this method is designed + /// for adding vertices one at a time and only accepts incoming edges as + /// internal edges. + pub fn add_entry(&mut self, vertex: PoolEntry, mut edges: Vec) { + if edges.len() == 0 { + self.roots.push(vertex); + } else { + self.vertices.push(vertex); + for edge in edges.drain(..) 
{ + self.edges.insert(edge.output_commitment(), edge); + } + } + } - /// add_vertex_only adds a vertex, meant to be complemented by add_edge_only - /// in cases where delivering a vector of edges is not feasible or efficient - pub fn add_vertex_only(&mut self, vertex: PoolEntry, is_root: bool) { - if is_root { - self.roots.push(vertex); - } else { - self.vertices.push(vertex); - } - } + /// add_vertex_only adds a vertex, meant to be complemented by add_edge_only + /// in cases where delivering a vector of edges is not feasible or efficient + pub fn add_vertex_only(&mut self, vertex: PoolEntry, is_root: bool) { + if is_root { + self.roots.push(vertex); + } else { + self.vertices.push(vertex); + } + } - /// add_edge_only adds an edge - pub fn add_edge_only(&mut self, edge: Edge) { - self.edges.insert(edge.output_commitment(), edge); - } + /// add_edge_only adds an edge + pub fn add_edge_only(&mut self, edge: Edge) { + self.edges.insert(edge.output_commitment(), edge); + } - /// Number of vertices (root + internal) - pub fn len_vertices(&self) -> usize { - self.vertices.len() + self.roots.len() - } + /// Number of vertices (root + internal) + pub fn len_vertices(&self) -> usize { + self.vertices.len() + self.roots.len() + } - /// Number of root vertices only - pub fn len_roots(&self) -> usize { - self.roots.len() - } + /// Number of root vertices only + pub fn len_roots(&self) -> usize { + self.roots.len() + } - /// Number of edges - pub fn len_edges(&self) -> usize { - self.edges.len() - } + /// Number of edges + pub fn len_edges(&self) -> usize { + self.edges.len() + } - /// Get the current list of roots - pub fn get_roots(&self) -> Vec { - self.roots.iter().map(|x| x.transaction_hash).collect() - } + /// Get the current list of roots + pub fn get_roots(&self) -> Vec { + self.roots.iter().map(|x| x.transaction_hash).collect() + } } /// Using transaction merkle_inputs_outputs to calculate a deterministic hash; @@ -215,50 +241,57 @@ impl DirectedGraph { /// proofs and any extra data the kernel may cover, but it is used initially /// for testing purposes. 
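// An aside on DirectedGraph above, before transaction_identifier below: the
// reason roots (in-degree-0 vertices) live in their own list is that Kahn's
// topological sort seeds its work queue with exactly that frontier. A generic
// sketch of the idea on a plain adjacency list, not the pool's actual types,
// assuming nodes are indexed 0..n:

use std::collections::VecDeque;

fn kahn_topological_sort(adj: &Vec<Vec<usize>>) -> Option<Vec<usize>> {
    // count incoming edges for every node
    let mut in_degree = vec![0usize; adj.len()];
    for targets in adj {
        for &t in targets {
            in_degree[t] += 1;
        }
    }
    // the pre-kept roots list: everything with no incoming edges
    let mut queue: VecDeque<usize> =
        (0..adj.len()).filter(|&v| in_degree[v] == 0).collect();
    let mut order = Vec::with_capacity(adj.len());
    while let Some(v) = queue.pop_front() {
        order.push(v);
        for &t in &adj[v] {
            in_degree[t] -= 1;
            if in_degree[t] == 0 {
                queue.push_back(t);
            }
        }
    }
    // fewer than n emitted vertices means the graph had a cycle
    if order.len() == adj.len() { Some(order) } else { None }
}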
pub fn transaction_identifier(tx: &core::transaction::Transaction) -> core::hash::Hash { - // core::transaction::merkle_inputs_outputs(&tx.inputs, &tx.outputs) - tx.hash() + // core::transaction::merkle_inputs_outputs(&tx.inputs, &tx.outputs) + tx.hash() } #[cfg(test)] mod tests { - use super::*; - use secp::{Secp256k1, ContextFlag}; - use secp::key; + use super::*; + use secp::{Secp256k1, ContextFlag}; + use secp::key; - #[test] - fn test_add_entry() { - let ec = Secp256k1::with_caps(ContextFlag::Commit); + #[test] + fn test_add_entry() { + let ec = Secp256k1::with_caps(ContextFlag::Commit); - let output_commit = ec.commit_value(70).unwrap(); - let inputs = vec![core::transaction::Input(ec.commit_value(50).unwrap()), - core::transaction::Input(ec.commit_value(25).unwrap())]; - let outputs = vec![core::transaction::Output{ - features: core::transaction::DEFAULT_OUTPUT, - commit: output_commit, - proof: ec.range_proof(0, 100, key::ZERO_KEY, output_commit, ec.nonce())}]; - let test_transaction = core::transaction::Transaction::new(inputs, - outputs, 5); + let output_commit = ec.commit_value(70).unwrap(); + let inputs = vec![ + core::transaction::Input(ec.commit_value(50).unwrap()), + core::transaction::Input(ec.commit_value(25).unwrap()), + ]; + let outputs = vec![ + core::transaction::Output { + features: core::transaction::DEFAULT_OUTPUT, + commit: output_commit, + proof: ec.range_proof(0, 100, key::ZERO_KEY, output_commit, ec.nonce()), + }, + ]; + let test_transaction = core::transaction::Transaction::new(inputs, outputs, 5); - let test_pool_entry = PoolEntry::new(&test_transaction); + let test_pool_entry = PoolEntry::new(&test_transaction); - let incoming_edge_1 = Edge::new(Some(random_hash()), - Some(core::hash::ZERO_HASH), output_commit); + let incoming_edge_1 = Edge::new( + Some(random_hash()), + Some(core::hash::ZERO_HASH), + output_commit, + ); - let mut test_graph = DirectedGraph::empty(); + let mut test_graph = DirectedGraph::empty(); - test_graph.add_entry(test_pool_entry, vec![incoming_edge_1]); + test_graph.add_entry(test_pool_entry, vec![incoming_edge_1]); - assert_eq!(test_graph.vertices.len(), 1); - assert_eq!(test_graph.roots.len(), 0); - assert_eq!(test_graph.edges.len(), 1); - } + assert_eq!(test_graph.vertices.len(), 1); + assert_eq!(test_graph.roots.len(), 0); + assert_eq!(test_graph.edges.len(), 1); + } } /// For testing/debugging: a random tx hash pub fn random_hash() -> core::hash::Hash { - let hash_bytes: [u8;32]= rand::random(); - core::hash::Hash(hash_bytes) + let hash_bytes: [u8; 32] = rand::random(); + core::hash::Hash(hash_bytes) } diff --git a/pool/src/pool.rs b/pool/src/pool.rs index 6c73d22eb..c77604ea1 100644 --- a/pool/src/pool.rs +++ b/pool/src/pool.rs @@ -32,974 +32,1094 @@ use std::collections::HashMap; /// The transactions HashMap holds ownership of all transactions in the pool, /// keyed by their transaction hash. 
pub struct TransactionPool { - /// All transactions in the pool - pub transactions: HashMap>, - /// The pool itself - pub pool : Pool, - /// Orphans in the pool - pub orphans: Orphans, + /// All transactions in the pool + pub transactions: HashMap>, + /// The pool itself + pub pool: Pool, + /// Orphans in the pool + pub orphans: Orphans, - // blockchain is a DummyChain, for now, which mimics what the future - // chain will offer to the pool - blockchain: Arc, + // blockchain is a DummyChain, for now, which mimics what the future + // chain will offer to the pool + blockchain: Arc, } -impl TransactionPool where T: BlockChain { - /// Create a new transaction pool - pub fn new(chain: Arc) -> TransactionPool { - TransactionPool{ - transactions: HashMap::new(), - pool: Pool::empty(), - orphans: Orphans::empty(), - blockchain: chain, - } - } +impl TransactionPool +where + T: BlockChain, +{ + /// Create a new transaction pool + pub fn new(chain: Arc) -> TransactionPool { + TransactionPool { + transactions: HashMap::new(), + pool: Pool::empty(), + orphans: Orphans::empty(), + blockchain: chain, + } + } - /// Searches for an output, designated by its commitment, from the current - /// best UTXO view, presented by taking the best blockchain UTXO set (as - /// determined by the blockchain component) and rectifying pool spent and - /// unspents. - /// Detects double spends and unknown references from the pool and - /// blockchain only; any conflicts with entries in the orphans set must - /// be accounted for separately, if relevant. - pub fn search_for_best_output(&self, output_commitment: &Commitment) -> Parent { - // The current best unspent set is: - // Pool unspent + (blockchain unspent - pool->blockchain spent) - // Pool unspents are unconditional so we check those first - self.pool.get_available_output(output_commitment). - map(|x| Parent::PoolTransaction{tx_ref: x.source_hash().unwrap()}). - or(self.search_blockchain_unspents(output_commitment)). - or(self.search_pool_spents(output_commitment)). - unwrap_or(Parent::Unknown) - } + /// Searches for an output, designated by its commitment, from the current + /// best UTXO view, presented by taking the best blockchain UTXO set (as + /// determined by the blockchain component) and rectifying pool spent and + /// unspents. + /// Detects double spends and unknown references from the pool and + /// blockchain only; any conflicts with entries in the orphans set must + /// be accounted for separately, if relevant. + pub fn search_for_best_output(&self, output_commitment: &Commitment) -> Parent { + // The current best unspent set is: + // Pool unspent + (blockchain unspent - pool->blockchain spent) + // Pool unspents are unconditional so we check those first + self.pool + .get_available_output(output_commitment) + .map(|x| { + Parent::PoolTransaction { tx_ref: x.source_hash().unwrap() } + }) + .or(self.search_blockchain_unspents(output_commitment)) + .or(self.search_pool_spents(output_commitment)) + .unwrap_or(Parent::Unknown) + } - // search_blockchain_unspents searches the current view of the blockchain - // unspent set, represented by blockchain unspents - pool spents, for an - // output designated by output_commitment. - fn search_blockchain_unspents(&self, output_commitment: &Commitment) -> Option { - self.blockchain.get_unspent(output_commitment). - ok(). 
- map(|output| match self.pool.get_blockchain_spent(output_commitment) { - Some(x) => Parent::AlreadySpent{other_tx: x.destination_hash().unwrap()}, - None => Parent::BlockTransaction{output}, - }) - } + // search_blockchain_unspents searches the current view of the blockchain + // unspent set, represented by blockchain unspents - pool spents, for an + // output designated by output_commitment. + fn search_blockchain_unspents(&self, output_commitment: &Commitment) -> Option { + self.blockchain.get_unspent(output_commitment).ok().map( + |output| match self.pool.get_blockchain_spent(output_commitment) { + Some(x) => Parent::AlreadySpent { other_tx: x.destination_hash().unwrap() }, + None => Parent::BlockTransaction { output }, + }, + ) + } - // search_pool_spents is the second half of pool input detection, after the - // available_outputs have been checked. This returns either a - // Parent::AlreadySpent or None. - fn search_pool_spents(&self, output_commitment: &Commitment) -> Option { - self.pool.get_internal_spent(output_commitment). - map(|x| Parent::AlreadySpent{other_tx: x.destination_hash().unwrap()}) + // search_pool_spents is the second half of pool input detection, after the + // available_outputs have been checked. This returns either a + // Parent::AlreadySpent or None. + fn search_pool_spents(&self, output_commitment: &Commitment) -> Option { + self.pool.get_internal_spent(output_commitment).map(|x| { + Parent::AlreadySpent { other_tx: x.destination_hash().unwrap() } + }) - } + } - /// Get the number of transactions in the pool - pub fn pool_size(&self) -> usize { - self.pool.num_transactions() - } + /// Get the number of transactions in the pool + pub fn pool_size(&self) -> usize { + self.pool.num_transactions() + } - /// Get the number of orphans in the pool - pub fn orphans_size(&self) -> usize { - self.orphans.num_transactions() - } + /// Get the number of orphans in the pool + pub fn orphans_size(&self) -> usize { + self.orphans.num_transactions() + } - /// Get the total size (transactions + orphans) of the pool - pub fn total_size(&self) -> usize { - self.pool.num_transactions() + self.orphans.num_transactions() - } + /// Get the total size (transactions + orphans) of the pool + pub fn total_size(&self) -> usize { + self.pool.num_transactions() + self.orphans.num_transactions() + } - /// Attempts to add a transaction to the pool. - /// - /// Adds a transaction to the memory pool, deferring to the orphans pool - /// if necessary, and performing any connection-related validity checks. - /// Happens under an exclusive mutable reference gated by the write portion - /// of a RWLock. - pub fn add_to_memory_pool(&mut self, _: TxSource, tx: transaction::Transaction) -> Result<(), PoolError> { - // Making sure the transaction is valid before anything else. - let secp = secp::Secp256k1::with_caps(secp::ContextFlag::Commit); - tx.validate(&secp).map_err(|_| PoolError::Invalid)?; + /// Attempts to add a transaction to the pool. + /// + /// Adds a transaction to the memory pool, deferring to the orphans pool + /// if necessary, and performing any connection-related validity checks. + /// Happens under an exclusive mutable reference gated by the write portion + /// of a RWLock. + pub fn add_to_memory_pool( + &mut self, + _: TxSource, + tx: transaction::Transaction, + ) -> Result<(), PoolError> { + // Making sure the transaction is valid before anything else. 
+ let secp = secp::Secp256k1::with_caps(secp::ContextFlag::Commit); + tx.validate(&secp).map_err(|_| PoolError::Invalid)?; - // The first check involves ensuring that an identical transaction is - // not already in the pool's transaction set. - // A non-authoritative similar check should be performed under the - // pool's read lock before we get to this point, which would catch the - // majority of duplicate cases. The race condition is caught here. - // TODO: When the transaction identifier is finalized, the assumptions - // here may change depending on the exact coverage of the identifier. - // The current tx.hash() method, for example, does not cover changes - // to fees or other elements of the signature preimage. - let tx_hash = graph::transaction_identifier(&tx); - if self.transactions.contains_key(&tx_hash) { - return Err(PoolError::AlreadyInPool) - } + // The first check involves ensuring that an identical transaction is + // not already in the pool's transaction set. + // A non-authoritative similar check should be performed under the + // pool's read lock before we get to this point, which would catch the + // majority of duplicate cases. The race condition is caught here. + // TODO: When the transaction identifier is finalized, the assumptions + // here may change depending on the exact coverage of the identifier. + // The current tx.hash() method, for example, does not cover changes + // to fees or other elements of the signature preimage. + let tx_hash = graph::transaction_identifier(&tx); + if self.transactions.contains_key(&tx_hash) { + return Err(PoolError::AlreadyInPool); + } - // The next issue is to identify all unspent outputs that - // this transaction will consume and make sure they exist in the set. - let mut pool_refs: Vec = Vec::new(); - let mut orphan_refs: Vec = Vec::new(); - let mut blockchain_refs: Vec = Vec::new(); + // The next issue is to identify all unspent outputs that + // this transaction will consume and make sure they exist in the set. + let mut pool_refs: Vec = Vec::new(); + let mut orphan_refs: Vec = Vec::new(); + let mut blockchain_refs: Vec = Vec::new(); - for input in &tx.inputs { - let base = graph::Edge::new(None, Some(tx_hash), input.commitment()); + for input in &tx.inputs { + let base = graph::Edge::new(None, Some(tx_hash), input.commitment()); - // Note that search_for_best_output does not examine orphans, by - // design. If an incoming transaction consumes pool outputs already - // spent by the orphans set, this does not preclude its inclusion - // into the pool. - match self.search_for_best_output(&input.commitment()) { - Parent::PoolTransaction{tx_ref: x} => pool_refs.push(base.with_source(Some(x))), - Parent::BlockTransaction{output} => { - // TODO - pull this out into a separate function? - if output.features.contains(transaction::COINBASE_OUTPUT) { - if let Ok(out_header) = self.blockchain.get_block_header_by_output_commit(&output.commitment()) { - if let Ok(head_header) = self.blockchain.head_header() { - if head_header.height <= out_header.height + consensus::COINBASE_MATURITY { - return Err(PoolError::ImmatureCoinbase{ - header: out_header, - output: output.commitment() - }) - }; - }; - }; - }; - blockchain_refs.push(base); - }, - Parent::Unknown => orphan_refs.push(base), - Parent::AlreadySpent{other_tx: x} => return Err(PoolError::DoubleSpend{other_tx: x, spent_output: input.commitment()}), - } - } + // Note that search_for_best_output does not examine orphans, by + // design. 
If an incoming transaction consumes pool outputs already + // spent by the orphans set, this does not preclude its inclusion + // into the pool. + match self.search_for_best_output(&input.commitment()) { + Parent::PoolTransaction { tx_ref: x } => pool_refs.push(base.with_source(Some(x))), + Parent::BlockTransaction { output } => { + // TODO - pull this out into a separate function? + if output.features.contains(transaction::COINBASE_OUTPUT) { + if let Ok(out_header) = self.blockchain + .get_block_header_by_output_commit(&output.commitment()) + { + if let Ok(head_header) = self.blockchain.head_header() { + if head_header.height <= + out_header.height + consensus::COINBASE_MATURITY + { + return Err(PoolError::ImmatureCoinbase { + header: out_header, + output: output.commitment(), + }); + }; + }; + }; + }; + blockchain_refs.push(base); + } + Parent::Unknown => orphan_refs.push(base), + Parent::AlreadySpent { other_tx: x } => { + return Err(PoolError::DoubleSpend { + other_tx: x, + spent_output: input.commitment(), + }) + } + } + } - let is_orphan = orphan_refs.len() > 0; + let is_orphan = orphan_refs.len() > 0; - // Next we examine the outputs this transaction creates and ensure - // that they do not already exist. - // I believe its worth preventing duplicate outputs from being - // accepted, even though it is possible for them to be mined - // with strict ordering. In the future, if desirable, this could - // be node policy config or more intelligent. - for output in &tx.outputs { - self.check_duplicate_outputs(output, is_orphan)? - } + // Next we examine the outputs this transaction creates and ensure + // that they do not already exist. + // I believe its worth preventing duplicate outputs from being + // accepted, even though it is possible for them to be mined + // with strict ordering. In the future, if desirable, this could + // be node policy config or more intelligent. + for output in &tx.outputs { + self.check_duplicate_outputs(output, is_orphan)? + } - // Assertion: we have exactly as many resolved spending references as - // inputs to the transaction. - assert_eq!(tx.inputs.len(), - blockchain_refs.len() + pool_refs.len() + orphan_refs.len()); + // Assertion: we have exactly as many resolved spending references as + // inputs to the transaction. + assert_eq!( + tx.inputs.len(), + blockchain_refs.len() + pool_refs.len() + orphan_refs.len() + ); - // At this point we know if we're spending all known unspents and not - // creating any duplicate unspents. - let pool_entry = graph::PoolEntry::new(&tx); - let new_unspents = tx.outputs.iter(). - map(|x| graph::Edge::new(Some(tx_hash), None, x.commitment())). - collect(); + // At this point we know if we're spending all known unspents and not + // creating any duplicate unspents. + let pool_entry = graph::PoolEntry::new(&tx); + let new_unspents = tx.outputs + .iter() + .map(|x| graph::Edge::new(Some(tx_hash), None, x.commitment())) + .collect(); - if !is_orphan { - // In the non-orphan (pool) case, we've ensured that every input - // maps one-to-one with an unspent (available) output, and each - // output is unique. No further checks are necessary. - self.pool.add_pool_transaction(pool_entry, blockchain_refs, - pool_refs, new_unspents); + if !is_orphan { + // In the non-orphan (pool) case, we've ensured that every input + // maps one-to-one with an unspent (available) output, and each + // output is unique. No further checks are necessary. 
+ self.pool.add_pool_transaction( + pool_entry, + blockchain_refs, + pool_refs, + new_unspents, + ); - self.reconcile_orphans().unwrap(); - self.transactions.insert(tx_hash, Box::new(tx)); - Ok(()) + self.reconcile_orphans().unwrap(); + self.transactions.insert(tx_hash, Box::new(tx)); + Ok(()) - } else { - // At this point, we're pretty sure the transaction is an orphan, - // but we have to explicitly check for double spends against the - // orphans set; we do not check this as part of the connectivity - // checking above. - // First, any references resolved to the pool need to be compared - // against active orphan pool_connections. - // Note that pool_connections here also does double duty to - // account for blockchain connections. - for pool_ref in pool_refs.iter().chain(blockchain_refs.iter()) { - match self.orphans.get_external_spent_output(&pool_ref.output_commitment()){ - // Should the below err be subtyped to orphans somehow? - Some(x) => return Err(PoolError::DoubleSpend{other_tx: x.destination_hash().unwrap(), spent_output: x.output_commitment()}), - None => {}, - } - } + } else { + // At this point, we're pretty sure the transaction is an orphan, + // but we have to explicitly check for double spends against the + // orphans set; we do not check this as part of the connectivity + // checking above. + // First, any references resolved to the pool need to be compared + // against active orphan pool_connections. + // Note that pool_connections here also does double duty to + // account for blockchain connections. + for pool_ref in pool_refs.iter().chain(blockchain_refs.iter()) { + match self.orphans.get_external_spent_output( + &pool_ref.output_commitment(), + ) { + // Should the below err be subtyped to orphans somehow? + Some(x) => { + return Err(PoolError::DoubleSpend { + other_tx: x.destination_hash().unwrap(), + spent_output: x.output_commitment(), + }) + } + None => {} + } + } - // Next, we have to consider the possibility of double spends - // within the orphans set. - // We also have to distinguish now between missing and internal - // references. - let missing_refs = self.resolve_orphan_refs(tx_hash, &mut orphan_refs)?; + // Next, we have to consider the possibility of double spends + // within the orphans set. + // We also have to distinguish now between missing and internal + // references. + let missing_refs = self.resolve_orphan_refs(tx_hash, &mut orphan_refs)?; - // We have passed all failure modes. - pool_refs.append(&mut blockchain_refs); - self.orphans.add_orphan_transaction(pool_entry, - pool_refs, orphan_refs, missing_refs, new_unspents); + // We have passed all failure modes. + pool_refs.append(&mut blockchain_refs); + self.orphans.add_orphan_transaction( + pool_entry, + pool_refs, + orphan_refs, + missing_refs, + new_unspents, + ); - Err(PoolError::OrphanTransaction) - } + Err(PoolError::OrphanTransaction) + } - } + } - /// Check the output for a conflict with an existing output. - /// - /// Checks the output (by commitment) against outputs in the blockchain - /// or in the pool. If the transaction is destined for orphans, the - /// orphans set is checked as well. 
- fn check_duplicate_outputs(&self, output : &transaction::Output, is_orphan: bool) -> Result<(), PoolError> { - // Checking against current blockchain unspent outputs - // We want outputs even if they're spent by pool txs, so we ignore - // consumed_blockchain_outputs - if self.blockchain.get_unspent(&output.commitment()).is_ok() { - return Err(PoolError::DuplicateOutput{ - other_tx: None, - in_chain: true, - output: output.commitment()}) - } + /// Check the output for a conflict with an existing output. + /// + /// Checks the output (by commitment) against outputs in the blockchain + /// or in the pool. If the transaction is destined for orphans, the + /// orphans set is checked as well. + fn check_duplicate_outputs( + &self, + output: &transaction::Output, + is_orphan: bool, + ) -> Result<(), PoolError> { + // Checking against current blockchain unspent outputs + // We want outputs even if they're spent by pool txs, so we ignore + // consumed_blockchain_outputs + if self.blockchain.get_unspent(&output.commitment()).is_ok() { + return Err(PoolError::DuplicateOutput { + other_tx: None, + in_chain: true, + output: output.commitment(), + }); + } - // Check for existence of this output in the pool - match self.pool.find_output(&output.commitment()) { - Some(x) => { - return Err(PoolError::DuplicateOutput{ - other_tx: Some(x), - in_chain: false, - output: output.commitment()}) - }, - None => {}, - }; + // Check for existence of this output in the pool + match self.pool.find_output(&output.commitment()) { + Some(x) => { + return Err(PoolError::DuplicateOutput { + other_tx: Some(x), + in_chain: false, + output: output.commitment(), + }) + } + None => {} + }; - // If the transaction might go into orphans, perform the same - // checks as above but against the orphan set instead. - if is_orphan { - // Checking against orphan outputs - match self.orphans.find_output(&output.commitment()){ - Some(x) => { - return Err(PoolError::DuplicateOutput{ - other_tx: Some(x), - in_chain: false, - output: output.commitment()}) - }, - None => {}, - }; - // No need to check pool connections since those are covered - // by pool unspents and blockchain connections. - } - Ok(()) - } + // If the transaction might go into orphans, perform the same + // checks as above but against the orphan set instead. + if is_orphan { + // Checking against orphan outputs + match self.orphans.find_output(&output.commitment()) { + Some(x) => { + return Err(PoolError::DuplicateOutput { + other_tx: Some(x), + in_chain: false, + output: output.commitment(), + }) + } + None => {} + }; + // No need to check pool connections since those are covered + // by pool unspents and blockchain connections. + } + Ok(()) + } - /// Distinguish between missing, unspent, and spent orphan refs. - /// - /// Takes the set of orphans_refs produced during transaction connectivity - /// validation, which do not point at valid unspents in the blockchain or - /// pool. These references point at either a missing (orphaned) commitment, - /// an unspent output of the orphans set, or a spent output either within - /// the orphans set or externally from orphans to the pool or blockchain. - /// The last case results in a failure condition and transaction acceptance - /// is aborted. 
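The resolver below places each orphan reference into one of three buckets: it consumes an available orphan output, it collides with an edge the orphan set has already spent or claimed, or it resolves to nothing and is recorded as missing. A simplified classifier under those assumptions (the two conflict lookups, get_internal_spent and get_unknown_output, are collapsed into a single Option here):

    enum OrphanRef {
        Consumes(u64),    // available orphan output: rewire the edge
        DoubleSpend(u64), // already spent or claimed: abort acceptance
        Missing,          // resolves to nothing: record in missing_refs
    }

    fn classify(available: Option<u64>, spent_or_claimed: Option<u64>) -> OrphanRef {
        match (available, spent_or_claimed) {
            (Some(tx), _) => OrphanRef::Consumes(tx),
            (None, Some(tx)) => OrphanRef::DoubleSpend(tx),
            (None, None) => OrphanRef::Missing,
        }
    }

    fn main() {
        assert!(matches!(classify(None, None), OrphanRef::Missing));
        assert!(matches!(classify(Some(1), Some(2)), OrphanRef::Consumes(1)));
    }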
- fn resolve_orphan_refs(&self, tx_hash: hash::Hash, orphan_refs: &mut Vec) -> Result, PoolError> { - let mut missing_refs: HashMap = HashMap::new(); - for (i, orphan_ref) in orphan_refs.iter_mut().enumerate() { - let orphan_commitment = &orphan_ref.output_commitment(); - match self.orphans.get_available_output(&orphan_commitment) { - // If the edge is an available output of orphans, - // update the prepared edge - Some(x) => *orphan_ref = x.with_destination(Some(tx_hash)), - // If the edge is not an available output, it is either - // already consumed or it belongs in missing_refs. - None => { - match self.orphans.get_internal_spent(&orphan_commitment) { - Some(x) => return Err(PoolError::DoubleSpend{ - other_tx: x.destination_hash().unwrap(), - spent_output: x.output_commitment()}), - None => { - // The reference does not resolve to anything. - // Make sure this missing_output has not already - // been claimed, then add this entry to - // missing_refs - match self.orphans.get_unknown_output(&orphan_commitment) { - Some(x) => return Err(PoolError::DoubleSpend{ - other_tx: x.destination_hash().unwrap(), - spent_output: x.output_commitment()}), - None => missing_refs.insert(i, ()), - }; - }, - }; - }, - }; - } - Ok(missing_refs) - } + /// Distinguish between missing, unspent, and spent orphan refs. + /// + /// Takes the set of orphan_refs produced during transaction connectivity + /// validation, which do not point at valid unspents in the blockchain or + /// pool. These references point at either a missing (orphaned) commitment, + /// an unspent output of the orphans set, or a spent output either within + /// the orphans set or externally from orphans to the pool or blockchain. + /// The last case results in a failure condition and transaction acceptance + /// is aborted. + fn resolve_orphan_refs( + &self, + tx_hash: hash::Hash, + orphan_refs: &mut Vec, + ) -> Result, PoolError> { + let mut missing_refs: HashMap = HashMap::new(); + for (i, orphan_ref) in orphan_refs.iter_mut().enumerate() { + let orphan_commitment = &orphan_ref.output_commitment(); + match self.orphans.get_available_output(&orphan_commitment) { + // If the edge is an available output of orphans, + // update the prepared edge + Some(x) => *orphan_ref = x.with_destination(Some(tx_hash)), + // If the edge is not an available output, it is either + // already consumed or it belongs in missing_refs. + None => { + match self.orphans.get_internal_spent(&orphan_commitment) { + Some(x) => { + return Err(PoolError::DoubleSpend { + other_tx: x.destination_hash().unwrap(), + spent_output: x.output_commitment(), + }) + } + None => { + // The reference does not resolve to anything. + // Make sure this missing_output has not already + // been claimed, then add this entry to + // missing_refs + match self.orphans.get_unknown_output(&orphan_commitment) { + Some(x) => { + return Err(PoolError::DoubleSpend { + other_tx: x.destination_hash().unwrap(), + spent_output: x.output_commitment(), + }) + } + None => missing_refs.insert(i, ()), + }; + } + }; + } + }; + } + Ok(missing_refs) + }
- pub fn reconcile_orphans(&self)-> Result<(),PoolError> { - Ok(()) - } + /// The primary goal of the reconcile_orphans method is to eliminate any + /// orphans who conflict with the recently accepted pool transaction. + /// TODO: How do we handle fishing orphans out that look like they could + /// be freed? Current thought is to do so under a different lock domain + /// so that we don't have the potential for long recursion under the write + /// lock. + pub fn reconcile_orphans(&self) -> Result<(), PoolError> { + Ok(()) + } - /// Updates the pool with the details of a new block. - /// - /// Along with add_to_memory_pool, reconcile_block is the other major entry - /// point for the transaction pool. This method reconciles the records in - /// the transaction pool with the updated view presented by the incoming - /// block. This involves removing any transactions which appear to conflict - /// with inputs and outputs consumed in the block, and invalidating any - /// descendents or parents of the removed transaction, where relevant. - /// - /// Returns a list of transactions which have been evicted from the pool - /// due to the recent block. Because transaction association information is - /// irreversibly lost in the blockchain, we must keep track of these - /// evicted transactions elsewhere so that we can make a best effort at - /// returning them to the pool in the event of a reorg that invalidates - /// this block. - pub fn reconcile_block(&mut self, block: &block::Block) -> Result>, PoolError> { - // If this pool has been kept in sync correctly, serializing all - // updates, then the inputs must consume only members of the blockchain - // utxo set. - // If the block has been resolved properly and reduced fully to its - // canonical form, no inputs may consume outputs generated by previous - // transactions in the block; they would be cut-through. TODO: If this - // is not consensus enforced, then logic must be added here to account - // for that. - // Based on this, we operate under the following algorithm: - // For each block input, we examine the pool transaction, if any, that - // consumes the same blockchain output. - // If one exists, we mark the transaction and then examine its - // children. Recursively, we mark each child until a child is - // fully satisfied by outputs in the updated utxo view (after - // reconciliation of the block), or there are no more children. - // - // Additionally, to protect our invariant dictating no duplicate - // outputs, each output generated by the new utxo set is checked - // against outputs generated by the pool and the corresponding - // transactions are also marked. - // - // After marking concludes, sweeping begins. In order, the marked - // transactions are removed, the vertexes corresponding to the - // transactions are removed, all the marked transactions' outputs are - // removed, and all remaining non-blockchain inputs are returned to the - // unspent_outputs set. - // - // After the pool has been successfully processed, an orphans - // reconciliation job is triggered. - let mut marked_transactions: HashMap = HashMap::new(); - { - let mut conflicting_txs: Vec = block.inputs.iter(). - filter_map(|x| - self.pool.get_external_spent_output(&x.commitment())). - map(|x| x.destination_hash().unwrap()). - collect(); + /// Updates the pool with the details of a new block. + /// + /// Along with add_to_memory_pool, reconcile_block is the other major entry + /// point for the transaction pool. 
This method reconciles the records in + /// the transaction pool with the updated view presented by the incoming + /// block. This involves removing any transactions which appear to conflict + /// with inputs and outputs consumed in the block, and invalidating any + /// descendants or parents of the removed transaction, where relevant. + /// + /// Returns a list of transactions which have been evicted from the pool + /// due to the recent block. Because transaction association information is + /// irreversibly lost in the blockchain, we must keep track of these + /// evicted transactions elsewhere so that we can make a best effort at + /// returning them to the pool in the event of a reorg that invalidates + /// this block. + pub fn reconcile_block( + &mut self, + block: &block::Block, + ) -> Result>, PoolError> { + // If this pool has been kept in sync correctly, serializing all + // updates, then the inputs must consume only members of the blockchain + // utxo set. + // If the block has been resolved properly and reduced fully to its + // canonical form, no inputs may consume outputs generated by previous + // transactions in the block; they would be cut-through. TODO: If this + // is not consensus enforced, then logic must be added here to account + // for that. + // Based on this, we operate under the following algorithm: + // For each block input, we examine the pool transaction, if any, that + // consumes the same blockchain output. + // If one exists, we mark the transaction and then examine its + // children. Recursively, we mark each child until a child is + // fully satisfied by outputs in the updated utxo view (after + // reconciliation of the block), or there are no more children. + // + // Additionally, to protect our invariant dictating no duplicate + // outputs, each output generated by the new utxo set is checked + // against outputs generated by the pool and the corresponding + // transactions are also marked. + // + // After marking concludes, sweeping begins. In order, the marked + // transactions are removed, the vertexes corresponding to the + // transactions are removed, all the marked transactions' outputs are + // removed, and all remaining non-blockchain inputs are returned to the + // unspent_outputs set. + // + // After the pool has been successfully processed, an orphans + // reconciliation job is triggered. + let mut marked_transactions: HashMap = HashMap::new(); + { + let mut conflicting_txs: Vec = block + .inputs + .iter() + .filter_map(|x| self.pool.get_external_spent_output(&x.commitment())) + .map(|x| x.destination_hash().unwrap()) + .collect(); - let mut conflicting_outputs: Vec = block.outputs.iter(). - filter_map(|x: &transaction::Output| - self.pool.get_internal_spent_output(&x.commitment()). - or(self.pool.get_available_output(&x.commitment()))). - map(|x| x.source_hash().unwrap()).
- collect(); + let mut conflicting_outputs: Vec = block + .outputs + .iter() + .filter_map(|x: &transaction::Output| { + self.pool.get_internal_spent_output(&x.commitment()).or( + self.pool.get_available_output(&x.commitment()), + ) + }) + .map(|x| x.source_hash().unwrap()) + .collect(); - conflicting_txs.append(&mut conflicting_outputs); + conflicting_txs.append(&mut conflicting_outputs); - for txh in conflicting_txs { - self.mark_transaction(txh, &mut marked_transactions); - } - } - let freed_txs = self.sweep_transactions(marked_transactions); + for txh in conflicting_txs { + self.mark_transaction(txh, &mut marked_transactions); + } + } + let freed_txs = self.sweep_transactions(marked_transactions); - self.reconcile_orphans().unwrap(); + self.reconcile_orphans().unwrap(); - Ok(freed_txs) - } + Ok(freed_txs) + } - /// The mark portion of our mark-and-sweep pool cleanup. - /// - /// The transaction designated by conflicting_tx is immediately marked. - /// Each output of this transaction is then examined; if a transaction in - /// the pool spends this output and the output is not replaced by an - /// identical output included in the updated UTXO set, the child is marked - /// as well and the process continues recursively. - /// - /// Marked transactions are added to the mutable marked_txs HashMap which - /// is supplied by the calling function. - fn mark_transaction(&self, conflicting_tx: hash::Hash, - marked_txs: &mut HashMap) { + /// The mark portion of our mark-and-sweep pool cleanup. + /// + /// The transaction designated by conflicting_tx is immediately marked. + /// Each output of this transaction is then examined; if a transaction in + /// the pool spends this output and the output is not replaced by an + /// identical output included in the updated UTXO set, the child is marked + /// as well and the process continues recursively. + /// + /// Marked transactions are added to the mutable marked_txs HashMap which + /// is supplied by the calling function. + fn mark_transaction( + &self, + conflicting_tx: hash::Hash, + marked_txs: &mut HashMap, + ) { - marked_txs.insert(conflicting_tx, ()); + marked_txs.insert(conflicting_tx, ()); - let tx_ref = self.transactions.get(&conflicting_tx); + let tx_ref = self.transactions.get(&conflicting_tx); - for output in &tx_ref.unwrap().outputs { - match self.pool.get_internal_spent_output(&output.commitment()) { - Some(x) => { - if self.blockchain.get_unspent(&x.output_commitment()).is_err() { - self.mark_transaction(x.destination_hash().unwrap(), marked_txs); - } - }, - None => {}, - }; - } - } - /// The sweep portion of mark-and-sweep pool cleanup. - /// - /// The transactions that exist in the hashmap are removed from the - /// heap storage as well as the vertex set. Any incoming edges are removed - /// and added to a list of freed edges. Any outbound edges are removed from - /// both the graph and the list of freed edges. It is the responsibility of - /// this method to ensure that the list of freed edges (inputs) are - /// consistent. - /// - /// TODO: There's some iteration overlap between this and the mark step. - /// Additional bookkeeping in the mark step could optimize that away. 
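The mark step described above is a depth-first walk over spending children. A minimal sketch with u64 hashes, a hypothetical children map standing in for the pool graph, and the "output replaced by an identical block output" exemption omitted for brevity:

    use std::collections::{HashMap, HashSet};

    fn mark(tx: u64, children: &HashMap<u64, Vec<u64>>, marked: &mut HashSet<u64>) {
        if !marked.insert(tx) {
            return; // already marked: stop recursing down this branch
        }
        for child in children.get(&tx).into_iter().flatten() {
            mark(*child, children, marked);
        }
    }

    fn main() {
        let mut children = HashMap::new();
        children.insert(1u64, vec![2, 3]); // tx 1's outputs are spent by 2 and 3
        children.insert(3u64, vec![4]);
        let mut marked = HashSet::new();
        mark(1, &children, &mut marked);
        assert_eq!(marked.len(), 4); // 1, 2, 3 and 4 all go to the sweep
    }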
- fn sweep_transactions(&mut self, - marked_transactions: HashMap,) - ->Vec> { + for output in &tx_ref.unwrap().outputs { + match self.pool.get_internal_spent_output(&output.commitment()) { + Some(x) => { + if self.blockchain.get_unspent(&x.output_commitment()).is_err() { + self.mark_transaction(x.destination_hash().unwrap(), marked_txs); + } + } + None => {} + }; + } + } + /// The sweep portion of mark-and-sweep pool cleanup. + /// + /// The transactions that exist in the hashmap are removed from the + /// heap storage as well as the vertex set. Any incoming edges are removed + /// and added to a list of freed edges. Any outbound edges are removed from + /// both the graph and the list of freed edges. It is the responsibility of + /// this method to ensure that the list of freed edges (inputs) is + /// consistent. + /// + /// TODO: There's some iteration overlap between this and the mark step. + /// Additional bookkeeping in the mark step could optimize that away. + fn sweep_transactions( + &mut self, + marked_transactions: HashMap, + ) -> Vec> { - let mut removed_txs = Vec::new(); + let mut removed_txs = Vec::new(); - for tx_hash in marked_transactions.keys() { - let removed_tx = self.transactions.remove(tx_hash).unwrap(); + for tx_hash in marked_transactions.keys() { + let removed_tx = self.transactions.remove(tx_hash).unwrap(); - self.pool.remove_pool_transaction(&removed_tx, - &marked_transactions); + self.pool.remove_pool_transaction( + &removed_tx, + &marked_transactions, + ); - removed_txs.push(removed_tx); - } - removed_txs - } + removed_txs.push(removed_tx); + } + removed_txs + } - /// Fetch mineable transactions. - /// - /// Select a set of mineable transactions for block building. - pub fn prepare_mineable_transactions(&self, num_to_fetch: u32) -> Vec>{ - self.pool.get_mineable_transactions(num_to_fetch).iter(). - map(|x| self.transactions.get(x).unwrap().clone()).collect() - } + /// Fetch mineable transactions. + /// + /// Select a set of mineable transactions for block building. + pub fn prepare_mineable_transactions( + &self, + num_to_fetch: u32, + ) -> Vec> { + self.pool + .get_mineable_transactions(num_to_fetch) + .iter() + .map(|x| self.transactions.get(x).unwrap().clone()) + .collect() + } } #[cfg(test)] mod tests { - use super::*; - use types::*; - use secp::{Secp256k1, ContextFlag, constants}; - use secp::key; - use core::core::build; - use blockchain::{DummyChain, DummyChainImpl, DummyUtxoSet}; - use std::sync::{Arc, RwLock}; + use super::*; + use types::*; + use secp::{Secp256k1, ContextFlag, constants}; + use secp::key; + use core::core::build; + use blockchain::{DummyChain, DummyChainImpl, DummyUtxoSet}; + use std::sync::{Arc, RwLock}; - macro_rules! expect_output_parent { ($pool:expr, $expected:pat, $( $output:expr ),+ ) => { $( match $pool.search_for_best_output(&test_output($output).commitment()) { $expected => {}, - x => panic!("Unexpected result from output search for {:?}, got {:?}", $output, x), + x => panic!( + "Unexpected result from output search for {:?}, got {:?}", + $output, + x), }; )* } } - #[test] - /// A basic test; add a pair of transactions to the pool. - fn test_basic_pool_add() { - let mut dummy_chain = DummyChainImpl::new(); + #[test] + /// A basic test; add a pair of transactions to the pool. + fn test_basic_pool_add() { + let mut dummy_chain = DummyChainImpl::new(); - let parent_transaction = test_transaction(vec![5,6,7], vec![11,4]); - // We want this transaction to be rooted in the blockchain.
- let new_utxo = DummyUtxoSet::empty(). - with_output(test_output(5)). - with_output(test_output(6)). - with_output(test_output(7)). - with_output(test_output(8)); + let parent_transaction = test_transaction(vec![5, 6, 7], vec![11, 4]); + // We want this transaction to be rooted in the blockchain. + let new_utxo = DummyUtxoSet::empty() + .with_output(test_output(5)) + .with_output(test_output(6)) + .with_output(test_output(7)) + .with_output(test_output(8)); - // Prepare a second transaction, connected to the first. - let child_transaction = test_transaction(vec![11,4], vec![12]); + // Prepare a second transaction, connected to the first. + let child_transaction = test_transaction(vec![11, 4], vec![12]); - dummy_chain.update_utxo_set(new_utxo); + dummy_chain.update_utxo_set(new_utxo); - // To mirror how this construction is intended to be used, the pool - // is placed inside a RwLock. - let pool = RwLock::new(test_setup(&Arc::new(dummy_chain))); + // To mirror how this construction is intended to be used, the pool + // is placed inside a RwLock. + let pool = RwLock::new(test_setup(&Arc::new(dummy_chain))); - // Take the write lock and add a pool entry - { - let mut write_pool = pool.write().unwrap(); - assert_eq!(write_pool.total_size(), 0); + // Take the write lock and add a pool entry + { + let mut write_pool = pool.write().unwrap(); + assert_eq!(write_pool.total_size(), 0); - // First, add the transaction rooted in the blockchain - let result = write_pool.add_to_memory_pool(test_source(), - parent_transaction); - if result.is_err() { - panic!("got an error adding parent tx: {:?}", - result.err().unwrap()); - } + // First, add the transaction rooted in the blockchain + let result = write_pool.add_to_memory_pool(test_source(), parent_transaction); + if result.is_err() { + panic!("got an error adding parent tx: {:?}", result.err().unwrap()); + } - // Now, add the transaction connected as a child to the first - let child_result = write_pool.add_to_memory_pool(test_source(), - child_transaction); + // Now, add the transaction connected as a child to the first + let child_result = write_pool.add_to_memory_pool(test_source(), child_transaction); - if child_result.is_err() { - panic!("got an error adding child tx: {:?}", - child_result.err().unwrap()); - } + if child_result.is_err() { + panic!( + "got an error adding child tx: {:?}", + child_result.err().unwrap() + ); + } - } + } - // Now take the read lock and use a few exposed methods to check - // consistency - { - let read_pool = pool.read().unwrap(); - assert_eq!(read_pool.total_size(), 2); + // Now take the read lock and use a few exposed methods to check + // consistency + { + let read_pool = pool.read().unwrap(); + assert_eq!(read_pool.total_size(), 2); - expect_output_parent!(read_pool, + expect_output_parent!(read_pool, Parent::PoolTransaction{tx_ref: _}, 12); - expect_output_parent!(read_pool, + expect_output_parent!(read_pool, Parent::AlreadySpent{other_tx: _}, 11, 5); - expect_output_parent!(read_pool, + expect_output_parent!(read_pool, Parent::BlockTransaction{output: _}, 8); - expect_output_parent!(read_pool, + expect_output_parent!(read_pool, Parent::Unknown, 20); - } - } + } + } - #[test] - /// Testing various expected error conditions - pub fn test_pool_add_error() { - let mut dummy_chain = DummyChainImpl::new(); + #[test] + /// Testing various expected error conditions + pub fn test_pool_add_error() { + let mut dummy_chain = DummyChainImpl::new(); - let new_utxo = DummyUtxoSet::empty(). - with_output(test_output(5)). 
- with_output(test_output(6)). - with_output(test_output(7)); + let new_utxo = DummyUtxoSet::empty() + .with_output(test_output(5)) + .with_output(test_output(6)) + .with_output(test_output(7)); - dummy_chain.update_utxo_set(new_utxo); + dummy_chain.update_utxo_set(new_utxo); - let pool = RwLock::new(test_setup(&Arc::new(dummy_chain))); - { - let mut write_pool = pool.write().unwrap(); - assert_eq!(write_pool.total_size(), 0); + let pool = RwLock::new(test_setup(&Arc::new(dummy_chain))); + { + let mut write_pool = pool.write().unwrap(); + assert_eq!(write_pool.total_size(), 0); - // First expected failure: duplicate output - let duplicate_tx = test_transaction(vec![5,6], vec![7]); + // First expected failure: duplicate output + let duplicate_tx = test_transaction(vec![5,6], vec![7]); - match write_pool.add_to_memory_pool(test_source(), - duplicate_tx) { - Ok(_) => panic!("Got OK from add_to_memory_pool when dup was expected"), - Err(x) =>{ match x { - PoolError::DuplicateOutput{other_tx, in_chain, output} => { - if other_tx.is_some() || !in_chain || output != test_output(7).commitment() { - panic!("Unexpected parameter in DuplicateOutput: {:?}", x); - } + match write_pool.add_to_memory_pool(test_source(), duplicate_tx) { + Ok(_) => panic!("Got OK from add_to_memory_pool when dup was expected"), + Err(x) => { + match x { + PoolError::DuplicateOutput { + other_tx, + in_chain, + output, + } => { + if other_tx.is_some() || !in_chain || + output != test_output(7).commitment() + { + panic!("Unexpected parameter in DuplicateOutput: {:?}", x); + } + } + _ => { + panic!("Unexpected error when adding duplicate output transaction: {:?}", x) + } + }; + } + }; - }, - _ => panic!("Unexpected error when adding duplicate output transaction: {:?}", x), - };}, - }; + // To test DoubleSpend and AlreadyInPool conditions, we need to add + // a valid transaction. + let valid_transaction = test_transaction(vec![5,6], vec![8]); - // To test DoubleSpend and AlreadyInPool conditions, we need to add - // a valid transaction. 
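Why duplicate_tx above collides with the chain: in this test scheme the blinding key equals the value, so any two outputs of value 7 commit to identical bytes. A schematic stand-in that swaps the Pedersen commitment for a plain hash just to show the effect:

    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    // Stand-in for ec.commit(value, key) with key == value.
    fn fake_commit(value: u64) -> u64 {
        let mut h = DefaultHasher::new();
        (value, value).hash(&mut h);
        h.finish()
    }

    fn main() {
        assert_eq!(fake_commit(7), fake_commit(7)); // duplicate detected
        assert_ne!(fake_commit(7), fake_commit(8));
    }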
- let valid_transaction = test_transaction(vec![5,6], vec![8]); + match write_pool.add_to_memory_pool(test_source(), valid_transaction) { + Ok(_) => {} + Err(x) => panic!("Unexpected error while adding a valid transaction: {:?}", x), + }; - match write_pool.add_to_memory_pool(test_source(), - valid_transaction) { - Ok(_) => {}, - Err(x) => panic!("Unexpected error while adding a valid transaction: {:?}", x), - }; + // Now, test a DoubleSpend by consuming the same blockchain unspent + // as valid_transaction: + let double_spend_transaction = test_transaction(vec![6], vec![2]); - // Now, test a DoubleSpend by consuming the same blockchain unspent - // as valid_transaction: - let double_spend_transaction = test_transaction(vec![6], vec![2]); + match write_pool.add_to_memory_pool(test_source(), double_spend_transaction) { + Ok(_) => panic!("Expected error when adding double spend, got Ok"), + Err(x) => { + match x { + PoolError::DoubleSpend { + other_tx: _, + spent_output, + } => { + if spent_output != test_output(6).commitment() { + panic!("Unexpected parameter in DoubleSpend: {:?}", x); + } + } + _ => { + panic!("Unexpected error when adding double spend transaction: {:?}", x) + } + }; + } + }; - match write_pool.add_to_memory_pool(test_source(), - double_spend_transaction) { - Ok(_) => panic!("Expected error when adding double spend, got Ok"), - Err(x) => { - match x { - PoolError::DoubleSpend{other_tx: _, spent_output} => { - if spent_output != test_output(6).commitment() { - panic!("Unexpected parameter in DoubleSpend: {:?}", x); - } - }, - _ => panic!("Unexpected error when adding double spend transaction: {:?}", x), - }; - }, - }; + let already_in_pool = test_transaction(vec![5,6], vec![8]); - let already_in_pool = test_transaction(vec![5,6], vec![8]); + match write_pool.add_to_memory_pool(test_source(), already_in_pool) { + Ok(_) => panic!("Expected error when adding already in pool, got Ok"), + Err(x) => { + match x { + PoolError::AlreadyInPool => {} + _ => { + panic!("Unexpected error when adding already in pool tx: {:?}", + x) + } + }; + } - match write_pool.add_to_memory_pool(test_source(), - already_in_pool) { - Ok(_) => panic!("Expected error when adding already in pool, got Ok"), - Err(x) => { - match x { - PoolError::AlreadyInPool => {}, - _ => panic!("Unexpected error when adding already in pool tx: {:?}", - x), - }; - } + }; - }; + assert_eq!(write_pool.total_size(), 1); + } + } - assert_eq!(write_pool.total_size(), 1); - } - } + #[test] + fn test_immature_coinbase() { + let mut dummy_chain = DummyChainImpl::new(); + let coinbase_output = test_coinbase_output(15); + dummy_chain.update_utxo_set(DummyUtxoSet::empty().with_output(coinbase_output)); - #[test] - fn test_immature_coinbase() { - let mut dummy_chain = DummyChainImpl::new(); - let coinbase_output = test_coinbase_output(15); - dummy_chain.update_utxo_set(DummyUtxoSet::empty().with_output(coinbase_output)); + let chain_ref = Arc::new(dummy_chain); + let pool = RwLock::new(test_setup(&chain_ref)); - let chain_ref = Arc::new(dummy_chain); - let pool = RwLock::new(test_setup(&chain_ref)); + { + let mut write_pool = pool.write().unwrap(); - { - let mut write_pool = pool.write().unwrap(); + let coinbase_header = block::BlockHeader { + height: 1, + ..block::BlockHeader::default() + }; + chain_ref.store_header_by_output_commitment( + coinbase_output.commitment(), + &coinbase_header, + ); - let coinbase_header = block::BlockHeader {height: 1, ..block::BlockHeader::default()}; - 
chain_ref.store_header_by_output_commitment(coinbase_output.commitment(), &coinbase_header); + let head_header = block::BlockHeader { + height: 2, + ..block::BlockHeader::default() + }; + chain_ref.store_head_header(&head_header); - let head_header = block::BlockHeader {height: 2, ..block::BlockHeader::default()}; - chain_ref.store_head_header(&head_header); + let txn = test_transaction(vec![15], vec![10, 4]); + let result = write_pool.add_to_memory_pool(test_source(), txn); + match result { + Err(PoolError::ImmatureCoinbase { + header: _, + output: out, + }) => { + assert_eq!(out, coinbase_output.commitment()); + } + _ => panic!("expected ImmatureCoinbase error here"), + }; - let txn = test_transaction(vec![15], vec![10, 4]); - let result = write_pool.add_to_memory_pool(test_source(), txn); - match result { - Err(PoolError::ImmatureCoinbase{header: _, output: out}) => { - assert_eq!(out, coinbase_output.commitment()); - }, - _ => panic!("expected ImmatureCoinbase error here"), - }; + let head_header = block::BlockHeader { + height: 4, + ..block::BlockHeader::default() + }; + chain_ref.store_head_header(&head_header); - let head_header = block::BlockHeader {height: 4, ..block::BlockHeader::default()}; - chain_ref.store_head_header(&head_header); + let txn = test_transaction(vec![15], vec![10, 4]); + let result = write_pool.add_to_memory_pool(test_source(), txn); + match result { + Err(PoolError::ImmatureCoinbase { + header: _, + output: out, + }) => { + assert_eq!(out, coinbase_output.commitment()); + } + _ => panic!("expected ImmatureCoinbase error here"), + }; - let txn = test_transaction(vec![15], vec![10, 4]); - let result = write_pool.add_to_memory_pool(test_source(), txn); - match result { - Err(PoolError::ImmatureCoinbase{header: _, output: out}) => { - assert_eq!(out, coinbase_output.commitment()); - }, - _ => panic!("expected ImmatureCoinbase error here"), - }; + let head_header = block::BlockHeader { + height: 5, + ..block::BlockHeader::default() + }; + chain_ref.store_head_header(&head_header); - let head_header = block::BlockHeader {height: 5, ..block::BlockHeader::default()}; - chain_ref.store_head_header(&head_header); + let txn = test_transaction(vec![15], vec![10, 4]); + let result = write_pool.add_to_memory_pool(test_source(), txn); + match result { + Ok(_) => {} + Err(_) => panic!("this should not return an error here"), + }; + } + } - let txn = test_transaction(vec![15], vec![10, 4]); - let result = write_pool.add_to_memory_pool(test_source(), txn); - match result { - Ok(_) => {}, - Err(_) => panic!("this should not return an error here"), - }; - } - } + #[test] + /// Testing an expected orphan + fn test_add_orphan() { + // TODO we need a test here + } - #[test] - /// Testing an expected orphan - fn test_add_orphan() { - // TODO we need a test here - } + #[test] + /// Testing block reconciliation + fn test_block_reconciliation() { + let mut dummy_chain = DummyChainImpl::new(); - #[test] - /// Testing block reconciliation - fn test_block_reconciliation() { - let mut dummy_chain = DummyChainImpl::new(); + let new_utxo = DummyUtxoSet::empty() + .with_output(test_output(10)) + .with_output(test_output(20)) + .with_output(test_output(30)) + .with_output(test_output(40)); - let new_utxo = DummyUtxoSet::empty(). - with_output(test_output(10)). - with_output(test_output(20)). - with_output(test_output(30)). 
- with_output(test_output(40)); + dummy_chain.update_utxo_set(new_utxo); - dummy_chain.update_utxo_set(new_utxo); + let chain_ref = Arc::new(dummy_chain); - let chain_ref = Arc::new(dummy_chain); + let pool = RwLock::new(test_setup(&chain_ref)); - let pool = RwLock::new(test_setup(&chain_ref)); + // Preparation: We will introduce three root pool transactions. + // 1. A transaction that should be invalidated because it is exactly + // contained in the block. + // 2. A transaction that should be invalidated because the input is + // consumed in the block, although it is not exactly consumed. + // 3. A transaction that should remain after block reconciliation. + let block_transaction = test_transaction(vec![10], vec![8]); + let conflict_transaction = test_transaction(vec![20], vec![12,7]); + let valid_transaction = test_transaction(vec![30], vec![14,15]); - // Preparation: We will introduce a three root pool transactions. - // 1. A transaction that should be invalidated because it is exactly - // contained in the block. - // 2. A transaction that should be invalidated because the input is - // consumed in the block, although it is not exactly consumed. - // 3. A transaction that should remain after block reconciliation. - let block_transaction = test_transaction(vec![10], vec![8]); - let conflict_transaction = test_transaction(vec![20], vec![12,7]); - let valid_transaction = test_transaction(vec![30], vec![14,15]); + // We will also introduce a few children: + // 4. A transaction that descends from transaction 1, that is in + // turn exactly contained in the block. + let block_child = test_transaction(vec![8], vec![4,3]); + // 5. A transaction that descends from transaction 4, that is not + // contained in the block at all and should be valid after + // reconciliation. + let pool_child = test_transaction(vec![4], vec![1]); + // 6. A transaction that descends from transaction 2 that does not + // conflict with anything in the block in any way, but should be + // invalidated (orphaned). + let conflict_child = test_transaction(vec![12], vec![11]); + // 7. A transaction that descends from transaction 2 that should be + // valid due to its inputs being satisfied by the block. + let conflict_valid_child = test_transaction(vec![7], vec![5]); + // 8. A transaction that descends from transaction 3 that should be + // invalidated due to an output conflict. + let valid_child_conflict = test_transaction(vec![14], vec![9]); + // 9. A transaction that descends from transaction 3 that should remain + // valid after reconciliation. + let valid_child_valid = test_transaction(vec![15], vec![13]); + // 10. A transaction that descends from both transaction 6 and + // transaction 9 + let mixed_child = test_transaction(vec![11,13], vec![2]); - // We will also introduce a few children: - // 4. A transaction that descends from transaction 1, that is in - // turn exactly contained in the block. - let block_child = test_transaction(vec![8], vec![4,3]); - // 5. A transaction that descends from transaction 4, that is not - // contained in the block at all and should be valid after - // reconciliation. - let pool_child = test_transaction(vec![4], vec![1]); - // 6. A transaction that descends from transaction 2 that does not - // conflict with anything in the block in any way, but should be - // invalidated (orphaned). - let conflict_child = test_transaction(vec![12], vec![11]); - // 7. A transaction that descends from transaction 2 that should be - // valid due to its inputs being satisfied by the block.
- let conflict_valid_child = test_transaction(vec![7], vec![5]); - // 8. A transaction that descends from transaction 3 that should be - // invalidated due to an output conflict. - let valid_child_conflict = test_transaction(vec![14], vec![9]); - // 9. A transaction that descends from transaction 3 that should remain - // valid after reconciliation. - let valid_child_valid = test_transaction(vec![15], vec![13]); - // 10. A transaction that descends from both transaction 6 and - // transaction 9 - let mixed_child = test_transaction(vec![11,13], vec![2]); - - // Add transactions. - // Note: There are some ordering constraints that must be followed here - // until orphans is 100% implemented. Once the orphans process has - // stabilized, we can mix these up to exercise that path a bit. - let mut txs_to_add = vec![block_transaction, conflict_transaction, + // Add transactions. + // Note: There are some ordering constraints that must be followed here + // until orphans is 100% implemented. Once the orphans process has + // stabilized, we can mix these up to exercise that path a bit. + let mut txs_to_add = vec![block_transaction, conflict_transaction, valid_transaction, block_child, pool_child, conflict_child, conflict_valid_child, valid_child_conflict, valid_child_valid, mixed_child]; - let expected_pool_size = txs_to_add.len(); + let expected_pool_size = txs_to_add.len(); - // First we add the above transactions to the pool; all should be - // accepted. - { - let mut write_pool = pool.write().unwrap(); - assert_eq!(write_pool.total_size(), 0); + // First we add the above transactions to the pool; all should be + // accepted. + { + let mut write_pool = pool.write().unwrap(); + assert_eq!(write_pool.total_size(), 0); - for tx in txs_to_add.drain(..) { - assert!(write_pool.add_to_memory_pool(test_source(), + for tx in txs_to_add.drain(..) { + assert!(write_pool.add_to_memory_pool(test_source(), tx).is_ok()); - } + } - assert_eq!(write_pool.total_size(), expected_pool_size); - } - // Now we prepare the block that will cause the above condition. - // First, the transactions we want in the block: - // - Copy of 1 - let block_tx_1 = test_transaction(vec![10], vec![8]); - // - Conflict w/ 2, satisfies 7 - let block_tx_2 = test_transaction(vec![20], vec![7]); - // - Copy of 4 - let block_tx_3 = test_transaction(vec![8], vec![4,3]); - // - Output conflict w/ 8 - let block_tx_4 = test_transaction(vec![40], vec![9]); - let block_transactions = vec![&block_tx_1, &block_tx_2, &block_tx_3, + assert_eq!(write_pool.total_size(), expected_pool_size); + } + // Now we prepare the block that will cause the above condition. 
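Tallying the scenario above before the block is built: the assertions further down expect six evictions and four survivors; by the numbering in the comments that means transactions 1, 2, 4, 6, 8 and 10 go while 3, 5, 7 and 9 stay (the exact membership is inferred from the expected counts, since txids are not yet deterministic):

    fn main() {
        let added: usize = 10;
        // Per the comments above: 1 and 4 are mined exactly, 2 has an
        // input conflict, 8 an output conflict, and 6 and 10 descend
        // from an evicted parent.
        let evicted = [1, 2, 4, 6, 8, 10].len();
        assert_eq!(evicted, 6);         // matches the reconcile_block assertion
        assert_eq!(added - evicted, 4); // survivors: 3, 5, 7 and 9
    }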
+ // First, the transactions we want in the block: + // - Copy of 1 + let block_tx_1 = test_transaction(vec![10], vec![8]); + // - Conflict w/ 2, satisfies 7 + let block_tx_2 = test_transaction(vec![20], vec![7]); + // - Copy of 4 + let block_tx_3 = test_transaction(vec![8], vec![4,3]); + // - Output conflict w/ 8 + let block_tx_4 = test_transaction(vec![40], vec![9]); + let block_transactions = vec![&block_tx_1, &block_tx_2, &block_tx_3, &block_tx_4]; - let block = block::Block::new(&block::BlockHeader::default(), - block_transactions, key::ONE_KEY).unwrap(); + let block = block::Block::new( + &block::BlockHeader::default(), + block_transactions, + key::ONE_KEY, + ).unwrap(); - chain_ref.apply_block(&block); + chain_ref.apply_block(&block); - // Block reconciliation - { - let mut write_pool = pool.write().unwrap(); + // Block reconciliation + { + let mut write_pool = pool.write().unwrap(); - let evicted_transactions = write_pool.reconcile_block(&block); + let evicted_transactions = write_pool.reconcile_block(&block); - assert!(evicted_transactions.is_ok()); + assert!(evicted_transactions.is_ok()); - assert_eq!(evicted_transactions.unwrap().len(), 6); + assert_eq!(evicted_transactions.unwrap().len(), 6); - // TODO: Txids are not yet deterministic. When they are, we should - // check the specific transactions that were evicted. - } + // TODO: Txids are not yet deterministic. When they are, we should + // check the specific transactions that were evicted. + } - // Using the pool's methods to validate a few end conditions. - { - let read_pool = pool.read().unwrap(); + // Using the pool's methods to validate a few end conditions. + { + let read_pool = pool.read().unwrap(); - assert_eq!(read_pool.total_size(), 4); + assert_eq!(read_pool.total_size(), 4); - // We should have available blockchain outputs at 9 and 3 - expect_output_parent!(read_pool, Parent::BlockTransaction{output: _}, 9, 3); + // We should have available blockchain outputs at 9 and 3 + expect_output_parent!(read_pool, Parent::BlockTransaction{output: _}, 9, 3); - // We should have spent blockchain outputs at 4 and 7 - expect_output_parent!(read_pool, + // We should have spent blockchain outputs at 4 and 7 + expect_output_parent!(read_pool, Parent::AlreadySpent{other_tx: _}, 4, 7); - // We should have spent pool references at 15 - expect_output_parent!(read_pool, + // We should have spent pool references at 15 + expect_output_parent!(read_pool, Parent::AlreadySpent{other_tx: _}, 15); - // We should have unspent pool references at 1, 13, 14 - expect_output_parent!(read_pool, + // We should have unspent pool references at 1, 13, 14 + expect_output_parent!(read_pool, Parent::PoolTransaction{tx_ref: _}, 1, 13, 14); - // References internal to the block should be unknown - expect_output_parent!(read_pool, Parent::Unknown, 8); + // References internal to the block should be unknown + expect_output_parent!(read_pool, Parent::Unknown, 8); - // Evicted transactions should have unknown outputs - expect_output_parent!(read_pool, Parent::Unknown, 2, 11); - } - } + // Evicted transactions should have unknown outputs + expect_output_parent!(read_pool, Parent::Unknown, 2, 11); + } + } - #[test] - /// Test transaction selection and block building. - fn test_block_building() { - // Add a handful of transactions - let mut dummy_chain = DummyChainImpl::new(); + #[test] + /// Test transaction selection and block building. 
+ fn test_block_building() { + // Add a handful of transactions + let mut dummy_chain = DummyChainImpl::new(); - let new_utxo = DummyUtxoSet::empty(). - with_output(test_output(10)). - with_output(test_output(20)). - with_output(test_output(30)). - with_output(test_output(40)); + let new_utxo = DummyUtxoSet::empty() + .with_output(test_output(10)) + .with_output(test_output(20)) + .with_output(test_output(30)) + .with_output(test_output(40)); - dummy_chain.update_utxo_set(new_utxo); + dummy_chain.update_utxo_set(new_utxo); - let chain_ref = Arc::new(dummy_chain); + let chain_ref = Arc::new(dummy_chain); - let pool = RwLock::new(test_setup(&chain_ref)); + let pool = RwLock::new(test_setup(&chain_ref)); - let root_tx_1 = test_transaction(vec![10,20], vec![25]); - let root_tx_2 = test_transaction(vec![30], vec![28]); - let root_tx_3 = test_transaction(vec![40], vec![38]); + let root_tx_1 = test_transaction(vec![10,20], vec![25]); + let root_tx_2 = test_transaction(vec![30], vec![28]); + let root_tx_3 = test_transaction(vec![40], vec![38]); - let child_tx_1 = test_transaction(vec![25],vec![23]); - let child_tx_2 = test_transaction(vec![38],vec![32]); + let child_tx_1 = test_transaction(vec![25], vec![23]); + let child_tx_2 = test_transaction(vec![38], vec![32]); - { - let mut write_pool = pool.write().unwrap(); - assert_eq!(write_pool.total_size(), 0); + { + let mut write_pool = pool.write().unwrap(); + assert_eq!(write_pool.total_size(), 0); - assert!(write_pool.add_to_memory_pool(test_source(), + assert!(write_pool.add_to_memory_pool(test_source(), root_tx_1).is_ok()); - assert!(write_pool.add_to_memory_pool(test_source(), + assert!(write_pool.add_to_memory_pool(test_source(), root_tx_2).is_ok()); - assert!(write_pool.add_to_memory_pool(test_source(), + assert!(write_pool.add_to_memory_pool(test_source(), root_tx_3).is_ok()); - assert!(write_pool.add_to_memory_pool(test_source(), + assert!(write_pool.add_to_memory_pool(test_source(), child_tx_1).is_ok()); - assert!(write_pool.add_to_memory_pool(test_source(), + assert!(write_pool.add_to_memory_pool(test_source(), child_tx_2).is_ok()); - assert_eq!(write_pool.total_size(), 5); - } + assert_eq!(write_pool.total_size(), 5); + } - // Request blocks - let block: block::Block; - let mut txs: Vec>; - { - let read_pool = pool.read().unwrap(); - txs = read_pool.prepare_mineable_transactions(3); - assert_eq!(txs.len(), 3); - // TODO: This is ugly, either make block::new take owned - // txs instead of mut refs, or change - // prepare_mineable_transactions to return mut refs - let block_txs: Vec = txs.drain(..).map(|x| *x).collect(); - let tx_refs = block_txs.iter().collect(); - block = block::Block::new(&block::BlockHeader::default(), - tx_refs, key::ONE_KEY).unwrap(); - } + // Request blocks + let block: block::Block; + let mut txs: Vec>; + { + let read_pool = pool.read().unwrap(); + txs = read_pool.prepare_mineable_transactions(3); + assert_eq!(txs.len(), 3); + // TODO: This is ugly, either make block::new take owned + // txs instead of mut refs, or change + // prepare_mineable_transactions to return mut refs + let block_txs: Vec = txs.drain(..).map(|x| *x).collect(); + let tx_refs = block_txs.iter().collect(); + block = block::Block::new(&block::BlockHeader::default(), tx_refs, key::ONE_KEY) + .unwrap(); + } - chain_ref.apply_block(&block); - // Reconcile block - { - let mut write_pool = pool.write().unwrap(); + chain_ref.apply_block(&block); + // Reconcile block + { + let mut write_pool = pool.write().unwrap(); - let evicted_transactions = 
write_pool.reconcile_block(&block); + let evicted_transactions = write_pool.reconcile_block(&block); - assert!(evicted_transactions.is_ok()); + assert!(evicted_transactions.is_ok()); - assert_eq!(evicted_transactions.unwrap().len(), 3); - assert_eq!(write_pool.total_size(), 2); - } + assert_eq!(evicted_transactions.unwrap().len(), 3); + assert_eq!(write_pool.total_size(), 2); + } - } + } - fn test_setup(dummy_chain: &Arc) -> TransactionPool { - TransactionPool{ - transactions: HashMap::new(), - pool: Pool::empty(), - orphans: Orphans::empty(), - blockchain: dummy_chain.clone(), - } - } + fn test_setup(dummy_chain: &Arc) -> TransactionPool { + TransactionPool { + transactions: HashMap::new(), + pool: Pool::empty(), + orphans: Orphans::empty(), + blockchain: dummy_chain.clone(), + } + } - /// Cobble together a test transaction for testing the transaction pool. - /// - /// Connectivity here is the most important element. - /// Every output is given a blinding key equal to its value, so that the - /// entire commitment can be derived deterministically from just the value. - /// - /// Fees are the remainder between input and output values, so the numbers - /// should make sense. - fn test_transaction(input_values: Vec, output_values: Vec) -> transaction::Transaction { - let fees: i64 = input_values.iter().sum::() as i64 - output_values.iter().sum::() as i64; - assert!(fees >= 0); + /// Cobble together a test transaction for testing the transaction pool. + /// + /// Connectivity here is the most important element. + /// Every output is given a blinding key equal to its value, so that the + /// entire commitment can be derived deterministically from just the value. + /// + /// Fees are the remainder between input and output values, so the numbers + /// should make sense. 
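The fee convention documented here is easy to sanity-check: the fee is simply the input total minus the output total, and must be non-negative. Using the parent transaction from test_basic_pool_add:

    fn main() {
        let inputs: u64 = [5u64, 6, 7].iter().sum();  // parent spends 5, 6 and 7
        let outputs: u64 = [11u64, 4].iter().sum();   // and creates 11 and 4
        assert_eq!(inputs - outputs, 3);              // the implied fee
    }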
+ fn test_transaction( + input_values: Vec, + output_values: Vec, + ) -> transaction::Transaction { + let fees: i64 = input_values.iter().sum::() as i64 - + output_values.iter().sum::() as i64; + assert!(fees >= 0); - let mut tx_elements = Vec::new(); + let mut tx_elements = Vec::new(); - for input_value in input_values { - tx_elements.push(build::input(input_value, test_key(input_value))); - } + for input_value in input_values { + tx_elements.push(build::input(input_value, test_key(input_value))); + } - for output_value in output_values { - tx_elements.push(build::output(output_value, test_key(output_value))); - } - tx_elements.push(build::with_fee(fees as u64)); + for output_value in output_values { + tx_elements.push(build::output(output_value, test_key(output_value))); + } + tx_elements.push(build::with_fee(fees as u64)); - let (tx, _) = build::transaction(tx_elements).unwrap(); - tx - } + let (tx, _) = build::transaction(tx_elements).unwrap(); + tx + } - /// Deterministically generate an output defined by our test scheme - fn test_output(value: u64) -> transaction::Output { - let ec = Secp256k1::with_caps(ContextFlag::Commit); - let output_key = test_key(value); - let output_commitment = ec.commit(value, output_key).unwrap(); - transaction::Output{ - features: transaction::DEFAULT_OUTPUT, - commit: output_commitment, - proof: ec.range_proof(0, value, output_key, output_commitment, ec.nonce())} - } + /// Deterministically generate an output defined by our test scheme + fn test_output(value: u64) -> transaction::Output { + let ec = Secp256k1::with_caps(ContextFlag::Commit); + let output_key = test_key(value); + let output_commitment = ec.commit(value, output_key).unwrap(); + transaction::Output { + features: transaction::DEFAULT_OUTPUT, + commit: output_commitment, + proof: ec.range_proof(0, value, output_key, output_commitment, ec.nonce()), + } + } - /// Deterministically generate a coinbase output defined by our test scheme - fn test_coinbase_output(value: u64) -> transaction::Output { - let ec = Secp256k1::with_caps(ContextFlag::Commit); - let output_key = test_key(value); - let output_commitment = ec.commit(value, output_key).unwrap(); - transaction::Output{ - features: transaction::COINBASE_OUTPUT, - commit: output_commitment, - proof: ec.range_proof(0, value, output_key, output_commitment, ec.nonce())} - } + /// Deterministically generate a coinbase output defined by our test scheme + fn test_coinbase_output(value: u64) -> transaction::Output { + let ec = Secp256k1::with_caps(ContextFlag::Commit); + let output_key = test_key(value); + let output_commitment = ec.commit(value, output_key).unwrap(); + transaction::Output { + features: transaction::COINBASE_OUTPUT, + commit: output_commitment, + proof: ec.range_proof(0, value, output_key, output_commitment, ec.nonce()), + } + } - /// Makes a SecretKey from a single u64 - fn test_key(value: u64) -> key::SecretKey { - let ec = Secp256k1::with_caps(ContextFlag::Commit); - // SecretKey takes a SECRET_KEY_SIZE slice of u8. - assert!(constants::SECRET_KEY_SIZE > 8); + /// Makes a SecretKey from a single u64 + fn test_key(value: u64) -> key::SecretKey { + let ec = Secp256k1::with_caps(ContextFlag::Commit); + // SecretKey takes a SECRET_KEY_SIZE slice of u8. 
+ assert!(constants::SECRET_KEY_SIZE > 8); - // (SECRET_KEY_SIZE - 8) zeros, followed by value as a big-endian byte - // sequence - let mut key_slice = vec![0;constants::SECRET_KEY_SIZE - 8]; + // (SECRET_KEY_SIZE - 8) zeros, followed by value as a big-endian byte + // sequence + let mut key_slice = vec![0;constants::SECRET_KEY_SIZE - 8]; - key_slice.push((value >> 56) as u8); - key_slice.push((value >> 48) as u8); - key_slice.push((value >> 40) as u8); - key_slice.push((value >> 32) as u8); - key_slice.push((value >> 24) as u8); - key_slice.push((value >> 16) as u8); - key_slice.push((value >> 8) as u8); - key_slice.push(value as u8); + key_slice.push((value >> 56) as u8); + key_slice.push((value >> 48) as u8); + key_slice.push((value >> 40) as u8); + key_slice.push((value >> 32) as u8); + key_slice.push((value >> 24) as u8); + key_slice.push((value >> 16) as u8); + key_slice.push((value >> 8) as u8); + key_slice.push(value as u8); - key::SecretKey::from_slice(&ec, &key_slice).unwrap() - } + key::SecretKey::from_slice(&ec, &key_slice).unwrap() + } - /// A generic TxSource representing a test - fn test_source() -> TxSource{ - TxSource{ - debug_name: "test".to_string(), - identifier: "127.0.0.1".to_string(), - } - } + /// A generic TxSource representing a test + fn test_source() -> TxSource { + TxSource { + debug_name: "test".to_string(), + identifier: "127.0.0.1".to_string(), + } + } } diff --git a/pool/src/types.rs b/pool/src/types.rs index 795c59743..0918d957d 100644 --- a/pool/src/types.rs +++ b/pool/src/types.rs @@ -37,90 +37,93 @@ use core::core::hash; /// Most likely this will evolve to contain some sort of network identifier, /// once we get a better sense of what transaction building might look like. pub struct TxSource { - /// Human-readable name used for logging and errors. - pub debug_name: String, - /// Unique identifier used to distinguish this peer from others. - pub identifier: String, + /// Human-readable name used for logging and errors. + pub debug_name: String, + /// Unique identifier used to distinguish this peer from others. + pub identifier: String, } /// This enum describes the parent for a given input of a transaction. 
#[derive(Clone)] pub enum Parent { - Unknown, - BlockTransaction{output: transaction::Output}, - PoolTransaction{tx_ref: hash::Hash}, - AlreadySpent{other_tx: hash::Hash}, + Unknown, + BlockTransaction { output: transaction::Output }, + PoolTransaction { tx_ref: hash::Hash }, + AlreadySpent { other_tx: hash::Hash }, } impl fmt::Debug for Parent { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - &Parent::Unknown => write!(f, "Parent: Unknown"), - &Parent::BlockTransaction{output: _} => write!(f, "Parent: Block Transaction"), - &Parent::PoolTransaction{tx_ref: x} => write!(f, - "Parent: Pool Transaction ({:?})", x), - &Parent::AlreadySpent{other_tx: x} => write!(f, - "Parent: Already Spent By {:?}", x), - } - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + &Parent::Unknown => write!(f, "Parent: Unknown"), + &Parent::BlockTransaction { output: _ } => write!(f, "Parent: Block Transaction"), + &Parent::PoolTransaction { tx_ref: x } => { + write!(f, "Parent: Pool Transaction ({:?})", x) + } + &Parent::AlreadySpent { other_tx: x } => write!(f, "Parent: Already Spent By {:?}", x), + } + } } // TODO document this enum more accurately /// Enum of errors #[derive(Debug)] pub enum PoolError { - /// An invalid pool entry - Invalid, - /// An entry already in the pool - AlreadyInPool, - /// A duplicate output - DuplicateOutput{ - /// The other transaction - other_tx: Option, - /// Is in chain? - in_chain: bool, - /// The output - output: Commitment - }, - /// A double spend - DoubleSpend{ - /// The other transaction - other_tx: hash::Hash, - /// The spent output - spent_output: Commitment - }, - /// Attempt to spend a coinbase output before it matures (1000 blocks?) - ImmatureCoinbase{ - /// The block header of the block containing the coinbase output - header: block::BlockHeader, - /// The unspent coinbase output - output: Commitment, - }, - /// An orphan successfully added to the orphans set - OrphanTransaction, - /// TODO - wip, just getting imports working, remove this and use more specific errors - GenericPoolError, - /// TODO - is this the right level of abstraction for pool errors? - OutputNotFound, - /// TODO - is this the right level of abstraction for pool errors? - OutputSpent, + /// An invalid pool entry + Invalid, + /// An entry already in the pool + AlreadyInPool, + /// A duplicate output + DuplicateOutput { + /// The other transaction + other_tx: Option, + /// Is in chain? + in_chain: bool, + /// The output + output: Commitment, + }, + /// A double spend + DoubleSpend { + /// The other transaction + other_tx: hash::Hash, + /// The spent output + spent_output: Commitment, + }, + /// Attempt to spend a coinbase output before it matures (1000 blocks?) + ImmatureCoinbase { + /// The block header of the block containing the coinbase output + header: block::BlockHeader, + /// The unspent coinbase output + output: Commitment, + }, + /// An orphan successfully added to the orphans set + OrphanTransaction, + /// TODO - wip, just getting imports working, remove this and use more + /// specific errors + GenericPoolError, + /// TODO - is this the right level of abstraction for pool errors? + OutputNotFound, + /// TODO - is this the right level of abstraction for pool errors? + OutputSpent, } /// Interface that the pool requires from a blockchain implementation. pub trait BlockChain { - /// Get an unspent output by its commitment. Will return None if the output - /// is spent or if it doesn't exist. 
The blockchain is expected to produce - /// a result with its current view of the most worked chain, ignoring - /// orphans, etc. - fn get_unspent(&self, output_ref: &Commitment) - -> Result; + /// Get an unspent output by its commitment. Will return None if the output + /// is spent or if it doesn't exist. The blockchain is expected to produce + /// a result with its current view of the most worked chain, ignoring + /// orphans, etc. + fn get_unspent(&self, output_ref: &Commitment) -> Result; - /// Get the block header by output commitment (needed for spending coinbase after n blocks) - fn get_block_header_by_output_commit(&self, commit: &Commitment) - -> Result; + /// Get the block header by output commitment (needed for spending coinbase + /// after n blocks) + fn get_block_header_by_output_commit( + &self, + commit: &Commitment, + ) -> Result; - /// Get the block header at the head - fn head_header(&self) -> Result; + /// Get the block header at the head + fn head_header(&self) -> Result; } /// Pool contains the elements of the graph that are connected, in full, to @@ -135,230 +138,270 @@ pub trait BlockChain { /// connections are in the pool edge set, while unspent (dangling) references /// exist in the available_outputs set. pub struct Pool { - graph : graph::DirectedGraph, + graph: graph::DirectedGraph, - // available_outputs are unspent outputs of the current pool set, - // maintained as edges with empty destinations, keyed by the - // output's hash. - available_outputs: HashMap, + // available_outputs are unspent outputs of the current pool set, + // maintained as edges with empty destinations, keyed by the + // output's hash. + available_outputs: HashMap, - // Consumed blockchain utxo's are kept in a separate map. - consumed_blockchain_outputs: HashMap + // Consumed blockchain utxos are kept in a separate map. + consumed_blockchain_outputs: HashMap, } impl Pool { - pub fn empty() -> Pool { - Pool{ - graph: graph::DirectedGraph::empty(), - available_outputs: HashMap::new(), - consumed_blockchain_outputs: HashMap::new(), - } - } + pub fn empty() -> Pool { + Pool { + graph: graph::DirectedGraph::empty(), + available_outputs: HashMap::new(), + consumed_blockchain_outputs: HashMap::new(), + } + } - /// Given an output, check if a spending reference (input -> output) - /// already exists in the pool.
+ /// Returns the transaction (kernel) hash corresponding to the conflicting + /// transaction + pub fn check_double_spend(&self, o: &transaction::Output) -> Option { + self.graph + .get_edge_by_commitment(&o.commitment()) + .or(self.consumed_blockchain_outputs.get(&o.commitment())) + .map(|x| x.destination_hash().unwrap()) + } - pub fn get_blockchain_spent(&self, c: &Commitment) -> Option<&graph::Edge> { - self.consumed_blockchain_outputs.get(c) - } + pub fn get_blockchain_spent(&self, c: &Commitment) -> Option<&graph::Edge> { + self.consumed_blockchain_outputs.get(c) + } - pub fn add_pool_transaction(&mut self, pool_entry: graph::PoolEntry, - mut blockchain_refs: Vec, pool_refs: Vec, - mut new_unspents: Vec) { + pub fn add_pool_transaction( + &mut self, + pool_entry: graph::PoolEntry, + mut blockchain_refs: Vec, + pool_refs: Vec, + mut new_unspents: Vec, + ) { - // Removing consumed available_outputs - for new_edge in &pool_refs { - // All of these should correspond to an existing unspent - assert!(self.available_outputs.remove(&new_edge.output_commitment()).is_some()); - } + // Removing consumed available_outputs + for new_edge in &pool_refs { + // All of these should correspond to an existing unspent + assert!( + self.available_outputs + .remove(&new_edge.output_commitment()) + .is_some() + ); + } - // Accounting for consumed blockchain outputs - for new_blockchain_edge in blockchain_refs.drain(..) { - self.consumed_blockchain_outputs.insert( - new_blockchain_edge.output_commitment(), - new_blockchain_edge); - } + // Accounting for consumed blockchain outputs + for new_blockchain_edge in blockchain_refs.drain(..) { + self.consumed_blockchain_outputs.insert( + new_blockchain_edge + .output_commitment(), + new_blockchain_edge, + ); + } - // Adding the transaction to the vertices list along with internal - // pool edges - self.graph.add_entry(pool_entry, pool_refs); + // Adding the transaction to the vertices list along with internal + // pool edges + self.graph.add_entry(pool_entry, pool_refs); - // Adding the new unspents to the unspent map - for unspent_output in new_unspents.drain(..) { - self.available_outputs.insert( - unspent_output.output_commitment(), unspent_output); - } - } + // Adding the new unspents to the unspent map + for unspent_output in new_unspents.drain(..) 
{ + self.available_outputs.insert( + unspent_output.output_commitment(), + unspent_output, + ); + } + } - pub fn remove_pool_transaction(&mut self, tx: &transaction::Transaction, - marked_txs: &HashMap) { + pub fn remove_pool_transaction( + &mut self, + tx: &transaction::Transaction, + marked_txs: &HashMap, + ) { - self.graph.remove_vertex(graph::transaction_identifier(tx)); + self.graph.remove_vertex(graph::transaction_identifier(tx)); - for input in tx.inputs.iter().map(|x| x.commitment()) { - match self.graph.remove_edge_by_commitment(&input) { - Some(x) => { - if !marked_txs.contains_key(&x.source_hash().unwrap()) { - self.available_outputs.insert(x.output_commitment(), - x.with_destination(None)); - } - }, - None => { - self.consumed_blockchain_outputs.remove(&input); - }, - }; - } + for input in tx.inputs.iter().map(|x| x.commitment()) { + match self.graph.remove_edge_by_commitment(&input) { + Some(x) => { + if !marked_txs.contains_key(&x.source_hash().unwrap()) { + self.available_outputs.insert( + x.output_commitment(), + x.with_destination(None), + ); + } + } + None => { + self.consumed_blockchain_outputs.remove(&input); + } + }; + } - for output in tx.outputs.iter().map(|x| x.commitment()) { - match self.graph.remove_edge_by_commitment(&output) { - Some(x) => { - if !marked_txs.contains_key( - &x.destination_hash().unwrap()) { + for output in tx.outputs.iter().map(|x| x.commitment()) { + match self.graph.remove_edge_by_commitment(&output) { + Some(x) => { + if !marked_txs.contains_key(&x.destination_hash().unwrap()) { - self.consumed_blockchain_outputs.insert( - x.output_commitment(), - x.with_source(None)); - } - }, - None => { - self.available_outputs.remove(&output); - } - }; - } - } + self.consumed_blockchain_outputs.insert( + x.output_commitment(), + x.with_source(None), + ); + } + } + None => { + self.available_outputs.remove(&output); + } + }; + } + } - /// Simplest possible implementation: just return the roots - pub fn get_mineable_transactions(&self, num_to_fetch: u32) -> Vec { - let mut roots = self.graph.get_roots(); - roots.truncate(num_to_fetch as usize); - roots - } + /// Simplest possible implementation: just return the roots + pub fn get_mineable_transactions(&self, num_to_fetch: u32) -> Vec { + let mut roots = self.graph.get_roots(); + roots.truncate(num_to_fetch as usize); + roots + } } impl TransactionGraphContainer for Pool { - fn get_graph(&self) -> &graph::DirectedGraph { - &self.graph - } - fn get_available_output(&self, output: &Commitment) -> Option<&graph::Edge> { - self.available_outputs.get(output) - } - fn get_external_spent_output(&self, output: &Commitment) -> Option<&graph::Edge> { - self.consumed_blockchain_outputs.get(output) - } - fn get_internal_spent_output(&self, output: &Commitment) -> Option<&graph::Edge> { - self.graph.get_edge_by_commitment(output) - } + fn get_graph(&self) -> &graph::DirectedGraph { + &self.graph + } + fn get_available_output(&self, output: &Commitment) -> Option<&graph::Edge> { + self.available_outputs.get(output) + } + fn get_external_spent_output(&self, output: &Commitment) -> Option<&graph::Edge> { + self.consumed_blockchain_outputs.get(output) + } + fn get_internal_spent_output(&self, output: &Commitment) -> Option<&graph::Edge> { + self.graph.get_edge_by_commitment(output) + } } /// Orphans contains the elements of the transaction graph that have not been /// connected in full to the blockchain. 
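// Illustrative pre-flight check for add_pool_transaction above: every
// pool_ref must point at an existing available output, mirroring the
// assert! inside that method. refs_are_available is a hypothetical helper,
// not part of this patch; has_available_output comes from the
// TransactionGraphContainer impl for Pool.
fn refs_are_available(pool: &Pool, pool_refs: &[graph::Edge]) -> bool {
    pool_refs
        .iter()
        .all(|e| pool.has_available_output(&e.output_commitment()))
}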
pub struct Orphans { - graph : graph::DirectedGraph, + graph: graph::DirectedGraph, - // available_outputs are unspent outputs of the current orphan set, - // maintained as edges with empty destinations. - available_outputs: HashMap, + // available_outputs are unspent outputs of the current orphan set, + // maintained as edges with empty destinations. + available_outputs: HashMap, - // missing_outputs are spending references (inputs) with missing - // corresponding outputs, maintained as edges with empty sources. - missing_outputs: HashMap, + // missing_outputs are spending references (inputs) with missing + // corresponding outputs, maintained as edges with empty sources. + missing_outputs: HashMap, - // pool_connections are bidirectional edges which connect to the pool - // graph. They should map one-to-one to pool graph available_outputs. - // pool_connections should not be viewed authoritatively, they are - // merely informational until the transaction is officially connected to - // the pool. - pool_connections: HashMap, + // pool_connections are bidirectional edges which connect to the pool + // graph. They should map one-to-one to pool graph available_outputs. + // pool_connections should not be viewed authoritatively, they are + // merely informational until the transaction is officially connected to + // the pool. + pool_connections: HashMap, } impl Orphans { - pub fn empty() -> Orphans { - Orphans{ - graph: graph::DirectedGraph::empty(), - available_outputs : HashMap::new(), - missing_outputs: HashMap::new(), - pool_connections: HashMap::new(), - } - } + pub fn empty() -> Orphans { + Orphans { + graph: graph::DirectedGraph::empty(), + available_outputs: HashMap::new(), + missing_outputs: HashMap::new(), + pool_connections: HashMap::new(), + } + } - /// Checks for a double spent output, given the hash of the output, - /// ONLY in the data maintained by the orphans set. This includes links - /// to the pool as well as links internal to orphan transactions. - /// Returns the transaction hash corresponding to the conflicting - /// transaction. - pub fn check_double_spend(&self, o: transaction::Output) -> Option { - self.graph.get_edge_by_commitment(&o.commitment()).or(self.pool_connections.get(&o.commitment())).map(|x| x.destination_hash().unwrap()) - } + /// Checks for a double spent output, given the hash of the output, + /// ONLY in the data maintained by the orphans set. This includes links + /// to the pool as well as links internal to orphan transactions. + /// Returns the transaction hash corresponding to the conflicting + /// transaction. + pub fn check_double_spend(&self, o: transaction::Output) -> Option { + self.graph + .get_edge_by_commitment(&o.commitment()) + .or(self.pool_connections.get(&o.commitment())) + .map(|x| x.destination_hash().unwrap()) + } - pub fn get_unknown_output(&self, output: &Commitment) -> Option<&graph::Edge> { - self.missing_outputs.get(output) - } + pub fn get_unknown_output(&self, output: &Commitment) -> Option<&graph::Edge> { + self.missing_outputs.get(output) + } - /// Add an orphan transaction to the orphans set. - /// - /// This method adds a given transaction (represented by the PoolEntry at - /// orphan_entry) to the orphans set. - /// - /// This method has no failure modes. All checks should be passed before - /// entry. - /// - /// Expects a HashMap at is_missing describing the indices of orphan_refs - /// which correspond to missing (vs orphan-to-orphan) links. 
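// Hedged sketch of how a caller might use check_double_spend above before
// adding an orphan, assuming its elided return type is Option<hash::Hash>.
// guard_against_double_spend and `candidate` are illustrative names only.
fn guard_against_double_spend(
    orphans: &Orphans,
    candidate: transaction::Output,
) -> Result<(), PoolError> {
    // Take the commitment first; check_double_spend consumes the output.
    let commit = candidate.commitment();
    if let Some(conflict) = orphans.check_double_spend(candidate) {
        return Err(PoolError::DoubleSpend {
            other_tx: conflict,
            spent_output: commit,
        });
    }
    Ok(())
}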
- pub fn add_orphan_transaction(&mut self, orphan_entry: graph::PoolEntry, - mut pool_refs: Vec, mut orphan_refs: Vec, - is_missing: HashMap, mut new_unspents: Vec) { + /// Add an orphan transaction to the orphans set. + /// + /// This method adds a given transaction (represented by the PoolEntry at + /// orphan_entry) to the orphans set. + /// + /// This method has no failure modes. All checks should be passed before + /// entry. + /// + /// Expects a HashMap at is_missing describing the indices of orphan_refs + /// which correspond to missing (vs orphan-to-orphan) links. + pub fn add_orphan_transaction( + &mut self, + orphan_entry: graph::PoolEntry, + mut pool_refs: Vec, + mut orphan_refs: Vec, + is_missing: HashMap, + mut new_unspents: Vec, + ) { - // Removing consumed available_outputs - for (i, new_edge) in orphan_refs.drain(..).enumerate() { - if is_missing.contains_key(&i) { - self.missing_outputs.insert(new_edge.output_commitment(), - new_edge); - } else { - assert!(self.available_outputs.remove(&new_edge.output_commitment()).is_some()); - self.graph.add_edge_only(new_edge); - } - } + // Removing consumed available_outputs + for (i, new_edge) in orphan_refs.drain(..).enumerate() { + if is_missing.contains_key(&i) { + self.missing_outputs.insert( + new_edge.output_commitment(), + new_edge, + ); + } else { + assert!( + self.available_outputs + .remove(&new_edge.output_commitment()) + .is_some() + ); + self.graph.add_edge_only(new_edge); + } + } - // Accounting for consumed blockchain and pool outputs - for external_edge in pool_refs.drain(..) { - self.pool_connections.insert( - external_edge.output_commitment(), external_edge); - } + // Accounting for consumed blockchain and pool outputs + for external_edge in pool_refs.drain(..) { + self.pool_connections.insert( + external_edge.output_commitment(), + external_edge, + ); + } - // if missing_refs is the same length as orphan_refs, we have - // no orphan-orphan links for this transaction and it is a - // root transaction of the orphans set - self.graph.add_vertex_only(orphan_entry, - is_missing.len() == orphan_refs.len()); + // if missing_refs is the same length as orphan_refs, we have + // no orphan-orphan links for this transaction and it is a + // root transaction of the orphans set + self.graph.add_vertex_only( + orphan_entry, + is_missing.len() == orphan_refs.len(), + ); - // Adding the new unspents to the unspent map - for unspent_output in new_unspents.drain(..) { - self.available_outputs.insert( - unspent_output.output_commitment(), unspent_output); - } - } + // Adding the new unspents to the unspent map + for unspent_output in new_unspents.drain(..) 
{ + self.available_outputs.insert( + unspent_output.output_commitment(), + unspent_output, + ); + } + } } impl TransactionGraphContainer for Orphans { - fn get_graph(&self) -> &graph::DirectedGraph { - &self.graph - } - fn get_available_output(&self, output: &Commitment) -> Option<&graph::Edge> { - self.available_outputs.get(output) - } - fn get_external_spent_output(&self, output: &Commitment) -> Option<&graph::Edge> { - self.pool_connections.get(output) - } - fn get_internal_spent_output(&self, output: &Commitment) -> Option<&graph::Edge> { - self.graph.get_edge_by_commitment(output) - } + fn get_graph(&self) -> &graph::DirectedGraph { + &self.graph + } + fn get_available_output(&self, output: &Commitment) -> Option<&graph::Edge> { + self.available_outputs.get(output) + } + fn get_external_spent_output(&self, output: &Commitment) -> Option<&graph::Edge> { + self.pool_connections.get(output) + } + fn get_internal_spent_output(&self, output: &Commitment) -> Option<&graph::Edge> { + self.graph.get_edge_by_commitment(output) + } } /// Trait for types that embed a graph and connect to external state. @@ -382,44 +425,43 @@ impl TransactionGraphContainer for Orphans { /// in the child. This ensures that no descendent set must modify state in a /// set of higher priority. pub trait TransactionGraphContainer { - /// Accessor for graph object - fn get_graph(&self) -> &graph::DirectedGraph; - /// Accessor for internal spents - fn get_internal_spent_output(&self, output: &Commitment) -> Option<&graph::Edge>; - /// Accessor for external unspents - fn get_available_output(&self, output: &Commitment) -> Option<&graph::Edge>; - /// Accessor for external spents - fn get_external_spent_output(&self, output: &Commitment) -> Option<&graph::Edge>; + /// Accessor for graph object + fn get_graph(&self) -> &graph::DirectedGraph; + /// Accessor for internal spents + fn get_internal_spent_output(&self, output: &Commitment) -> Option<&graph::Edge>; + /// Accessor for external unspents + fn get_available_output(&self, output: &Commitment) -> Option<&graph::Edge>; + /// Accessor for external spents + fn get_external_spent_output(&self, output: &Commitment) -> Option<&graph::Edge>; - /// Checks if the available_output set has the output at the given - /// commitment - fn has_available_output(&self, c: &Commitment) -> bool { - self.get_available_output(c).is_some() - } + /// Checks if the available_output set has the output at the given + /// commitment + fn has_available_output(&self, c: &Commitment) -> bool { + self.get_available_output(c).is_some() + } - /// Checks if the pool has anything by this output already, between - /// available outputs and internal ones. - fn find_output(&self, c: &Commitment) -> Option { - self.get_available_output(c). - or(self.get_internal_spent_output(c)). - map(|x| x.source_hash().unwrap()) - } + /// Checks if the pool has anything by this output already, between + /// available outputs and internal ones. 
+ fn find_output(&self, c: &Commitment) -> Option { + self.get_available_output(c) + .or(self.get_internal_spent_output(c)) + .map(|x| x.source_hash().unwrap()) + } - /// Search for a spent reference internal to the graph - fn get_internal_spent(&self, c: &Commitment) -> Option<&graph::Edge> { - self.get_internal_spent_output(c) - } + /// Search for a spent reference internal to the graph + fn get_internal_spent(&self, c: &Commitment) -> Option<&graph::Edge> { + self.get_internal_spent_output(c) + } - fn num_root_transactions(&self) -> usize { - self.get_graph().len_roots() - } + fn num_root_transactions(&self) -> usize { + self.get_graph().len_roots() + } - fn num_transactions(&self) -> usize { - self.get_graph().len_vertices() - } - - fn num_output_edges(&self) -> usize { - self.get_graph().len_edges() - } + fn num_transactions(&self) -> usize { + self.get_graph().len_vertices() + } + fn num_output_edges(&self) -> usize { + self.get_graph().len_edges() + } } diff --git a/pow/src/cuckoo.rs b/pow/src/cuckoo.rs index 5eddbcea8..acd240065 100644 --- a/pow/src/cuckoo.rs +++ b/pow/src/cuckoo.rs @@ -57,8 +57,8 @@ impl Cuckoo { /// serialized block header. pub fn new(header: &[u8], sizeshift: u32) -> Cuckoo { let size = 1 << sizeshift; - let hashed=blake2::blake2b::blake2b(32, &[], header); - let hashed=hashed.as_bytes(); + let hashed = blake2::blake2b::blake2b(32, &[], header); + let hashed = hashed.as_bytes(); let k0 = u8_to_u64(hashed, 0); let k1 = u8_to_u64(hashed, 8); @@ -157,11 +157,8 @@ pub struct Miner { } impl MiningWorker for Miner { - /// Creates a new miner - fn new(ease: u32, - sizeshift: u32, - proof_size: usize) -> Miner { + fn new(ease: u32, sizeshift: u32, proof_size: usize) -> Miner { let size = 1 << sizeshift; let graph = vec![0; size + 1]; let easiness = (ease as u64) * (size as u64) / 100; @@ -173,11 +170,11 @@ impl MiningWorker for Miner { proof_size: proof_size, } } - + fn mine(&mut self, header: &[u8]) -> Result { let size = 1 << self.sizeshift; self.graph = vec![0; size + 1]; - self.cuckoo=Some(Cuckoo::new(header, self.sizeshift)); + self.cuckoo = Some(Cuckoo::new(header, self.sizeshift)); self.mine_impl() } } @@ -193,8 +190,6 @@ enum CycleSol { } impl Miner { - - /// Searches for a solution pub fn mine_impl(&mut self) -> Result { let mut us = [0; MAXPATHLEN]; @@ -214,7 +209,7 @@ impl Miner { match sol { CycleSol::ValidProof(res) => { return Ok(Proof::new(res.to_vec())); - }, + } CycleSol::InvalidCycle(_) => continue, CycleSol::NoCycle => { self.update_graph(nu, &us, nv, &vs); @@ -317,10 +312,10 @@ impl Miner { /// Utility to transform a 8 bytes of a byte array into a u64. 
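// Quick sanity check of the u8_to_u64 helper defined just below, as a
// sketch that could live in the test module: the reconstruction is
// little-endian, so byte 0 is the least significant.
#[test]
fn u8_to_u64_is_little_endian() {
    let mut p = [0u8; 8];
    p[0] = 1;
    assert_eq!(u8_to_u64(&p, 0), 1);
    assert_eq!(u8_to_u64(&[0xff; 8], 0), u64::max_value());
}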
-fn u8_to_u64(p:&[u8], i: usize) -> u64 { - (p[i] as u64) | (p[i + 1] as u64) << 8 | (p[i + 2] as u64) << 16 | (p[i + 3] as u64) << 24 | - (p[i + 4] as u64) << 32 | (p[i + 5] as u64) << 40 | - (p[i + 6] as u64) << 48 | (p[i + 7] as u64) << 56 +fn u8_to_u64(p: &[u8], i: usize) -> u64 { + (p[i] as u64) | (p[i + 1] as u64) << 8 | (p[i + 2] as u64) << 16 | + (p[i + 3] as u64) << 24 | (p[i + 4] as u64) << 32 | (p[i + 5] as u64) << 40 | + (p[i + 6] as u64) << 48 | (p[i + 7] as u64) << 56 } #[cfg(test)] @@ -329,31 +324,183 @@ mod test { use core::core::Proof; - static V1:[u32;42] = [0x1fe9, 0x2050, 0x4581, 0x6322, 0x65ab, 0xb3c1, 0xc1a4, - 0xe257, 0x106ae, 0x17b11, 0x202d4, 0x2705d, 0x2deb2, 0x2f80e, - 0x32298, 0x34782, 0x35c5a, 0x37458, 0x38f28, 0x406b2, 0x40e34, - 0x40fc6, 0x42220, 0x42d13, 0x46c0f, 0x4fd47, 0x55ad2, 0x598f7, - 0x5aa8f, 0x62aa3, 0x65725, 0x65dcb, 0x671c7, 0x6eb20, 0x752fe, - 0x7594f, 0x79b9c, 0x7f775, 0x81635, 0x8401c, 0x844e5, 0x89fa8]; - static V2:[u32;42] = [0x2a37, 0x7557, 0xa3c3, 0xfce6, 0x1248e, 0x15837, 0x1827f, - 0x18a93, 0x1a7dd, 0x1b56b, 0x1ceb4, 0x1f962, 0x1fe2a, 0x29cb9, - 0x2f30e, 0x2f771, 0x336bf, 0x34355, 0x391d7, 0x39495, 0x3be0c, - 0x463be, 0x4d0c2, 0x4eead, 0x50214, 0x520de, 0x52a86, 0x53818, - 0x53b3b, 0x54c0b, 0x572fa, 0x5d79c, 0x5e3c2, 0x6769e, 0x6a0fe, - 0x6d835, 0x6fc7c, 0x70f03, 0x79d4a, 0x7b03e, 0x81e09, 0x9bd44]; - static V3:[u32;42] = [0x8158, 0x9f18, 0xc4ba, 0x108c7, 0x11caa, 0x13b82, 0x1618f, - 0x1c83b, 0x1ec89, 0x24354, 0x28864, 0x2a0fb, 0x2ce50, 0x2e8fa, - 0x32b36, 0x343e6, 0x34dc9, 0x36881, 0x3ffca, 0x40f79, 0x42721, - 0x43b8c, 0x44b9d, 0x47ed3, 0x4cd34, 0x5278a, 0x5ab64, 0x5b4d4, - 0x5d842, 0x5fa33, 0x6464e, 0x676ee, 0x685d6, 0x69df0, 0x6a5fd, - 0x6bda3, 0x72544, 0x77974, 0x7908c, 0x80e67, 0x81ef4, 0x8d882]; + static V1: [u32; 42] = [ + 0x1fe9, + 0x2050, + 0x4581, + 0x6322, + 0x65ab, + 0xb3c1, + 0xc1a4, + 0xe257, + 0x106ae, + 0x17b11, + 0x202d4, + 0x2705d, + 0x2deb2, + 0x2f80e, + 0x32298, + 0x34782, + 0x35c5a, + 0x37458, + 0x38f28, + 0x406b2, + 0x40e34, + 0x40fc6, + 0x42220, + 0x42d13, + 0x46c0f, + 0x4fd47, + 0x55ad2, + 0x598f7, + 0x5aa8f, + 0x62aa3, + 0x65725, + 0x65dcb, + 0x671c7, + 0x6eb20, + 0x752fe, + 0x7594f, + 0x79b9c, + 0x7f775, + 0x81635, + 0x8401c, + 0x844e5, + 0x89fa8, + ]; + static V2: [u32; 42] = [ + 0x2a37, + 0x7557, + 0xa3c3, + 0xfce6, + 0x1248e, + 0x15837, + 0x1827f, + 0x18a93, + 0x1a7dd, + 0x1b56b, + 0x1ceb4, + 0x1f962, + 0x1fe2a, + 0x29cb9, + 0x2f30e, + 0x2f771, + 0x336bf, + 0x34355, + 0x391d7, + 0x39495, + 0x3be0c, + 0x463be, + 0x4d0c2, + 0x4eead, + 0x50214, + 0x520de, + 0x52a86, + 0x53818, + 0x53b3b, + 0x54c0b, + 0x572fa, + 0x5d79c, + 0x5e3c2, + 0x6769e, + 0x6a0fe, + 0x6d835, + 0x6fc7c, + 0x70f03, + 0x79d4a, + 0x7b03e, + 0x81e09, + 0x9bd44, + ]; + static V3: [u32; 42] = [ + 0x8158, + 0x9f18, + 0xc4ba, + 0x108c7, + 0x11caa, + 0x13b82, + 0x1618f, + 0x1c83b, + 0x1ec89, + 0x24354, + 0x28864, + 0x2a0fb, + 0x2ce50, + 0x2e8fa, + 0x32b36, + 0x343e6, + 0x34dc9, + 0x36881, + 0x3ffca, + 0x40f79, + 0x42721, + 0x43b8c, + 0x44b9d, + 0x47ed3, + 0x4cd34, + 0x5278a, + 0x5ab64, + 0x5b4d4, + 0x5d842, + 0x5fa33, + 0x6464e, + 0x676ee, + 0x685d6, + 0x69df0, + 0x6a5fd, + 0x6bda3, + 0x72544, + 0x77974, + 0x7908c, + 0x80e67, + 0x81ef4, + 0x8d882, + ]; // cuckoo28 at 50% edges of letter 'u' - static V4:[u32;42] = [0x1CBBFD, 0x2C5452, 0x520338, 0x6740C5, 0x8C6997, 0xC77150, 0xFD4972, - 0x1060FA7, 0x11BFEA0, 0x1343E8D, 0x14CE02A, 0x1533515, 0x1715E61, 0x1996D9B, - 0x1CB296B, 0x1FCA180, 0x209A367, 0x20AD02E, 0x23CD2E4, 0x2A3B360, 0x2DD1C0C, - 0x333A200, 
0x33D77BC, 0x3620C78, 0x3DD7FB8, 0x3FBFA49, 0x41BDED2, 0x4A86FD9, - 0x570DE24, 0x57CAB86, 0x594B886, 0x5C74C94, 0x5DE7572, 0x60ADD6F, 0x635918B, - 0x6C9E120, 0x6EFA583, 0x7394ACA, 0x7556A23, 0x77F70AA, 0x7CF750A, 0x7F60790]; + static V4: [u32; 42] = [ + 0x1CBBFD, + 0x2C5452, + 0x520338, + 0x6740C5, + 0x8C6997, + 0xC77150, + 0xFD4972, + 0x1060FA7, + 0x11BFEA0, + 0x1343E8D, + 0x14CE02A, + 0x1533515, + 0x1715E61, + 0x1996D9B, + 0x1CB296B, + 0x1FCA180, + 0x209A367, + 0x20AD02E, + 0x23CD2E4, + 0x2A3B360, + 0x2DD1C0C, + 0x333A200, + 0x33D77BC, + 0x3620C78, + 0x3DD7FB8, + 0x3FBFA49, + 0x41BDED2, + 0x4A86FD9, + 0x570DE24, + 0x57CAB86, + 0x594B886, + 0x5C74C94, + 0x5DE7572, + 0x60ADD6F, + 0x635918B, + 0x6C9E120, + 0x6EFA583, + 0x7394ACA, + 0x7556A23, + 0x77F70AA, + 0x7CF750A, + 0x7F60790, + ]; /// Find a 42-cycle on Cuckoo20 at 75% easiness and verifiy against a few /// known cycle proofs @@ -372,29 +519,52 @@ mod test { #[test] fn validate20_vectors() { - assert!(Cuckoo::new(&[49], 20).verify(Proof::new(V1.to_vec().clone()), 75)); - assert!(Cuckoo::new(&[50], 20).verify(Proof::new(V2.to_vec().clone()), 70)); - assert!(Cuckoo::new(&[51], 20).verify(Proof::new(V3.to_vec().clone()), 70)); + assert!(Cuckoo::new(&[49], 20).verify( + Proof::new(V1.to_vec().clone()), + 75, + )); + assert!(Cuckoo::new(&[50], 20).verify( + Proof::new(V2.to_vec().clone()), + 70, + )); + assert!(Cuckoo::new(&[51], 20).verify( + Proof::new(V3.to_vec().clone()), + 70, + )); } #[test] fn validate28_vectors() { - let mut test_header=[0;32]; - test_header[0]=24; - assert!(Cuckoo::new(&test_header, 28).verify(Proof::new(V4.to_vec().clone()), 50)); + let mut test_header = [0; 32]; + test_header[0] = 24; + assert!(Cuckoo::new(&test_header, 28).verify( + Proof::new(V4.to_vec().clone()), + 50, + )); } #[test] fn validate_fail() { // edge checks assert!(!Cuckoo::new(&[49], 20).verify(Proof::new(vec![0; 42]), 75)); - assert!(!Cuckoo::new(&[49], 20).verify(Proof::new(vec![0xffff; 42]), 75)); + assert!(!Cuckoo::new(&[49], 20).verify( + Proof::new(vec![0xffff; 42]), + 75, + )); // wrong data for proof - assert!(!Cuckoo::new(&[50], 20).verify(Proof::new(V1.to_vec().clone()), 75)); - let mut test_header=[0;32]; - test_header[0]=24; - assert!(!Cuckoo::new(&test_header, 20).verify(Proof::new(V4.to_vec().clone()), 50)); - + assert!(!Cuckoo::new(&[50], 20).verify( + Proof::new(V1.to_vec().clone()), + 75, + )); + let mut test_header = [0; 32]; + test_header[0] = 24; + assert!(!Cuckoo::new(&test_header, 20).verify( + Proof::new( + V4.to_vec().clone(), + ), + 50, + )); + } #[test] diff --git a/pow/src/lib.rs b/pow/src/lib.rs index b97e97085..316849fc5 100644 --- a/pow/src/lib.rs +++ b/pow/src/lib.rs @@ -62,14 +62,14 @@ use cuckoo::{Cuckoo, Error}; /// pub trait MiningWorker { - /// This only sets parameters and does initialisation work now - fn new(ease: u32, sizeshift: u32, proof_size:usize) -> Self where Self:Sized; + fn new(ease: u32, sizeshift: u32, proof_size: usize) -> Self + where + Self: Sized; /// Actually perform a mining attempt on the given input and /// return a proof if found fn mine(&mut self, header: &[u8]) -> Result; - } /// Validates the proof of work of a given header, and that the proof of work @@ -85,43 +85,54 @@ pub fn verify_size(bh: &BlockHeader, cuckoo_sz: u32) -> bool { /// Uses the much easier Cuckoo20 (mostly for /// tests). 
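// Minimal implementor of the MiningWorker trait above (WrappingMiner is
// hypothetical), assuming the elided return type of mine() is
// Result<Proof, Error> with Error from `use cuckoo::{Cuckoo, Error}` and
// Proof from core::core. It just wraps the reference cuckoo::Miner; a real
// worker such as the plugin miner supplies its own search.
struct WrappingMiner {
    inner: cuckoo::Miner,
}

impl MiningWorker for WrappingMiner {
    fn new(ease: u32, sizeshift: u32, proof_size: usize) -> WrappingMiner {
        WrappingMiner { inner: cuckoo::Miner::new(ease, sizeshift, proof_size) }
    }
    fn mine(&mut self, header: &[u8]) -> Result<Proof, Error> {
        self.inner.mine(header)
    }
}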
-pub fn pow20(miner:&mut T, bh: &mut BlockHeader, diff: Difficulty) -> Result<(), Error> { +pub fn pow20( + miner: &mut T, + bh: &mut BlockHeader, + diff: Difficulty, +) -> Result<(), Error> { pow_size(miner, bh, diff, 20) } -/// Mines a genesis block, using the config specified miner if specified. Otherwise, +/// Mines a genesis block, using the config specified miner if specified. +/// Otherwise, /// uses the internal miner /// -pub fn mine_genesis_block(miner_config:Option)->Option { +pub fn mine_genesis_block(miner_config: Option) -> Option { info!("Starting miner loop for Genesis Block"); let mut gen = genesis::genesis(); let diff = gen.header.difficulty.clone(); - + let sz = global::sizeshift() as u32; let proof_size = global::proofsize(); - let mut miner:Box = match miner_config { + let mut miner: Box = match miner_config { Some(c) => { - if c.use_cuckoo_miner { + if c.use_cuckoo_miner { let mut p = plugin::PluginMiner::new(consensus::EASINESS, sz, proof_size); p.init(c.clone()); Box::new(p) } else { Box::new(cuckoo::Miner::new(consensus::EASINESS, sz, proof_size)) - } - }, + } + } None => Box::new(cuckoo::Miner::new(consensus::EASINESS, sz, proof_size)), }; pow_size(&mut *miner, &mut gen.header, diff, sz as u32).unwrap(); Some(gen) } -/// Runs a proof of work computation over the provided block using the provided Mining Worker, -/// until the required difficulty target is reached. May take a while for a low target... -pub fn pow_size(miner:&mut T, bh: &mut BlockHeader, - diff: Difficulty, _: u32) -> Result<(), Error> { +/// Runs a proof of work computation over the provided block using the provided +/// Mining Worker, +/// until the required difficulty target is reached. May take a while for a low +/// target... +pub fn pow_size( + miner: &mut T, + bh: &mut BlockHeader, + diff: Difficulty, + _: u32, +) -> Result<(), Error> { let start_nonce = bh.nonce; // if we're in production mode, try the pre-mined solution first @@ -166,17 +177,26 @@ mod test { use global; use core::core::target::Difficulty; use core::genesis; - use core::consensus::MINIMUM_DIFFICULTY; + use core::consensus::MINIMUM_DIFFICULTY; use core::global::MiningParameterMode; #[test] fn genesis_pow() { - global::set_mining_mode(MiningParameterMode::AutomatedTesting); + global::set_mining_mode(MiningParameterMode::AutomatedTesting); let mut b = genesis::genesis(); b.header.nonce = 310; - let mut internal_miner = cuckoo::Miner::new(consensus::EASINESS, global::sizeshift() as u32, global::proofsize()); - pow_size(&mut internal_miner, &mut b.header, Difficulty::from_num(MINIMUM_DIFFICULTY), global::sizeshift() as u32).unwrap(); + let mut internal_miner = cuckoo::Miner::new( + consensus::EASINESS, + global::sizeshift() as u32, + global::proofsize(), + ); + pow_size( + &mut internal_miner, + &mut b.header, + Difficulty::from_num(MINIMUM_DIFFICULTY), + global::sizeshift() as u32, + ).unwrap(); assert!(b.header.nonce != 310); assert!(b.header.pow.clone().to_difficulty() >= Difficulty::from_num(MINIMUM_DIFFICULTY)); assert!(verify_size(&b.header, global::sizeshift() as u32)); diff --git a/pow/src/plugin.rs b/pow/src/plugin.rs index 91322fec8..23c99e67f 100644 --- a/pow/src/plugin.rs +++ b/pow/src/plugin.rs @@ -30,7 +30,7 @@ use types::MinerConfig; use std::sync::Mutex; use cuckoo_miner::{CuckooMiner, CuckooPluginManager, CuckooMinerConfig, CuckooMinerSolution, -CuckooMinerDeviceStats, CuckooMinerError}; + CuckooMinerDeviceStats, CuckooMinerError}; // For now, we're just going to keep a static reference around to the loaded // 
config @@ -112,7 +112,7 @@ impl PluginMiner { let sz = global::sizeshift(); let mut cuckoo_configs = Vec::new(); - let mut index=0; + let mut index = 0; for f in plugin_vec_filters { // So this is built dynamically based on the plugin implementation // type and the consensus sizeshift @@ -126,12 +126,12 @@ impl PluginMiner { info!("Mining plugin {} - {}", index, caps[0].full_path.clone()); config.plugin_full_path = caps[0].full_path.clone(); if let Some(l) = miner_config.clone().cuckoo_miner_plugin_config { - if let Some(lp) = l[index].parameter_list.clone(){ + if let Some(lp) = l[index].parameter_list.clone() { config.parameter_list = lp.clone(); } } cuckoo_configs.push(config); - index+=1; + index += 1; } // Store this config now, because we just want one instance // of the plugin lib per invocation now @@ -141,7 +141,7 @@ impl PluginMiner { let result = CuckooMiner::new(cuckoo_configs.clone()); if let Err(e) = result { error!("Error initializing mining plugin: {:?}", e); - //error!("Accepted values are: {:?}", caps[0].parameters); + // error!("Accepted values are: {:?}", caps[0].parameters); panic!("Unable to init mining plugin."); } @@ -167,8 +167,8 @@ impl PluginMiner { } /// Get stats - pub fn get_stats(&self, index:usize) -> Result, CuckooMinerError> { - self.miner.as_ref().unwrap().get_stats(index) + pub fn get_stats(&self, index: usize) -> Result, CuckooMinerError> { + self.miner.as_ref().unwrap().get_stats(index) } } @@ -185,7 +185,7 @@ impl MiningWorker for PluginMiner { /// And simply calls the mine function of the loaded plugin /// returning whether a solution was found and the solution itself - fn mine(&mut self, header: &[u8]) -> Result{ + fn mine(&mut self, header: &[u8]) -> Result { let result = self.miner .as_mut() .unwrap() diff --git a/pow/src/types.rs b/pow/src/types.rs index 73605598d..fe7ac9796 100644 --- a/pow/src/types.rs +++ b/pow/src/types.rs @@ -19,18 +19,18 @@ use std::collections::HashMap; /// CuckooMinerPlugin configuration #[derive(Debug, Clone, Serialize, Deserialize)] pub struct CuckooMinerPluginConfig { - ///The type of plugin to load (i.e. filters on filename) - pub type_filter : String, + /// The type of plugin to load (i.e. 
filters on filename) + pub type_filter: String, - ///Parameters for this plugin - pub parameter_list : Option>, + /// Parameters for this plugin + pub parameter_list: Option>, } impl Default for CuckooMinerPluginConfig { fn default() -> CuckooMinerPluginConfig { CuckooMinerPluginConfig { - type_filter : String::new(), - parameter_list : None, + type_filter: String::new(), + parameter_list: None, } } } diff --git a/src/bin/grin.rs b/src/bin/grin.rs index 4ddae6c40..a986ef21f 100644 --- a/src/bin/grin.rs +++ b/src/bin/grin.rs @@ -374,10 +374,10 @@ fn wallet_command(wallet_args: &ArgMatches) { dest = d; } wallet::issue_send_tx(&wallet_config, &key, amount, dest.to_string()).unwrap(); - }, + } ("info", Some(_)) => { wallet::show_info(&wallet_config, &key); - }, + } _ => panic!("Unknown wallet command, use 'grin help wallet' for details"), } } diff --git a/store/src/sumtree.rs b/store/src/sumtree.rs index 4dab8de1b..b0f61b921 100644 --- a/store/src/sumtree.rs +++ b/store/src/sumtree.rs @@ -119,7 +119,7 @@ impl AppendOnlyFile { } as u64; // write the buffer, except if we prune offsets in the current span, - // in which case we skip + // in which case we skip let mut buf_start = 0; while prune_offs[prune_pos] >= read && prune_offs[prune_pos] < read + len { let prune_at = prune_offs[prune_pos] as usize; @@ -188,7 +188,11 @@ impl RemoveLog { if last_offs == 0 { self.removed = vec![]; } else { - self.removed = self.removed.iter().filter(|&&(_, idx)| { idx < last_offs }).map(|x| *x).collect(); + self.removed = self.removed + .iter() + .filter(|&&(_, idx)| idx < last_offs) + .map(|x| *x) + .collect(); } Ok(()) } @@ -230,8 +234,7 @@ impl RemoveLog { /// Whether the remove log currently includes the provided position. fn includes(&self, elmt: u64) -> bool { - include_tuple(&self.removed, elmt) || - include_tuple(&self.removed_tmp, elmt) + include_tuple(&self.removed, elmt) || include_tuple(&self.removed_tmp, elmt) } /// Number of positions stored in the remove log. @@ -305,7 +308,7 @@ where // Third, check if it's in the pruned list or its offset let shift = self.pruned_nodes.get_shift(position); if let None = shift { - return None + return None; } // The MMR starts at 1, our binary backend starts at 0 @@ -329,7 +332,9 @@ where fn rewind(&mut self, position: u64, index: u32) -> Result<(), String> { assert!(self.buffer.len() == 0, "Rewind on non empty buffer."); - self.remove_log.truncate(index).map_err(|e| format!("Could not truncate remove log: {}", e))?; + self.remove_log.truncate(index).map_err(|e| { + format!("Could not truncate remove log: {}", e) + })?; self.rewind = Some((position, index, self.buffer_index)); self.buffer_index = position as usize; Ok(()) @@ -340,7 +345,9 @@ where if self.buffer.used_size() > 0 { for position in &positions { let pos_sz = *position as usize; - if pos_sz > self.buffer_index && pos_sz - 1 < self.buffer_index + self.buffer.len() { + if pos_sz > self.buffer_index && + pos_sz - 1 < self.buffer_index + self.buffer.len() + { self.buffer.remove(vec![*position], index).unwrap(); } } @@ -370,7 +377,7 @@ where remove_log: rm_log, buffer: VecBackend::new(), buffer_index: (sz as usize) / record_len, - pruned_nodes: pmmr::PruneList{pruned_nodes: prune_list}, + pruned_nodes: pmmr::PruneList { pruned_nodes: prune_list }, rewind: None, }) } @@ -398,7 +405,10 @@ where if let Err(e) = self.hashsum_file.append(&ser::ser_vec(&hs).unwrap()[..]) { return Err(io::Error::new( io::ErrorKind::Interrupted, - format!("Could not write to log storage, disk full? 
{:?}", e) + format!( + "Could not write to log storage, disk full? {:?}", + e + ), )); } } @@ -407,7 +417,7 @@ where self.buffer_index = self.buffer_index + self.buffer.len(); self.buffer.clear(); self.remove_log.flush()?; - self.hashsum_file.sync()?; + self.hashsum_file.sync()?; self.rewind = None; Ok(()) } @@ -431,12 +441,14 @@ where /// to decide whether the remove log has reached its maximum length, /// otherwise the RM_LOG_MAX_NODES default value is used. /// - /// TODO whatever is calling this should also clean up the commit to position + /// TODO whatever is calling this should also clean up the commit to + /// position /// index in db pub fn check_compact(&mut self, max_len: usize) -> io::Result<()> { if !(max_len > 0 && self.remove_log.len() > max_len || - max_len == 0 && self.remove_log.len() > RM_LOG_MAX_NODES) { - return Ok(()) + max_len == 0 && self.remove_log.len() > RM_LOG_MAX_NODES) + { + return Ok(()); } // 0. validate none of the nodes in the rm log are in the prune list (to @@ -444,8 +456,10 @@ where for pos in &self.remove_log.removed[..] { if let None = self.pruned_nodes.pruned_pos(pos.0) { // TODO we likely can recover from this by directly jumping to 3 - error!("The remove log contains nodes that are already in the pruned \ - list, a previous compaction likely failed."); + error!( + "The remove log contains nodes that are already in the pruned \ + list, a previous compaction likely failed." + ); return Ok(()); } } @@ -454,20 +468,34 @@ where // remove list let tmp_prune_file = format!("{}/{}.prune", self.data_dir, PMMR_DATA_FILE); let record_len = (32 + T::sum_len()) as u64; - let to_rm = self.remove_log.removed.iter().map(|&(pos, _)| { - let shift = self.pruned_nodes.get_shift(pos); - (pos - 1 - shift.unwrap()) * record_len - }).collect(); - self.hashsum_file.save_prune(tmp_prune_file.clone(), to_rm, record_len)?; + let to_rm = self.remove_log + .removed + .iter() + .map(|&(pos, _)| { + let shift = self.pruned_nodes.get_shift(pos); + (pos - 1 - shift.unwrap()) * record_len + }) + .collect(); + self.hashsum_file.save_prune( + tmp_prune_file.clone(), + to_rm, + record_len, + )?; // 2. update the prune list and save it in place for &(rm_pos, _) in &self.remove_log.removed[..] { self.pruned_nodes.add(rm_pos); } - write_vec(format!("{}/{}", self.data_dir, PMMR_PRUNED_FILE), &self.pruned_nodes.pruned_nodes)?; + write_vec( + format!("{}/{}", self.data_dir, PMMR_PRUNED_FILE), + &self.pruned_nodes.pruned_nodes, + )?; // 3. move the compact copy to the hashsum file and re-open it - fs::rename(tmp_prune_file.clone(), format!("{}/{}", self.data_dir, PMMR_DATA_FILE))?; + fs::rename( + tmp_prune_file.clone(), + format!("{}/{}", self.data_dir, PMMR_DATA_FILE), + )?; self.hashsum_file = AppendOnlyFile::open(format!("{}/{}", self.data_dir, PMMR_DATA_FILE))?; self.hashsum_file.sync()?; @@ -481,7 +509,9 @@ where // Read an ordered vector of scalars from a file. 
fn read_ordered_vec(path: String) -> io::Result> - where T: ser::Readable + cmp::Ord { +where + T: ser::Readable + cmp::Ord, +{ let file_path = Path::new(&path); let mut ovec = Vec::with_capacity(1000); @@ -506,7 +536,10 @@ fn read_ordered_vec(path: String) -> io::Result> Err(_) => { return Err(io::Error::new( io::ErrorKind::InvalidData, - format!("Corrupted storage, could not read file at {}", path), + format!( + "Corrupted storage, could not read file at {}", + path + ), )); } } @@ -519,13 +552,16 @@ fn read_ordered_vec(path: String) -> io::Result> } fn write_vec(path: String, v: &Vec) -> io::Result<()> - where T: ser::Writeable { - +where + T: ser::Writeable, +{ + let mut file_path = File::create(&path)?; ser::serialize(&mut file_path, v).map_err(|_| { io::Error::new( io::ErrorKind::InvalidInput, - format!("Failed to serialize data when writing to {}", path)) + format!("Failed to serialize data when writing to {}", path), + ) })?; Ok(()) } diff --git a/store/tests/sumtree.rs b/store/tests/sumtree.rs index e948096a2..0d8a5f458 100644 --- a/store/tests/sumtree.rs +++ b/store/tests/sumtree.rs @@ -69,7 +69,7 @@ fn sumtree_prune_compact() { let mut backend = store::sumtree::PMMRBackend::new(data_dir).unwrap(); let mmr_size = load(0, &elems[..], &mut backend); backend.sync().unwrap(); - + // save the root let root: HashSum; { @@ -113,7 +113,7 @@ fn sumtree_reload() { let mut backend = store::sumtree::PMMRBackend::new(data_dir.clone()).unwrap(); mmr_size = load(0, &elems[..], &mut backend); backend.sync().unwrap(); - + // save the root and prune some nodes so we have prune data { let mut pmmr = PMMR::at(&mut backend, mmr_size); @@ -164,8 +164,7 @@ fn setup() -> (String, Vec) { (data_dir, elems) } -fn load(pos: u64, elems: &[TestElem], - backend: &mut store::sumtree::PMMRBackend) -> u64 { +fn load(pos: u64, elems: &[TestElem], backend: &mut store::sumtree::PMMRBackend) -> u64 { let mut pmmr = PMMR::at(backend, pos); for elem in elems { diff --git a/util/src/hex.rs b/util/src/hex.rs index d70a1d810..d1df46d2b 100644 --- a/util/src/hex.rs +++ b/util/src/hex.rs @@ -22,27 +22,30 @@ use std::num; /// Encode the provided bytes into a hex string pub fn to_hex(bytes: Vec) -> String { - let mut s = String::new(); - for byte in bytes { - write!(&mut s, "{:02x}", byte).expect("Unable to write"); - } - s + let mut s = String::new(); + for byte in bytes { + write!(&mut s, "{:02x}", byte).expect("Unable to write"); + } + s } /// Decode a hex string into bytes. pub fn from_hex(hex_str: String) -> Result, num::ParseIntError> { - let hex_trim = if &hex_str[..2] == "0x" { - hex_str[2..].to_owned() - } else { - hex_str.clone() - }; - split_n(&hex_trim.trim()[..], 2).iter() - .map(|b| u8::from_str_radix(b, 16)) - .collect::, _>>() + let hex_trim = if &hex_str[..2] == "0x" { + hex_str[2..].to_owned() + } else { + hex_str.clone() + }; + split_n(&hex_trim.trim()[..], 2) + .iter() + .map(|b| u8::from_str_radix(b, 16)) + .collect::, _>>() } fn split_n(s: &str, n: usize) -> Vec<&str> { - (0 .. (s.len() - n + 1)/2 + 1).map(|i| &s[2*i .. 
2*i + n]).collect() + (0..(s.len() - n + 1) / 2 + 1) + .map(|i| &s[2 * i..2 * i + n]) + .collect() } #[cfg(test)] @@ -59,7 +62,13 @@ mod test { #[test] fn test_from_hex() { assert_eq!(from_hex("00000000".to_string()).unwrap(), vec![0, 0, 0, 0]); - assert_eq!(from_hex("0a0b0c0d".to_string()).unwrap(), vec![10, 11, 12, 13]); - assert_eq!(from_hex("000000ff".to_string()).unwrap(), vec![0, 0, 0, 255]); + assert_eq!( + from_hex("0a0b0c0d".to_string()).unwrap(), + vec![10, 11, 12, 13] + ); + assert_eq!( + from_hex("000000ff".to_string()).unwrap(), + vec![0, 0, 0, 255] + ); } } diff --git a/util/src/lib.rs b/util/src/lib.rs index 9287f4bd3..9206fa22a 100644 --- a/util/src/lib.rs +++ b/util/src/lib.rs @@ -28,26 +28,26 @@ pub use hex::*; // (borrowed). #[derive(Clone)] pub struct OneTime { - inner: RefCell>, + inner: RefCell>, } unsafe impl Sync for OneTime {} unsafe impl Send for OneTime {} impl OneTime { - /// Builds a new uninitialized OneTime. - pub fn new() -> OneTime { - OneTime { inner: RefCell::new(None) } - } + /// Builds a new uninitialized OneTime. + pub fn new() -> OneTime { + OneTime { inner: RefCell::new(None) } + } - /// Initializes the OneTime, should only be called once after construction. - pub fn init(&self, value: T) { - let mut inner_mut = self.inner.borrow_mut(); - *inner_mut = Some(value); - } + /// Initializes the OneTime, should only be called once after construction. + pub fn init(&self, value: T) { + let mut inner_mut = self.inner.borrow_mut(); + *inner_mut = Some(value); + } - /// Borrows the OneTime, should only be called after initialization. - pub fn borrow(&self) -> Ref { - Ref::map(self.inner.borrow(), |o| o.as_ref().unwrap()) - } + /// Borrows the OneTime, should only be called after initialization. + pub fn borrow(&self) -> Ref { + Ref::map(self.inner.borrow(), |o| o.as_ref().unwrap()) + } } diff --git a/wallet/src/checker.rs b/wallet/src/checker.rs index 7740dd63c..46b9c79da 100644 --- a/wallet/src/checker.rs +++ b/wallet/src/checker.rs @@ -22,11 +22,7 @@ use types::*; use util; -fn refresh_output( - out: &mut OutputData, - api_out: Option, - tip: &api::Tip, -) { +fn refresh_output(out: &mut OutputData, api_out: Option, tip: &api::Tip) { if let Some(api_out) = api_out { out.height = api_out.height; out.lock_height = api_out.lock_height; @@ -38,25 +34,23 @@ fn refresh_output( } else { out.status = OutputStatus::Unspent; } - } else if vec![ - OutputStatus::Unspent, - OutputStatus::Locked - ].contains(&out.status) { + } else if vec![OutputStatus::Unspent, OutputStatus::Locked].contains(&out.status) { out.status = OutputStatus::Spent; } } /// Goes through the list of outputs that haven't been spent yet and check /// with a node whether their status has changed. 
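// Hypothetical usage of the OneTime wrapper above: build it empty, init it
// once, then borrow freely. Borrowing before init panics on the unwrap
// inside borrow(); init itself just stores the value and is expected to be
// called exactly once.
let once: OneTime<u32> = OneTime::new();
once.init(42);
assert_eq!(*once.borrow(), 42);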
-pub fn refresh_outputs(config: &WalletConfig, ext_key: &ExtendedKey) -> Result<(), Error>{ +pub fn refresh_outputs(config: &WalletConfig, ext_key: &ExtendedKey) -> Result<(), Error> { let secp = secp::Secp256k1::with_caps(secp::ContextFlag::Commit); let tip = get_tip(config)?; WalletData::with_wallet(&config.data_file_dir, |wallet_data| { // check each output that's not spent - for mut out in wallet_data.outputs - .iter_mut() - .filter(|out| out.status != OutputStatus::Spent) { + for mut out in wallet_data.outputs.iter_mut().filter(|out| { + out.status != OutputStatus::Spent + }) + { // figure out the commitment // TODO check the pool for unconfirmed @@ -66,8 +60,9 @@ pub fn refresh_outputs(config: &WalletConfig, ext_key: &ExtendedKey) -> Result<( match get_output_by_commitment(config, commitment) { Ok(api_out) => refresh_output(&mut out, api_out, &tip), Err(_) => { - //TODO find error with connection and return - //error!("Error contacting server node at {}. Is it running?", config.check_node_api_http_addr); + // TODO find error with connection and return + // error!("Error contacting server node at {}. Is it running?", + // config.check_node_api_http_addr); } } } @@ -76,14 +71,14 @@ pub fn refresh_outputs(config: &WalletConfig, ext_key: &ExtendedKey) -> Result<( fn get_tip(config: &WalletConfig) -> Result { let url = format!("{}/v1/chain/1", config.check_node_api_http_addr); - api::client::get::(url.as_str()) - .map_err(|e| Error::Node(e)) + api::client::get::(url.as_str()).map_err(|e| Error::Node(e)) } -// queries a reachable node for a given output, checking whether it's been confirmed +// queries a reachable node for a given output, checking whether it's been +// confirmed fn get_output_by_commitment( config: &WalletConfig, - commit: pedersen::Commitment + commit: pedersen::Commitment, ) -> Result, Error> { let url = format!( "{}/v1/chain/utxo/{}", diff --git a/wallet/src/extkey.rs b/wallet/src/extkey.rs index 05a74e72f..032548fae 100644 --- a/wallet/src/extkey.rs +++ b/wallet/src/extkey.rs @@ -207,8 +207,9 @@ impl ExtendedKey { let mut secret_key = SecretKey::from_slice(&secp, &derived.as_bytes()[0..32]) .expect("Error deriving key"); - secret_key.add_assign(secp, &self.key) - .expect("Error deriving key"); + secret_key.add_assign(secp, &self.key).expect( + "Error deriving key", + ); // TODO check if key != 0 ? 
let mut chain_code: [u8; 32] = [0; 32]; @@ -241,18 +242,26 @@ mod test { let s = Secp256k1::new(); let seed = from_hex("000102030405060708090a0b0c0d0e0f"); let extk = ExtendedKey::from_seed(&s, &seed.as_slice()).unwrap(); - let sec = - from_hex("c3f5ae520f474b390a637de4669c84d0ed9bbc21742577fac930834d3c3083dd"); + let sec = from_hex( + "c3f5ae520f474b390a637de4669c84d0ed9bbc21742577fac930834d3c3083dd", + ); let secret_key = SecretKey::from_slice(&s, sec.as_slice()).unwrap(); - let chaincode = - from_hex("e7298e68452b0c6d54837670896e1aee76b118075150d90d4ee416ece106ae72"); + let chaincode = from_hex( + "e7298e68452b0c6d54837670896e1aee76b118075150d90d4ee416ece106ae72", + ); let identifier = from_hex("942b6c0bd43bdcb24f3edfe7fadbc77054ecc4f2"); let fingerprint = from_hex("942b6c0b"); let depth = 0; let n_child = 0; assert_eq!(extk.key, secret_key); - assert_eq!(extk.identifier(), Identifier::from_bytes(identifier.as_slice())); - assert_eq!(extk.fingerprint, Fingerprint::from_bytes(fingerprint.as_slice())); + assert_eq!( + extk.identifier(), + Identifier::from_bytes(identifier.as_slice()) + ); + assert_eq!( + extk.fingerprint, + Fingerprint::from_bytes(fingerprint.as_slice()) + ); assert_eq!( extk.identifier().fingerprint(), Fingerprint::from_bytes(fingerprint.as_slice()) @@ -269,19 +278,27 @@ mod test { let seed = from_hex("000102030405060708090a0b0c0d0e0f"); let extk = ExtendedKey::from_seed(&s, &seed.as_slice()).unwrap(); let derived = extk.derive(&s, 0).unwrap(); - let sec = - from_hex("d75f70beb2bd3b56f9b064087934bdedee98e4b5aae6280c58b4eff38847888f"); + let sec = from_hex( + "d75f70beb2bd3b56f9b064087934bdedee98e4b5aae6280c58b4eff38847888f", + ); let secret_key = SecretKey::from_slice(&s, sec.as_slice()).unwrap(); - let chaincode = - from_hex("243cb881e1549e714db31d23af45540b13ad07941f64a786bbf3313b4de1df52"); + let chaincode = from_hex( + "243cb881e1549e714db31d23af45540b13ad07941f64a786bbf3313b4de1df52", + ); let fingerprint = from_hex("942b6c0b"); let identifier = from_hex("8b011f14345f3f0071e85f6eec116de1e575ea10"); let identifier_fingerprint = from_hex("8b011f14"); let depth = 1; let n_child = 0; assert_eq!(derived.key, secret_key); - assert_eq!(derived.identifier(), Identifier::from_bytes(identifier.as_slice())); - assert_eq!(derived.fingerprint, Fingerprint::from_bytes(fingerprint.as_slice())); + assert_eq!( + derived.identifier(), + Identifier::from_bytes(identifier.as_slice()) + ); + assert_eq!( + derived.fingerprint, + Fingerprint::from_bytes(fingerprint.as_slice()) + ); assert_eq!( derived.identifier().fingerprint(), Fingerprint::from_bytes(identifier_fingerprint.as_slice()) diff --git a/wallet/src/info.rs b/wallet/src/info.rs index d990807fa..2cfc8c211 100644 --- a/wallet/src/info.rs +++ b/wallet/src/info.rs @@ -27,9 +27,10 @@ pub fn show_info(config: &WalletConfig, ext_key: &ExtendedKey) { println!("Outputs - "); println!("fingerprint, n_child, height, lock_height, status, value"); println!("----------------------------------"); - for out in &mut wallet_data.outputs - .iter() - .filter(|o| o.fingerprint == ext_key.fingerprint ) { + for out in &mut wallet_data.outputs.iter().filter(|o| { + o.fingerprint == ext_key.fingerprint + }) + { let key = ext_key.derive(&secp, out.n_child).unwrap(); println!( diff --git a/wallet/src/receiver.rs b/wallet/src/receiver.rs index 2308e2fa4..68c2fb2cd 100644 --- a/wallet/src/receiver.rs +++ b/wallet/src/receiver.rs @@ -50,7 +50,7 @@ //! So we may as well have it in place already. 
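// Condensed sketch of the derivation flow exercised by the extkey tests
// earlier in this patch, reusing that test module's from_hex helper; the
// seed, the child index and the `master`/`child` names are illustrative.
let s = Secp256k1::new();
let seed = from_hex("000102030405060708090a0b0c0d0e0f");
let master = ExtendedKey::from_seed(&s, &seed.as_slice()).unwrap();
// derive() tweaks the parent key with add_assign, as shown above.
let child = master.derive(&s, 0).unwrap();
assert_eq!(child.n_child, 0);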
use std::convert::From; -use secp::{self}; +use secp; use secp::key::SecretKey; use core::core::{Block, Transaction, TxKernel, Output, build}; @@ -72,16 +72,15 @@ struct TxWrapper { pub fn receive_json_tx( config: &WalletConfig, ext_key: &ExtendedKey, - partial_tx_str: &str + partial_tx_str: &str, ) -> Result<(), Error> { let (amount, blinding, partial_tx) = partial_tx_from_json(partial_tx_str)?; let final_tx = receive_transaction(&config, ext_key, amount, blinding, partial_tx)?; let tx_hex = util::to_hex(ser::ser_vec(&final_tx).unwrap()); let url = format!("{}/v1/pool/push", config.check_node_api_http_addr.as_str()); - let _: () = api::client::post(url.as_str(), &TxWrapper { tx_hex: tx_hex }).map_err(|e| { - Error::Node(e) - })?; + let _: () = api::client::post(url.as_str(), &TxWrapper { tx_hex: tx_hex }) + .map_err(|e| Error::Node(e))?; Ok(()) } @@ -102,7 +101,7 @@ impl ApiEndpoint for WalletReceiver { fn operations(&self) -> Vec { vec![ Operation::Custom("coinbase".to_string()), - Operation::Custom("receive_json_tx".to_string()) + Operation::Custom("receive_json_tx".to_string()), ] } @@ -115,41 +114,50 @@ impl ApiEndpoint for WalletReceiver { if cb_amount.amount == 0 { return Err(api::Error::Argument(format!("Zero amount not allowed."))); } - let (out, kern) = - receive_coinbase(&self.config, &self.key, cb_amount.amount).map_err(|e| { - api::Error::Internal(format!("Error building coinbase: {:?}", e)) - })?; - let out_bin = - ser::ser_vec(&out).map_err(|e| { - api::Error::Internal(format!("Error serializing output: {:?}", e)) - })?; - let kern_bin = - ser::ser_vec(&kern).map_err(|e| { - api::Error::Internal(format!("Error serializing kernel: {:?}", e)) - })?; + let (out, kern) = receive_coinbase( + &self.config, + &self.key, + cb_amount.amount, + ).map_err(|e| { + api::Error::Internal(format!("Error building coinbase: {:?}", e)) + })?; + let out_bin = ser::ser_vec(&out).map_err(|e| { + api::Error::Internal(format!("Error serializing output: {:?}", e)) + })?; + let kern_bin = ser::ser_vec(&kern).map_err(|e| { + api::Error::Internal(format!("Error serializing kernel: {:?}", e)) + })?; Ok(CbData { output: util::to_hex(out_bin), kernel: util::to_hex(kern_bin), }) } - _ => Err(api::Error::Argument(format!("Incorrect request data: {}", op))), + _ => Err(api::Error::Argument( + format!("Incorrect request data: {}", op), + )), } } "receive_json_tx" => { match input { WalletReceiveRequest::PartialTransaction(partial_tx_str) => { debug!("Operation {} with transaction {}", op, &partial_tx_str); - receive_json_tx(&self.config, &self.key, &partial_tx_str).map_err(|e| { - api::Error::Internal(format!("Error processing partial transaction: {:?}", e)) - }).unwrap(); + receive_json_tx(&self.config, &self.key, &partial_tx_str) + .map_err(|e| { + api::Error::Internal( + format!("Error processing partial transaction: {:?}", e), + ) + }) + .unwrap(); - //TODO: Return emptiness for now, should be a proper enum return type + // TODO: Return emptiness for now, should be a proper enum return type Ok(CbData { output: String::from(""), kernel: String::from(""), }) } - _ => Err(api::Error::Argument(format!("Incorrect request data: {}", op))), + _ => Err(api::Error::Argument( + format!("Incorrect request data: {}", op), + )), } } _ => Err(api::Error::Argument(format!("Unknown operation: {}", op))), @@ -158,7 +166,11 @@ impl ApiEndpoint for WalletReceiver { } /// Build a coinbase output and the corresponding kernel -fn receive_coinbase(config: &WalletConfig, ext_key: &ExtendedKey, amount: u64) -> Result<(Output, 
TxKernel), Error> { +fn receive_coinbase( + config: &WalletConfig, + ext_key: &ExtendedKey, + amount: u64, +) -> Result<(Output, TxKernel), Error> { let secp = secp::Secp256k1::with_caps(secp::ContextFlag::Commit); // operate within a lock on wallet data @@ -177,20 +189,23 @@ fn receive_coinbase(config: &WalletConfig, ext_key: &ExtendedKey, amount: u64) - height: 0, lock_height: 0, }); - debug!("Using child {} for a new coinbase output.", - coinbase_key.n_child); + debug!( + "Using child {} for a new coinbase output.", + coinbase_key.n_child + ); Block::reward_output(coinbase_key.key, &secp).map_err(&From::from) })? } /// Builds a full transaction from the partial one sent to us for transfer -fn receive_transaction(config: &WalletConfig, - ext_key: &ExtendedKey, - amount: u64, - blinding: SecretKey, - partial: Transaction) - -> Result { +fn receive_transaction( + config: &WalletConfig, + ext_key: &ExtendedKey, + amount: u64, + blinding: SecretKey, + partial: Transaction, +) -> Result { let secp = secp::Secp256k1::with_caps(secp::ContextFlag::Commit); @@ -200,9 +215,11 @@ fn receive_transaction(config: &WalletConfig, let next_child = wallet_data.next_child(&ext_key.fingerprint); let out_key = ext_key.derive(&secp, next_child).map_err(|e| Error::Key(e))?; - let (tx_final, _) = build::transaction(vec![build::initial_tx(partial), - build::with_excess(blinding), - build::output(amount, out_key.key)])?; + let (tx_final, _) = build::transaction(vec![ + build::initial_tx(partial), + build::with_excess(blinding), + build::output(amount, out_key.key), + ])?; // make sure the resulting transaction is valid (could have been lied to // on excess) @@ -218,8 +235,10 @@ fn receive_transaction(config: &WalletConfig, lock_height: 0, }); - debug!("Using child {} for a new transaction output.", - out_key.n_child); + debug!( + "Using child {} for a new transaction output.", + out_key.n_child + ); Ok(tx_final) })? diff --git a/wallet/src/sender.rs b/wallet/src/sender.rs index 46780bca0..a10e99ef6 100644 --- a/wallet/src/sender.rs +++ b/wallet/src/sender.rs @@ -13,7 +13,7 @@ // limitations under the License. use std::convert::From; -use secp::{self}; +use secp; use secp::key::SecretKey; use checker; @@ -27,7 +27,12 @@ use api; /// wallet /// UTXOs. The destination can be "stdout" (for command line) or a URL to the /// recipients wallet receiver (to be implemented). -pub fn issue_send_tx(config: &WalletConfig, ext_key: &ExtendedKey, amount: u64, dest: String) -> Result<(), Error> { +pub fn issue_send_tx( + config: &WalletConfig, + ext_key: &ExtendedKey, + amount: u64, + dest: String, +) -> Result<(), Error> { let _ = checker::refresh_outputs(&config, ext_key); let (tx, blind_sum) = build_send_tx(config, ext_key, amount)?; @@ -39,8 +44,10 @@ pub fn issue_send_tx(config: &WalletConfig, ext_key: &ExtendedKey, amount: u64, let url = format!("{}/v1/receive/receive_json_tx", &dest); debug!("Posting partial transaction to {}", url); let request = WalletReceiveRequest::PartialTransaction(json_tx); - let _: CbData = api::client::post(url.as_str(), &request) - .expect(&format!("Wallet receiver at {} unreachable, could not send transaction. Is it running?", url)); + let _: CbData = api::client::post(url.as_str(), &request).expect(&format!( + "Wallet receiver at {} unreachable, could not send transaction. 
Is it running?", + url + )); } Ok(()) } @@ -48,7 +55,11 @@ pub fn issue_send_tx(config: &WalletConfig, ext_key: &ExtendedKey, amount: u64, /// Builds a transaction to send to someone from the HD seed associated with the /// wallet and the amount to send. Handles reading through the wallet data file, /// selecting outputs to spend and building the change. -fn build_send_tx(config: &WalletConfig, ext_key: &ExtendedKey, amount: u64) -> Result<(Transaction, SecretKey), Error> { +fn build_send_tx( + config: &WalletConfig, + ext_key: &ExtendedKey, + amount: u64, +) -> Result<(Transaction, SecretKey), Error> { // first, rebuild the private key from the seed let secp = secp::Secp256k1::with_caps(secp::ContextFlag::Commit); @@ -66,7 +77,9 @@ fn build_send_tx(config: &WalletConfig, ext_key: &ExtendedKey, amount: u64) -> R // third, build inputs using the appropriate key let mut parts = vec![]; for coin in &coins { - let in_key = ext_key.derive(&secp, coin.n_child).map_err(|e| Error::Key(e))?; + let in_key = ext_key.derive(&secp, coin.n_child).map_err( + |e| Error::Key(e), + )?; parts.push(build::input(coin.value, in_key.key)); } diff --git a/wallet/src/types.rs b/wallet/src/types.rs index 178dde979..2c3859f7e 100644 --- a/wallet/src/types.rs +++ b/wallet/src/types.rs @@ -79,14 +79,14 @@ impl From for Error { #[derive(Debug, Clone, Serialize, Deserialize)] pub struct WalletConfig { - //Whether to run a wallet + // Whether to run a wallet pub enable_wallet: bool, - //The api address that this api server (i.e. this wallet) will run + // The api address that this api server (i.e. this wallet) will run pub api_http_addr: String, - //The api address of a running server node, against which transaction inputs will be checked - //during send + // The api address of a running server node, against which transaction inputs will be checked + // during send pub check_node_api_http_addr: String, - //The directory in which wallet files are stored + // The directory in which wallet files are stored pub data_file_dir: String, } @@ -171,10 +171,11 @@ impl WalletData { /// Note that due to the impossibility to do an actual file lock easily /// across operating systems, this just creates a lock file with a "should /// not exist" option. - pub fn with_wallet(data_file_dir:&str, f: F) -> Result - where F: FnOnce(&mut WalletData) -> T + pub fn with_wallet(data_file_dir: &str, f: F) -> Result + where + F: FnOnce(&mut WalletData) -> T, { - //create directory if it doesn't exist + // create directory if it doesn't exist fs::create_dir_all(data_file_dir).unwrap_or_else(|why| { info!("! {:?}", why.kind()); }); @@ -191,16 +192,23 @@ impl WalletData { .create_new(true) .open(lock_file_path) .map_err(|_| { - Error::WalletData(format!("Could not create wallet lock file. Either \ - some other process is using the wallet or there's a write access issue.")) + Error::WalletData(format!( + "Could not create wallet lock file. Either \ + some other process is using the wallet or there's a write access issue." + )) }); match result { - Ok(_) => { break; }, + Ok(_) => { + break; + } Err(e) => { if retries >= 3 { return Err(e); } - debug!("failed to obtain wallet.lock, retries - {}, sleeping", retries); + debug!( + "failed to obtain wallet.lock, retries - {}, sleeping", + retries + ); retries += 1; thread::sleep(time::Duration::from_millis(500)); } @@ -215,16 +223,16 @@ impl WalletData { // delete the lock file fs::remove_file(lock_file_path).map_err(|_| { - Error::WalletData( - format!("Could not remove wallet lock file. 
Maybe insufficient rights?") - ) + Error::WalletData(format!( + "Could not remove wallet lock file. Maybe insufficient rights?" + )) })?; Ok(res) } /// Read the wallet data or create a brand new one if it doesn't exist yet - fn read_or_create(data_file_path:&str) -> Result { + fn read_or_create(data_file_path: &str) -> Result { if Path::new(data_file_path).exists() { WalletData::read(data_file_path) } else { @@ -234,7 +242,7 @@ impl WalletData { } /// Read the wallet data from disk. - fn read(data_file_path:&str) -> Result { + fn read(data_file_path: &str) -> Result { let data_file = File::open(data_file_path).map_err(|e| { Error::WalletData(format!("Could not open {}: {}", data_file_path, e)) })?; @@ -244,7 +252,7 @@ impl WalletData { } /// Write the wallet data to disk. - fn write(&self, data_file_path:&str) -> Result<(), Error> { + fn write(&self, data_file_path: &str) -> Result<(), Error> { let mut data_file = File::create(data_file_path).map_err(|e| { Error::WalletData(format!("Could not create {}: {}", data_file_path, e)) })?; @@ -262,11 +270,12 @@ impl WalletData { } pub fn lock_output(&mut self, out: &OutputData) { - if let Some(out_to_lock) = self.outputs.iter_mut().find(|out_to_lock| { - out_to_lock.n_child == out.n_child && - out_to_lock.fingerprint == out.fingerprint && - out_to_lock.value == out.value - }) { + if let Some(out_to_lock) = + self.outputs.iter_mut().find(|out_to_lock| { + out_to_lock.n_child == out.n_child && out_to_lock.fingerprint == out.fingerprint && + out_to_lock.value == out.value + }) + { out_to_lock.lock(); } } @@ -333,7 +342,9 @@ pub fn partial_tx_from_json(json_str: &str) -> Result<(u64, SecretKey, Transacti let blinding = SecretKey::from_slice(&secp, &blind_bin[..])?; let tx_bin = util::from_hex(partial_tx.tx)?; let tx = ser::deserialize(&mut &tx_bin[..]).map_err(|_| { - Error::Format("Could not deserialize transaction, invalid format.".to_string()) + Error::Format( + "Could not deserialize transaction, invalid format.".to_string(), + ) })?; Ok((partial_tx.amount, blinding, tx))
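// End-to-end sketch tying the wallet helpers above together: a condensed,
// hypothetical variant of receive_json_tx from receiver.rs that returns
// the finalized transaction instead of posting it to a node.
// finalize_from_json is illustrative and assumes receiver.rs scope, where
// both partial_tx_from_json and receive_transaction are visible.
fn finalize_from_json(
    config: &WalletConfig,
    ext_key: &ExtendedKey,
    json_str: &str,
) -> Result<Transaction, Error> {
    // Decode the sender's (amount, blinding, partial tx) triple, then add
    // our own output and excess to complete the transaction.
    let (amount, blinding, partial) = partial_tx_from_json(json_str)?;
    receive_transaction(config, ext_key, amount, blinding, partial)
}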