From 3b4a48b2fdc4a2b20d00c5f78e419999853b3690 Mon Sep 17 00:00:00 2001 From: AntiochP <30642645+antiochp@users.noreply.github.com> Date: Wed, 9 Aug 2017 20:54:10 -0400 Subject: [PATCH] Cleanup build warnings (#87) * minor cleanup - unused imports * cleanup build warnings - unused vars * make structs pub to get rid of the private_in_public lint warning * missing docs on RangeProof * add missing docs to store delete function * cleaned up deprecation warning - tokio_core -> tokio_io complete() -> send() --- api/src/endpoints.rs | 19 ++++--- api/src/rest.rs | 4 +- chain/src/chain.rs | 11 ++-- chain/src/pipe.rs | 19 +++---- chain/src/store.rs | 3 +- chain/src/types.rs | 4 +- chain/tests/mine_simple_chain.rs | 12 ++--- config/src/config.rs | 16 +++--- config/src/types.rs | 20 +++---- core/src/core/block.rs | 1 - core/src/core/mod.rs | 5 +- core/src/core/pmmr.rs | 24 +++++---- core/src/core/sumtree.rs | 1 + core/src/core/target.rs | 6 +-- core/src/genesis.rs | 1 - core/src/global.rs | 11 +++- core/src/macros.rs | 2 + core/src/pow/mod.rs | 13 ++--- core/src/ser.rs | 1 + grin/src/adapters.rs | 18 ++++--- grin/src/miner.rs | 48 ++++++++--------- grin/src/plugin.rs | 35 ++++++------ grin/src/seed.rs | 21 ++++++-- grin/src/server.rs | 17 +++--- grin/src/sync.rs | 9 ++-- grin/src/types.rs | 4 +- grin/tests/framework.rs | 77 ++++++++++++-------------- grin/tests/simulnet.rs | 24 ++++----- p2p/src/conn.rs | 13 ++--- p2p/src/lib.rs | 1 - p2p/src/msg.rs | 23 ++++---- p2p/src/peer.rs | 2 +- p2p/src/protocol.rs | 11 ++-- p2p/src/rate_limit.rs | 11 ++-- p2p/src/server.rs | 20 +++---- p2p/src/types.rs | 3 +- p2p/tests/peer_handshake.rs | 1 - pool/src/blockchain.rs | 9 ++-- pool/src/graph.rs | 45 ++++++++++------ pool/src/lib.rs | 1 - pool/src/pool.rs | 92 +++++++++++++++++--------------- pool/src/types.rs | 76 ++++++++++++++------------ secp256k1zkp/src/pedersen.rs | 1 + store/src/lib.rs | 2 + wallet/src/checker.rs | 4 +- wallet/src/receiver.rs | 14 ++--- wallet/src/sender.rs | 15 +++--- wallet/src/types.rs | 10 ++-- 48 files changed, 418 insertions(+), 362 deletions(-) diff --git a/api/src/endpoints.rs b/api/src/endpoints.rs index f813d67af..8a3ff11c6 100644 --- a/api/src/endpoints.rs +++ b/api/src/endpoints.rs @@ -21,11 +21,10 @@ // } // } -use std::sync::{Arc, Mutex, RwLock}; +use std::sync::{Arc, RwLock}; use std::thread; use core::core::{Transaction, Output}; -use core::core::hash::Hash; use core::ser; use chain::{self, Tip}; use pool; @@ -51,7 +50,7 @@ impl ApiEndpoint for ChainApi { vec![Operation::Get] } - fn get(&self, id: String) -> ApiResult { + fn get(&self, _: String) -> ApiResult { self.chain.head().map_err(|e| Error::Internal(format!("{:?}", e))) } } @@ -75,13 +74,13 @@ impl ApiEndpoint for OutputApi { fn get(&self, id: String) -> ApiResult { debug!("GET output {}", id); - let c = util::from_hex(id.clone()).map_err(|e| Error::Argument(format!("Not a valid commitment: {}", id)))?; + let c = util::from_hex(id.clone()).map_err(|_| Error::Argument(format!("Not a valid commitment: {}", id)))?; match self.chain.get_unspent(&Commitment::from_vec(c)) { Some(utxo) => Ok(utxo), None => Err(Error::NotFound), } - + } } @@ -93,7 +92,7 @@ pub struct PoolApi { } #[derive(Serialize, Deserialize)] -struct PoolInfo { +pub struct PoolInfo { pool_size: usize, orphans_size: usize, total_size: usize, @@ -111,7 +110,7 @@ impl ApiEndpoint for PoolApi vec![Operation::Get, Operation::Custom("push".to_string())] } - fn get(&self, id: String) -> ApiResult { + fn get(&self, _: String) -> ApiResult { let pool = 
self.tx_pool.read().unwrap(); Ok(PoolInfo { pool_size: pool.pool_size(), @@ -120,9 +119,9 @@ impl ApiEndpoint for PoolApi }) } - fn operation(&self, op: String, input: TxWrapper) -> ApiResult<()> { + fn operation(&self, _: String, input: TxWrapper) -> ApiResult<()> { let tx_bin = util::from_hex(input.tx_hex) - .map_err(|e| Error::Argument(format!("Invalid hex in transaction wrapper.")))?; + .map_err(|_| Error::Argument(format!("Invalid hex in transaction wrapper.")))?; let tx: Transaction = ser::deserialize(&mut &tx_bin[..]).map_err(|_| { Error::Argument("Could not deserialize transaction, invalid format.".to_string()) @@ -146,7 +145,7 @@ impl ApiEndpoint for PoolApi /// Dummy wrapper for the hex-encoded serialized transaction. #[derive(Serialize, Deserialize)] -struct TxWrapper { +pub struct TxWrapper { tx_hex: String, } diff --git a/api/src/rest.rs b/api/src/rest.rs index d03f4c790..90004e2d9 100644 --- a/api/src/rest.rs +++ b/api/src/rest.rs @@ -30,9 +30,8 @@ use iron::{Iron, Request, Response, IronResult, IronError, status, headers, List use iron::method::Method; use iron::modifiers::Header; use iron::middleware::Handler; -use iron::error::HttpResult; use router::Router; -use serde::{Serialize, Deserialize}; +use serde::Serialize; use serde::de::DeserializeOwned; use serde_json; @@ -331,7 +330,6 @@ impl ApiServer { #[cfg(test)] mod test { use super::*; - use rest::*; #[derive(Serialize, Deserialize)] pub struct Animal { diff --git a/chain/src/chain.rs b/chain/src/chain.rs index 975ee20a5..47ebc1d26 100644 --- a/chain/src/chain.rs +++ b/chain/src/chain.rs @@ -97,7 +97,8 @@ impl Chain { Err(e) => return Err(Error::StoreErr(e)), }; - let head = chain_store.head()?; + // TODO - confirm this was safe to remove based on code above? + // let head = chain_store.head()?; Ok(Chain { store: Arc::new(chain_store), @@ -172,11 +173,11 @@ impl Chain { } } - /// Pop orphans out of the queue and check if we can now accept them. + /// Pop orphans out of the queue and check if we can now accept them. fn check_orphans(&self) { // first check how many we have to retry, unfort. we can't extend the lock // in the loop as it needs to be freed before going in process_block - let mut orphan_count = 0; + let orphan_count; { let orphans = self.orphans.lock().unwrap(); orphan_count = orphans.len(); @@ -184,13 +185,13 @@ impl Chain { // pop each orphan and retry, if still orphaned, will be pushed again for _ in 0..orphan_count { - let mut popped = None; + let popped; { let mut orphans = self.orphans.lock().unwrap(); popped = orphans.pop_back(); } if let Some((opts, orphan)) = popped { - self.process_block(orphan, opts); + let _process_result = self.process_block(orphan, opts); } } } diff --git a/chain/src/pipe.rs b/chain/src/pipe.rs index 287347c45..bf2819162 100644 --- a/chain/src/pipe.rs +++ b/chain/src/pipe.rs @@ -14,7 +14,6 @@ //! Implementation of the chain block acceptance (or refusal) pipeline. -use std::convert::From; use std::sync::{Arc, Mutex}; use secp; @@ -22,22 +21,24 @@ use time; use core::consensus; use core::core::hash::{Hash, Hashed}; -use core::core::target::Difficulty; -use core::core::{BlockHeader, Block, Proof}; +use core::core::{BlockHeader, Block}; use core::pow; -use core::ser; use types::*; use store; use core::global; -use core::global::{MiningParameterMode,MINING_PARAMETER_MODE}; /// Contextual information required to process a new block and either reject or /// accept it. 
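The chain.rs changes above show the two commonest fixes in this patch for unused-variable and unused-result warnings: bind a must-use Result to an underscore-prefixed name when the outcome is deliberately ignored, and replace `let mut x = None;` with a deferred `let x;` that is assigned exactly once. A minimal self-contained sketch, with a hypothetical fallible `process` standing in for `process_block`:

    // Sketch: silencing warnings without losing intent.
    // `process` is a hypothetical stand-in for process_block.
    fn process(n: u32) -> Result<u32, String> {
        if n % 2 == 0 { Ok(n) } else { Err("odd".to_string()) }
    }

    fn main() {
        // An underscore-prefixed binding acknowledges a must-use
        // Result on purpose and keeps a name around for debugging.
        let _process_result = process(2);

        // Deferred initialization: declared without `mut`, assigned
        // exactly once inside the inner scope, so there is no
        // throwaway first value for the lints to flag.
        let popped;
        {
            popped = process(4).ok();
        }
        if let Some(v) = popped {
            println!("popped {}", v);
        }
    }
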
pub struct BlockContext { + /// The options pub opts: Options, + /// The store pub store: Arc, + /// The adapter pub adapter: Arc, + /// The head pub head: Tip, + /// The lock pub lock: Arc>, } @@ -68,11 +69,12 @@ pub fn process_block(b: &Block, mut ctx: BlockContext) -> Result, Er b.hash() ); - ctx.lock.lock(); + let _ = ctx.lock.lock().unwrap(); add_block(b, &mut ctx)?; update_head(b, &mut ctx) } +/// Process the block header pub fn process_block_header(bh: &BlockHeader, mut ctx: BlockContext) -> Result, Error> { info!( @@ -84,7 +86,7 @@ pub fn process_block_header(bh: &BlockHeader, mut ctx: BlockContext) -> Result Result<(), E return Err(Error::DifficultyTooLow); } - let param_ref=MINING_PARAMETER_MODE.read().unwrap(); let cycle_size = if ctx.opts.intersects(EASY_POW) { global::sizeshift() } else { - consensus::DEFAULT_SIZESHIFT + consensus::DEFAULT_SIZESHIFT }; debug!("Validating block with cuckoo size {}", cycle_size); if !pow::verify_size(header, cycle_size as u32) { diff --git a/chain/src/store.rs b/chain/src/store.rs index 080a4839c..ecea6f049 100644 --- a/chain/src/store.rs +++ b/chain/src/store.rs @@ -41,6 +41,7 @@ pub struct ChainKVStore { } impl ChainKVStore { + /// Create new chain store pub fn new(root_path: String) -> Result { let db = grin_store::Store::open(format!("{}/{}", root_path, STORE_SUBPATH).as_str())?; Ok(ChainKVStore { db: db }) @@ -152,7 +153,7 @@ impl ChainStore for ChainKVStore { self.db.put_ser( &u64_to_key(HEADER_HEIGHT_PREFIX, real_prev.height), &real_prev, - ); + ).unwrap(); prev_h = real_prev.previous; prev_height = real_prev.height - 1; } else { diff --git a/chain/src/types.rs b/chain/src/types.rs index 26dec7c4c..413f9d18b 100644 --- a/chain/src/types.rs +++ b/chain/src/types.rs @@ -27,6 +27,7 @@ use grin_store; bitflags! { /// Options for block validation pub flags Options: u32 { + /// None flag const NONE = 0b00000001, /// Runs without checking the Proof of Work, mostly to make testing easier. const SKIP_POW = 0b00000010, @@ -37,6 +38,7 @@ bitflags! 
{ } } +/// Errors #[derive(Debug)] pub enum Error { /// The block doesn't fit anywhere in our chain @@ -202,5 +204,5 @@ pub trait ChainAdapter { /// Dummy adapter used as a placeholder for real implementations pub struct NoopAdapter {} impl ChainAdapter for NoopAdapter { - fn block_accepted(&self, b: &Block) {} + fn block_accepted(&self, _: &Block) {} } diff --git a/chain/tests/mine_simple_chain.rs b/chain/tests/mine_simple_chain.rs index 18e22f166..ec730f3c5 100644 --- a/chain/tests/mine_simple_chain.rs +++ b/chain/tests/mine_simple_chain.rs @@ -26,7 +26,6 @@ use std::thread; use rand::os::OsRng; use grin_chain::types::*; -use grin_chain::store; use grin_core::core::hash::Hashed; use grin_core::core::target::Difficulty; use grin_core::pow; @@ -34,15 +33,13 @@ use grin_core::core; use grin_core::consensus; use grin_core::pow::cuckoo; use grin_core::global; -use grin_core::global::{MiningParameterMode,MINING_PARAMETER_MODE}; - -use grin::{ServerConfig, MinerConfig}; +use grin_core::global::MiningParameterMode; use grin_core::pow::MiningWorker; #[test] fn mine_empty_chain() { - env_logger::init(); + let _ = env_logger::init(); global::set_mining_mode(MiningParameterMode::AutomatedTesting); let mut rng = OsRng::new().unwrap(); let chain = grin_chain::Chain::init(".grin".to_string(), Arc::new(NoopAdapter {})) @@ -52,7 +49,6 @@ fn mine_empty_chain() { let secp = secp::Secp256k1::with_caps(secp::ContextFlag::Commit); let reward_key = secp::key::SecretKey::new(&secp, &mut rng); - let server_config = ServerConfig::default(); let mut miner_config = grin::MinerConfig { enable_mining: true, burn_reward: true, @@ -60,7 +56,7 @@ fn mine_empty_chain() { }; miner_config.cuckoo_miner_plugin_dir = Some(String::from("../target/debug/deps")); - let mut cuckoo_miner = cuckoo::Miner::new(consensus::EASINESS, global::sizeshift() as u32, global::proofsize()); + let mut cuckoo_miner = cuckoo::Miner::new(consensus::EASINESS, global::sizeshift() as u32, global::proofsize()); for n in 1..4 { let prev = chain.head_header().unwrap(); let mut b = core::Block::new(&prev, vec![], reward_key).unwrap(); @@ -88,7 +84,7 @@ fn mine_empty_chain() { #[test] fn mine_forks() { - env_logger::init(); + let _ = env_logger::init(); let mut rng = OsRng::new().unwrap(); let chain = grin_chain::Chain::init(".grin2".to_string(), Arc::new(NoopAdapter {})) .unwrap(); diff --git a/config/src/config.rs b/config/src/config.rs index 2d82ddc7d..048315a13 100644 --- a/config/src/config.rs +++ b/config/src/config.rs @@ -22,7 +22,6 @@ use std::fs::File; use toml; use grin::{ServerConfig, MinerConfig}; -use wallet::WalletConfig; use types::{ConfigMembers, GlobalConfig, @@ -87,10 +86,10 @@ impl GlobalConfig { return Ok(()) } } - + // Give up Err(ConfigError::FileNotFoundError(String::from(""))) - + } /// Takes the path to a config file, or if NONE, tries @@ -102,7 +101,7 @@ impl GlobalConfig { if let Some(fp) = file_path { return_value.config_file_path = Some(PathBuf::from(&fp)); } else { - return_value.derive_config_location(); + return_value.derive_config_location().unwrap(); } //No attempt at a config file, just return defaults @@ -124,6 +123,7 @@ impl GlobalConfig { return_value.read_config() } + /// Read config pub fn read_config(mut self) -> Result { let mut file = File::open(self.config_file_path.as_mut().unwrap())?; let mut contents = String::new(); @@ -149,6 +149,7 @@ impl GlobalConfig { } } + /// Serialize config pub fn ser_config(&mut self) -> Result { let encoded:Result = toml::to_string(self.members.as_mut().unwrap()); match encoded { 
@@ -169,6 +170,7 @@ impl GlobalConfig { return self.members.as_mut().unwrap().wallet.as_mut().unwrap().enable_wallet; }*/ + /// Enable mining pub fn mining_enabled(&mut self) -> bool { return self.members.as_mut().unwrap().mining.as_mut().unwrap().enable_mining; } @@ -186,11 +188,11 @@ fn test_read_config() { test_mode = false #7 = FULL_NODE, not sure how to serialise this properly to use constants capabilities = [7] - + [server.p2p_config] host = "127.0.0.1" port = 13414 - + #Mining section is optional, if it's not here it will default to not mining [mining] enable_mining = true @@ -206,4 +208,4 @@ fn test_read_config() { println!("Decoded.server: {:?}", decoded.server); println!("Decoded wallet: {:?}", decoded.wallet); panic!("panic"); -} \ No newline at end of file +} diff --git a/config/src/types.rs b/config/src/types.rs index 157deaed9..8a7f2bf86 100644 --- a/config/src/types.rs +++ b/config/src/types.rs @@ -20,7 +20,6 @@ use std::fmt; use grin::{ServerConfig, MinerConfig}; -use wallet::WalletConfig; /// Error type wrapping config errors. @@ -54,7 +53,7 @@ impl fmt::Display for ConfigError { ConfigError::SerializationError(ref message) => { write!(f, "Error serializing configuration: {}", message) } - } + } } } @@ -67,9 +66,9 @@ impl From for ConfigError { } } -/// Going to hold all of the various configuration types +/// Going to hold all of the various configuration types /// separately for now, then put them together as a single -/// ServerConfig object afterwards. This is to flatten +/// ServerConfig object afterwards. This is to flatten /// out the configuration file into logical sections, /// as they tend to be quite nested in the code /// Most structs optional, as they may or may not @@ -77,12 +76,13 @@ impl From for ConfigError { #[derive(Debug, Serialize, Deserialize)] pub struct GlobalConfig { - //Keep track of the file we've read + ///Keep track of the file we've read pub config_file_path: Option, - //keep track of whether we're using - //a config file or just the defaults - //for each member + /// keep track of whether we're using + /// a config file or just the defaults + /// for each member pub using_config_file: bool, + /// Global member config pub members: Option, } @@ -93,11 +93,13 @@ pub struct GlobalConfig { #[derive(Debug, Serialize, Deserialize)] pub struct ConfigMembers { + /// Server config pub server: ServerConfig, + /// Mining config pub mining: Option, //removing wallet from here for now, //as its concerns are separate from the server's, really //given it needs to manage keys. 
It should probably //stay command line only for the time being //pub wallet: Option -} \ No newline at end of file +} diff --git a/core/src/core/block.rs b/core/src/core/block.rs index c7452b4a6..3487e608f 100644 --- a/core/src/core/block.rs +++ b/core/src/core/block.rs @@ -24,7 +24,6 @@ use core::{Input, Output, Proof, TxKernel, Transaction, COINBASE_KERNEL, COINBAS use core::transaction::merkle_inputs_outputs; use consensus::REWARD; use consensus::MINIMUM_DIFFICULTY; -use consensus::PROOFSIZE; use core::hash::{Hash, Hashed, ZERO_HASH}; use core::target::Difficulty; use ser::{self, Readable, Reader, Writeable, Writer}; diff --git a/core/src/core/mod.rs b/core/src/core/mod.rs index 57bed9f88..52eb03d3b 100644 --- a/core/src/core/mod.rs +++ b/core/src/core/mod.rs @@ -83,7 +83,10 @@ pub trait Committed { /// Proof of work pub struct Proof { + /// The nonces pub nonces:Vec, + + /// The proof size pub proof_size: usize, } @@ -124,7 +127,7 @@ impl Clone for Proof { } impl Proof { - + /// Builds a proof with all bytes zeroed out pub fn new(in_nonces:Vec) -> Proof { Proof { diff --git a/core/src/core/pmmr.rs b/core/src/core/pmmr.rs index aba6cbecc..c2398e48d 100644 --- a/core/src/core/pmmr.rs +++ b/core/src/core/pmmr.rs @@ -38,7 +38,7 @@ use std::clone::Clone; use std::fmt::Debug; use std::marker::PhantomData; -use std::ops::{self, Deref}; +use std::ops::{self}; use core::hash::{Hash, Hashed}; use ser::{self, Readable, Reader, Writeable, Writer}; @@ -96,11 +96,14 @@ impl Summable for NoSum { /// of two HashSums is the (Hash(h1|h2), h1 + h2) HashSum. #[derive(Debug, Clone, PartialEq, Eq)] pub struct HashSum where T: Summable { + /// The hash pub hash: Hash, + /// The sum pub sum: T::Sum, } impl HashSum where T: Summable + Writeable { + /// Create a hash sum from a summable pub fn from_summable(idx: u64, elmt: T) -> HashSum { let hash = Hashed::hash(&elmt); let sum = elmt.sum(); @@ -156,7 +159,7 @@ pub trait Backend where T: Summable { /// Heavily relies on navigation operations within a binary tree. In particular, /// all the implementation needs to keep track of the MMR structure is how far /// we are in the sequence of nodes making up the MMR. -struct PMMR where T: Summable, B: Backend { +pub struct PMMR where T: Summable, B: Backend { last_pos: u64, backend: B, // only needed for parameterizing Backend @@ -179,7 +182,7 @@ impl PMMR where T: Summable + Writeable + Debug + Clone, B: Backend< pub fn root(&self) -> HashSum { let peaks_pos = peaks(self.last_pos); let peaks: Vec>> = map_vec!(peaks_pos, |&pi| self.backend.get(pi)); - + let mut ret = None; for peak in peaks { ret = match (ret, peak) { @@ -199,7 +202,7 @@ impl PMMR where T: Summable + Writeable + Debug + Clone, B: Backend< let mut to_append = vec![current_hashsum.clone()]; let mut height = 0; let mut pos = elmt_pos; - + // we look ahead one position in the MMR, if the expected node has a higher // height it means we have to build a higher peak by summing with a previous // sibling. we do it iteratively in case the new peak itself allows the @@ -231,13 +234,12 @@ impl PMMR where T: Summable + Writeable + Debug + Clone, B: Backend< // only leaves can be pruned return; } - + // loop going up the tree, from node to parent, as long as we stay inside // the tree. 
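For context on the `struct PMMR` to `pub struct PMMR` change above (and the same change to PoolInfo and TxWrapper earlier in the patch): the private_in_public lint fires when a public interface mentions a private type. A minimal sketch of the shape of the problem, using hypothetical names:

    mod mmr {
        // If `Handle` were private, the public `open` below would
        // expose a private type in a public signature and trigger
        // the private_in_public lint; marking the struct pub, as
        // this patch does for PMMR, resolves it.
        pub struct Handle {
            pub last_pos: u64,
        }

        pub fn open() -> Handle {
            Handle { last_pos: 0 }
        }
    }

    fn main() {
        let h = mmr::open();
        println!("last_pos = {}", h.last_pos);
    }
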
let mut to_prune = vec![]; let mut current = position; while current+1 < self.last_pos { - let current_height = bintree_postorder_height(current); let next_height = bintree_postorder_height(current+1); // compare the node's height to the next height, if the next is higher @@ -257,7 +259,7 @@ impl PMMR where T: Summable + Writeable + Debug + Clone, B: Backend< // can't prune when our parent isn't here yet break; } - to_prune.push(current); + to_prune.push(current); // if we have a pruned sibling, we can continue up the tree // otherwise we're done @@ -289,7 +291,7 @@ fn peaks(num: u64) -> Vec { if bintree_postorder_height(num+1) > bintree_postorder_height(num) { return vec![]; } - + // our top peak is always on the leftmost side of the tree and leftmost trees // have for index a binary values with all 1s (i.e. 11, 111, 1111, etc.) let mut top = 1; @@ -454,8 +456,10 @@ fn most_significant_pos(num: u64) -> u64 { #[cfg(test)] mod test { use super::*; - use core::hash::{Hash, Hashed}; + use core::hash::Hashed; use std::sync::{Arc, Mutex}; + use std::ops::Deref; + #[test] fn some_all_ones() { @@ -687,7 +691,7 @@ mod test { pmmr.prune(1); assert_eq!(orig_root, pmmr.root()); assert_eq!(ba.used_size(), orig_sz - 7); - + // pruning everything should only leave us the peaks for n in 1..16 { pmmr.prune(n); diff --git a/core/src/core/sumtree.rs b/core/src/core/sumtree.rs index 9c551231b..dbff576ff 100644 --- a/core/src/core/sumtree.rs +++ b/core/src/core/sumtree.rs @@ -698,6 +698,7 @@ where } #[allow(dead_code)] +#[allow(missing_docs)] pub fn print_tree(tree: &SumTree) where T: Summable + Writeable, diff --git a/core/src/core/target.rs b/core/src/core/target.rs index e490c35c0..b30cbfab9 100644 --- a/core/src/core/target.rs +++ b/core/src/core/target.rs @@ -21,11 +21,9 @@ use std::fmt; use std::ops::{Add, Mul, Div, Sub}; -use std::io::Cursor; -use std::u64::MAX; use serde::{Serialize, Serializer, Deserialize, Deserializer, de}; -use byteorder::{ByteOrder, ReadBytesExt, BigEndian}; +use byteorder::{ByteOrder, BigEndian}; use core::hash::Hash; use ser::{self, Reader, Writer, Writeable, Readable}; @@ -150,7 +148,7 @@ impl<'de> de::Visitor<'de> for DiffVisitor { where E: de::Error { let num_in = s.parse::(); - if let Err(e)=num_in { + if let Err(_)=num_in { return Err(de::Error::invalid_value(de::Unexpected::Str(s), &"a value number")); }; Ok(Difficulty { num: num_in.unwrap() }) diff --git a/core/src/genesis.rs b/core/src/genesis.rs index 30d31478f..b612b4ec7 100644 --- a/core/src/genesis.rs +++ b/core/src/genesis.rs @@ -18,7 +18,6 @@ use time; use core; use consensus::MINIMUM_DIFFICULTY; -use consensus::PROOFSIZE; use core::hash::Hashed; use core::target::Difficulty; use global; diff --git a/core/src/global.rs b/core/src/global.rs index dde84dbe6..0dca2bc25 100644 --- a/core/src/global.rs +++ b/core/src/global.rs @@ -28,14 +28,19 @@ use consensus::DEFAULT_SIZESHIFT; /// Define these here, as they should be developer-set, not really tweakable /// by users +/// Automated testing sizeshift pub const AUTOMATED_TESTING_SIZESHIFT:u8 = 10; +/// Automated testing proof size pub const AUTOMATED_TESTING_PROOF_SIZE:usize = 4; +/// User testing sizeshift pub const USER_TESTING_SIZESHIFT:u8 = 16; +/// User testing proof size pub const USER_TESTING_PROOF_SIZE:usize = 42; +/// Mining parameter modes #[derive(Debug, Clone, Serialize, Deserialize)] pub enum MiningParameterMode { /// For CI testing @@ -49,14 +54,17 @@ pub enum MiningParameterMode { } lazy_static!{ + /// The mining parameter mode pub static ref 
MINING_PARAMETER_MODE: RwLock = RwLock::new(MiningParameterMode::Production); } +/// Set the mining mode pub fn set_mining_mode(mode:MiningParameterMode){ let mut param_ref=MINING_PARAMETER_MODE.write().unwrap(); *param_ref=mode; } +/// The sizeshift pub fn sizeshift() -> u8 { let param_ref=MINING_PARAMETER_MODE.read().unwrap(); match *param_ref { @@ -66,6 +74,7 @@ pub fn sizeshift() -> u8 { } } +/// The proofsize pub fn proofsize() -> usize { let param_ref=MINING_PARAMETER_MODE.read().unwrap(); match *param_ref { @@ -75,6 +84,7 @@ pub fn proofsize() -> usize { } } +/// Are we in automated testing mode? pub fn is_automated_testing_mode() -> bool { let param_ref=MINING_PARAMETER_MODE.read().unwrap(); if let MiningParameterMode::AutomatedTesting=*param_ref { @@ -83,4 +93,3 @@ pub fn is_automated_testing_mode() -> bool { return false; } } - diff --git a/core/src/macros.rs b/core/src/macros.rs index 0f13a999e..1c51c91af 100644 --- a/core/src/macros.rs +++ b/core/src/macros.rs @@ -39,6 +39,7 @@ macro_rules! try_map_vec { /// Eliminates some of the verbosity in having iter and collect /// around every fitler_map call. +#[macro_export] macro_rules! filter_map_vec { ($thing:expr, $mapfn:expr ) => { $thing.iter() @@ -52,6 +53,7 @@ macro_rules! filter_map_vec { /// Example: /// let foo = vec![1,2,3] /// println!(tee!(foo, foo.append(vec![3,4,5])) +#[macro_export] macro_rules! tee { ($thing:ident, $thing_expr:expr) => { { diff --git a/core/src/pow/mod.rs b/core/src/pow/mod.rs index 7e5c40982..847d2b976 100644 --- a/core/src/pow/mod.rs +++ b/core/src/pow/mod.rs @@ -31,8 +31,6 @@ use consensus::EASINESS; use core::BlockHeader; use core::hash::Hashed; use core::Proof; -use global; -use global::{MiningParameterMode, MINING_PARAMETER_MODE}; use core::target::Difficulty; use pow::cuckoo::{Cuckoo, Error}; @@ -41,10 +39,10 @@ use pow::cuckoo::{Cuckoo, Error}; /// pub trait MiningWorker { - + /// This only sets parameters and does initialisation work now fn new(ease: u32, sizeshift: u32, proof_size:usize) -> Self; - + /// Actually perform a mining attempt on the given input and /// return a proof if found fn mine(&mut self, header: &[u8]) -> Result; @@ -70,8 +68,8 @@ pub fn pow20(miner:&mut T, bh: &mut BlockHeader, diff: Difficul /// Runs a proof of work computation over the provided block using the provided Mining Worker, /// until the required difficulty target is reached. May take a while for a low target... -pub fn pow_size(miner:&mut T, bh: &mut BlockHeader, - diff: Difficulty, sizeshift: u32) -> Result<(), Error> { +pub fn pow_size(miner:&mut T, bh: &mut BlockHeader, + diff: Difficulty, _: u32) -> Result<(), Error> { let start_nonce = bh.nonce; // try to find a cuckoo cycle on that header hash @@ -104,9 +102,12 @@ pub fn pow_size(miner:&mut T, bh: &mut BlockHeader, #[cfg(test)] mod test { use super::*; + use global; use core::target::Difficulty; use genesis; use consensus::MINIMUM_DIFFICULTY; + use global::MiningParameterMode; + #[test] fn genesis_pow() { diff --git a/core/src/ser.rs b/core/src/ser.rs index bed7b936a..26c6988e0 100644 --- a/core/src/ser.rs +++ b/core/src/ser.rs @@ -388,6 +388,7 @@ impl Writeable for [u8; 4] { /// Useful marker trait on types that can be sized byte slices pub trait AsFixedBytes: Sized + AsRef<[u8]> { + /// The length in bytes fn len(&self) -> usize; } diff --git a/grin/src/adapters.rs b/grin/src/adapters.rs index ca3c3d92a..d15d9b286 100644 --- a/grin/src/adapters.rs +++ b/grin/src/adapters.rs @@ -13,21 +13,19 @@ // limitations under the License. 
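The newly documented global.rs items above all belong to one pattern: a process-wide mining mode kept in a lazy_static RwLock, written by set_mining_mode() and read by sizeshift(), proofsize() and is_automated_testing_mode(). A stripped-down sketch of that pattern (names shortened; the testing sizeshift values come from the constants above, the Production value is a placeholder):

    #[macro_use]
    extern crate lazy_static;

    use std::sync::RwLock;

    enum Mode {
        AutomatedTesting,
        UserTesting,
        Production,
    }

    lazy_static! {
        // Process-wide mode, defaulting to Production.
        static ref MODE: RwLock<Mode> = RwLock::new(Mode::Production);
    }

    fn set_mode(mode: Mode) {
        // Writer lock only in the setter.
        let mut m = MODE.write().unwrap();
        *m = mode;
    }

    fn sizeshift() -> u8 {
        // Readers take the cheaper read lock.
        match *MODE.read().unwrap() {
            Mode::AutomatedTesting => 10, // AUTOMATED_TESTING_SIZESHIFT
            Mode::UserTesting => 16,      // USER_TESTING_SIZESHIFT
            Mode::Production => 30,       // placeholder for DEFAULT_SIZESHIFT
        }
    }

    fn main() {
        set_mode(Mode::UserTesting);
        println!("sizeshift = {}", sizeshift());
    }
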
use std::net::SocketAddr; -use std::ops::Deref; -use std::sync::{Arc, Mutex, RwLock}; +use std::sync::{Arc, RwLock}; use std::thread; use chain::{self, ChainAdapter}; use core::core::{self, Output}; use core::core::hash::{Hash, Hashed}; use core::core::target::Difficulty; -use p2p::{self, NetAdapter, Server, PeerStore, PeerData, Capabilities, State}; +use p2p::{self, NetAdapter, Server, PeerStore, PeerData, State}; use pool; use secp::pedersen::Commitment; use util::OneTime; use store; use sync; -use core::global; use core::global::{MiningParameterMode,MINING_PARAMETER_MODE}; /// Implementation of the NetAdapter for the blockchain. Gets notified when new @@ -210,9 +208,17 @@ impl NetToChainAdapter { pub fn start_sync(&self, sync: sync::Syncer) { let arc_sync = Arc::new(sync); self.syncer.init(arc_sync.clone()); - thread::Builder::new().name("syncer".to_string()).spawn(move || { - arc_sync.run(); + let spawn_result = thread::Builder::new().name("syncer".to_string()).spawn(move || { + let sync_run_result = arc_sync.run(); + match sync_run_result { + Ok(_) => {} + Err(_) => {} + } }); + match spawn_result { + Ok(_) => {} + Err(_) => {} + } } /// Prepare options for the chain pipeline diff --git a/grin/src/miner.rs b/grin/src/miner.rs index 7d20b1f99..8d213de74 100644 --- a/grin/src/miner.rs +++ b/grin/src/miner.rs @@ -65,7 +65,7 @@ pub struct HeaderPartWriter { impl Default for HeaderPartWriter { fn default() -> HeaderPartWriter { - HeaderPartWriter { + HeaderPartWriter { bytes_written: 0, writing_pre: true, pre_nonce: Vec::new(), @@ -91,13 +91,13 @@ impl ser::Writer for HeaderPartWriter { fn write_fixed_bytes(&mut self, bytes_in: &T) -> Result<(), ser::Error> { if self.writing_pre { for i in 0..bytes_in.len() {self.pre_nonce.push(bytes_in.as_ref()[i])}; - + } else if self.bytes_written!=0 { for i in 0..bytes_in.len() {self.post_nonce.push(bytes_in.as_ref()[i])}; } self.bytes_written+=bytes_in.len(); - + if self.bytes_written==PRE_NONCE_SIZE && self.writing_pre { self.writing_pre=false; self.bytes_written=0; @@ -140,20 +140,20 @@ impl Miner { } /// Inner part of the mining loop for cuckoo-miner asynch mode - pub fn inner_loop_async(&self, plugin_miner:&mut PluginMiner, - difficulty:Difficulty, + pub fn inner_loop_async(&self, plugin_miner:&mut PluginMiner, + difficulty:Difficulty, b:&mut Block, cuckoo_size: u32, head:&BlockHeader, - latest_hash:&Hash) + latest_hash:&Hash) -> Option { - + debug!("(Server ID: {}) Mining at Cuckoo{} for at most 2 secs at height {} and difficulty {}.", self.debug_output_id, cuckoo_size, b.header.height, b.header.difficulty); - + // look for a pow for at most 2 sec on the same block (to give a chance to new // transactions) and as long as the head hasn't changed // Will change this to something else at some point @@ -189,12 +189,12 @@ impl Miner { } /// The inner part of mining loop for synchronous mode - pub fn inner_loop_sync(&self, - miner:&mut T, + pub fn inner_loop_sync(&self, + miner:&mut T, b:&mut Block, - cuckoo_size: u32, + cuckoo_size: u32, head:&BlockHeader, - latest_hash:&mut Hash) + latest_hash:&mut Hash) -> Option { // look for a pow for at most 2 sec on the same block (to give a chance to new // transactions) and as long as the head hasn't changed @@ -206,7 +206,7 @@ impl Miner { latest_hash, b.header.difficulty); let mut iter_count = 0; - + if self.config.slow_down_in_millis != None && self.config.slow_down_in_millis.unwrap() > 0 { debug!("(Server ID: {}) Artificially slowing down loop by {}ms per iteration.", self.debug_output_id, @@ -215,7 +215,7 
@@ impl Miner { let mut sol=None; while head.hash() == *latest_hash && time::get_time().sec < deadline { - + let pow_hash = b.hash(); if let Ok(proof) = miner.mine(&pow_hash[..]) { let proof_diff=proof.clone().to_difficulty(); @@ -250,9 +250,9 @@ impl Miner { /// Starts the mining loop, building a new block on top of the existing /// chain anytime required and looking for PoW solution. - pub fn run_loop(&self, - miner_config:MinerConfig, - server_config:ServerConfig, + pub fn run_loop(&self, + miner_config:MinerConfig, + server_config:ServerConfig, cuckoo_size:u32, proof_size:usize) { @@ -283,28 +283,28 @@ impl Miner { } if let Some(mut p) = plugin_miner.as_mut() { if use_async { - sol = self.inner_loop_async(&mut p, - b.header.difficulty.clone(), + sol = self.inner_loop_async(&mut p, + b.header.difficulty.clone(), &mut b, cuckoo_size, &head, &latest_hash); } else { - sol = self.inner_loop_sync(p, + sol = self.inner_loop_sync(p, &mut b, cuckoo_size, &head, &mut latest_hash); } - } + } if let Some(mut m) = miner.as_mut() { - sol = self.inner_loop_sync(m, + sol = self.inner_loop_sync(m, &mut b, cuckoo_size, &head, &mut latest_hash); } - + // if we found a solution, push our block out if let Some(proof) = sol { info!("(Server ID: {}) Found valid proof of work, adding block {}.", @@ -322,7 +322,7 @@ impl Miner { } else { coinbase = self.get_coinbase(); } - } + } } } diff --git a/grin/src/plugin.rs b/grin/src/plugin.rs index e2e0451f9..bdd30f83e 100644 --- a/grin/src/plugin.rs +++ b/grin/src/plugin.rs @@ -22,9 +22,7 @@ use std::env; use core::pow::cuckoo; use core::pow::cuckoo::Error; use core::pow::MiningWorker; -use core::consensus::DEFAULT_SIZESHIFT; -use core::global; -use std::collections::HashMap; +use core::global; use core::core::Proof; use types::{MinerConfig, ServerConfig}; @@ -35,9 +33,7 @@ use cuckoo_miner::{ CuckooMiner, CuckooPluginManager, CuckooMinerConfig, - CuckooMinerError, - CuckooMinerSolution, - CuckooPluginCapabilities}; + CuckooMinerSolution}; //For now, we're just going to keep a static reference around to the loaded config //And not allow querying the plugin directory twice once a plugin has been selected @@ -48,7 +44,9 @@ lazy_static!{ static ref LOADED_CONFIG: Mutex> = Mutex::new(None); } +/// plugin miner pub struct PluginMiner { + /// the miner pub miner:Option, last_solution: CuckooMinerSolution, config: CuckooMinerConfig, @@ -65,7 +63,8 @@ impl Default for PluginMiner { } impl PluginMiner { - pub fn init(&mut self, miner_config: MinerConfig, server_config: ServerConfig){ + /// Init the plugin miner + pub fn init(&mut self, miner_config: MinerConfig, _server_config: ServerConfig){ //Get directory of executable let mut exe_path=env::current_exe().unwrap(); exe_path.pop(); @@ -83,8 +82,8 @@ impl PluginMiner { //First, load and query the plugins in the given directory //These should all be stored in 'deps' at the moment relative - //to the executable path, though they should appear somewhere else - //when packaging is more//thought out + //to the executable path, though they should appear somewhere else + //when packaging is more//thought out let mut loaded_config_ref = LOADED_CONFIG.lock().unwrap(); @@ -100,7 +99,7 @@ impl PluginMiner { let mut plugin_manager = CuckooPluginManager::new().unwrap(); let result=plugin_manager.load_plugin_dir(plugin_install_path); - if let Err(e) = result { + if let Err(_) = result { error!("Unable to load cuckoo-miner plugin directory, either from configuration or [exe_path]/deps."); panic!("Unable to load plugin directory... 
Please check configuration values"); } @@ -115,7 +114,7 @@ impl PluginMiner { //insert it into the miner configuration being created below let mut config = CuckooMinerConfig::new(); - + info!("Mining using plugin: {}", caps[0].full_path.clone()); config.plugin_full_path = caps[0].full_path.clone(); if let Some(l) = miner_config.cuckoo_miner_parameter_list { @@ -134,12 +133,13 @@ impl PluginMiner { panic!("Unable to init mining plugin."); } - self.config=config.clone(); + self.config=config.clone(); self.miner=Some(result.unwrap()); } + /// Get the miner pub fn get_consumable(&mut self)->CuckooMiner{ - + //this will load the associated plugin let result=CuckooMiner::new(self.config.clone()); if let Err(e) = result { @@ -148,7 +148,7 @@ impl PluginMiner { } result.unwrap() } - + } impl MiningWorker for PluginMiner { @@ -158,9 +158,9 @@ impl MiningWorker for PluginMiner { /// version of the miner for now, though this should become /// configurable somehow - fn new(ease: u32, - sizeshift: u32, - proof_size: usize) -> Self { + fn new(_ease: u32, + _sizeshift: u32, + _proof_size: usize) -> Self { PluginMiner::default() } @@ -175,4 +175,3 @@ impl MiningWorker for PluginMiner { Err(Error::NoSolution) } } - diff --git a/grin/src/seed.rs b/grin/src/seed.rs index 1f79626ca..bf09f9d4f 100644 --- a/grin/src/seed.rs +++ b/grin/src/seed.rs @@ -19,10 +19,8 @@ use rand::{thread_rng, Rng}; use std::cmp::min; use std::net::SocketAddr; -use std::ops::Deref; use std::str::{self, FromStr}; use std::sync::Arc; -use std::thread; use std::time; use cpupool; @@ -93,7 +91,12 @@ impl Seeder { for p in disconnected { if p.is_banned() { debug!("Marking peer {} as banned.", p.info.addr); - peer_store.update_state(p.info.addr, p2p::State::Banned); + let update_result = peer_store.update_state( + p.info.addr, p2p::State::Banned); + match update_result { + Ok(()) => {} + Err(_) => {} + } } } @@ -240,11 +243,19 @@ fn connect_and_req(capab: p2p::Capabilities, .then(move |p| { match p { Ok(Some(p)) => { - p.send_peer_request(capab); + let peer_result = p.send_peer_request(capab); + match peer_result { + Ok(()) => {} + Err(_) => {} + } } Err(e) => { error!("Peer request error: {:?}", e); - peer_store.update_state(addr, p2p::State::Defunct); + let update_result = peer_store.update_state(addr, p2p::State::Defunct); + match update_result { + Ok(()) => {} + Err(_) => {} + } } _ => {} } diff --git a/grin/src/server.rs b/grin/src/server.rs index e8f32ae31..6ac1201c6 100644 --- a/grin/src/server.rs +++ b/grin/src/server.rs @@ -17,36 +17,31 @@ //! as a facade. use std::net::SocketAddr; -use std::sync::{Arc, Mutex, RwLock}; +use std::sync::{Arc, RwLock}; use std::thread; use std::time; -use futures::{future, Future, Stream}; +use futures::{Future, Stream}; use tokio_core::reactor; use tokio_timer::Timer; use adapters::*; use api; use chain; -use chain::ChainStore; -use core::{self, consensus}; -use core::core::hash::Hashed; -use core::pow::cuckoo; -use core::pow::MiningWorker; use miner; use p2p; use pool; use seed; -use store; use sync; use types::*; -use plugin::PluginMiner; use core::global; /// Grin server holding internal structures. 
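The `_ease`, `_sizeshift` and `_proof_size` renames above are the standard fix when a trait dictates a signature whose arguments one implementation does not need: the parameters cannot be removed, but a leading underscore (or a bare `_`, as in the NetAdapter stubs later in this patch) silences unused_variables. A minimal sketch with hypothetical types:

    trait MiningWorker {
        fn new(ease: u32, sizeshift: u32) -> Self;
        fn mine(&self, header: &[u8]) -> bool;
    }

    struct FixedWorker;

    impl MiningWorker for FixedWorker {
        // This impl ignores its configuration, but the trait fixes
        // the signature; underscore prefixes keep the names visible
        // to readers while silencing the lint.
        fn new(_ease: u32, _sizeshift: u32) -> Self {
            FixedWorker
        }

        // A bare `_` works too when the name adds nothing.
        fn mine(&self, _: &[u8]) -> bool {
            false
        }
    }

    fn main() {
        let w = FixedWorker::new(50, 30);
        println!("solved: {}", w.mine(b"header"));
    }
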
pub struct Server { + /// server config pub config: ServerConfig, + /// event handle evt_handle: reactor::Handle, /// handle to our network server p2p: Arc, @@ -137,6 +132,7 @@ impl Server { Ok(()) } + /// Number of peers pub fn peer_count(&self) -> u32 { self.p2p.peer_count() } @@ -152,10 +148,11 @@ impl Server { miner.set_debug_output_id(format!("Port {}",self.config.p2p_config.unwrap().port)); let server_config = self.config.clone(); thread::spawn(move || { - miner.run_loop(config.clone(), server_config, cuckoo_size as u32, proof_size); + miner.run_loop(config.clone(), server_config, cuckoo_size as u32, proof_size); }); } + /// The chain head pub fn head(&self) -> chain::Tip { self.chain.head().unwrap() } diff --git a/grin/src/sync.rs b/grin/src/sync.rs index 39a19824f..f59e53341 100644 --- a/grin/src/sync.rs +++ b/grin/src/sync.rs @@ -20,7 +20,6 @@ /// How many block bodies to download in parallel const MAX_BODY_DOWNLOADS: usize = 8; -use std::ops::Deref; use std::sync::{Arc, Mutex}; use std::thread; use std::time::{Instant, Duration}; @@ -152,7 +151,11 @@ impl Syncer { while blocks_to_download.len() > 0 && blocks_downloading.len() < MAX_BODY_DOWNLOADS { let h = blocks_to_download.pop().unwrap(); let peer = self.p2p.random_peer().unwrap(); - peer.send_block_request(h); + let send_result = peer.send_block_request(h); + match send_result { + Ok(_) => {} + Err(_) => {} + } blocks_downloading.push((h, Instant::now())); } debug!("Requesting more full block hashes to download, total: {}.", @@ -199,7 +202,7 @@ impl Syncer { } // ask for more headers if we got as many as required if hs_len == (p2p::MAX_BLOCK_HEADERS as usize) { - self.request_headers(); + self.request_headers().unwrap(); } } diff --git a/grin/src/types.rs b/grin/src/types.rs index 8c1862542..3f5316b76 100644 --- a/grin/src/types.rs +++ b/grin/src/types.rs @@ -84,7 +84,7 @@ pub struct ServerConfig { /// Method used to get the list of seed nodes for initial bootstrap. 
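sync.rs above shows the other two Result-acknowledgement styles this patch uses alongside `let _ = ...`: an explicit empty match when reviewers should see that both arms were considered, and .unwrap() when failure means the node cannot usefully continue. Sketched with a hypothetical fallible `send`:

    fn send(ok: bool) -> Result<(), String> {
        if ok { Ok(()) } else { Err("peer gone".to_string()) }
    }

    fn main() {
        // 1. Explicit empty match, as for send_block_request above:
        //    verbose, but makes the deliberate ignoring visible.
        match send(true) {
            Ok(_) => {}
            Err(_) => {}
        }

        // 2. Discarded binding: same effect, terser.
        let _ = send(true);

        // 3. unwrap(), as for request_headers above: when an error
        //    here is unrecoverable anyway, panicking is acceptable.
        send(true).unwrap();
    }
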
pub seeding_type: Seeding, - + /// The list of seed nodes, if using Seeding as a seed type pub seeds: Option>, @@ -173,6 +173,8 @@ impl Default for MinerConfig { #[derive(Clone)] pub struct ServerStats { + /// Number of peers pub peer_count:u32, + /// Chain head pub head: chain::Tip, } diff --git a/grin/tests/framework.rs b/grin/tests/framework.rs index 0ecf324f6..948b02bf9 100644 --- a/grin/tests/framework.rs +++ b/grin/tests/framework.rs @@ -30,23 +30,15 @@ extern crate futures_cpupool; use std::thread; use std::time; use std::default::Default; -use std::mem; use std::fs; -use std::sync::{Arc, Mutex, RwLock}; -use std::ops::Deref; +use std::sync::{Arc, Mutex}; -use futures::{Future}; -use futures::future::join_all; -use futures::task::park; use tokio_core::reactor; -use tokio_core::reactor::Remote; -use tokio_core::reactor::Handle; use tokio_timer::Timer; use secp::Secp256k1; use wallet::WalletConfig; -use core::consensus; /// Just removes all results from previous runs @@ -61,18 +53,19 @@ pub fn clean_all_output(test_name_dir:&str){ /// Errors that can be returned by LocalServerContainer #[derive(Debug)] +#[allow(dead_code)] pub enum Error { Internal(String), Argument(String), - NotFound, + NotFound, } /// All-in-one server configuration struct, for convenience -/// +/// #[derive(Clone)] pub struct LocalServerContainerConfig { - + //user friendly name for the server, also denotes what dir //the data files will appear in pub name: String, @@ -81,8 +74,8 @@ pub struct LocalServerContainerConfig { pub base_addr: String, //Port the server (p2p) is running on - pub p2p_server_port: u16, - + pub p2p_server_port: u16, + //Port the API server is running on pub api_server_port: u16, @@ -113,7 +106,7 @@ pub struct LocalServerContainerConfig { pub coinbase_wallet_address: String, //When running a wallet, the address to check inputs and send - //finalised transactions to, + //finalised transactions to, pub wallet_validating_node_url:String, @@ -168,7 +161,7 @@ pub struct LocalServerContainer { //the list of peers to connect to pub peer_list: Vec, - + //base directory for the server instance working_dir: String, @@ -193,8 +186,7 @@ impl LocalServerContainer { })) } - pub fn run_server(&mut self, - duration_in_seconds: u64) -> grin::ServerStats + pub fn run_server(&mut self, duration_in_seconds: u64) -> grin::ServerStats { let mut event_loop = reactor::Core::new().unwrap(); @@ -208,7 +200,7 @@ impl LocalServerContainer { seeds=vec![self.config.seed_addr.to_string()]; } - + let s = grin::Server::future( grin::ServerConfig{ api_http_addr: api_addr, @@ -227,7 +219,7 @@ impl LocalServerContainer { thread::sleep(time::Duration::from_millis(1000)); } - let mut miner_config = grin::MinerConfig { + let miner_config = grin::MinerConfig { enable_mining: self.config.start_miner, burn_reward: self.config.burn_mining_rewards, use_cuckoo_miner: false, @@ -238,7 +230,7 @@ impl LocalServerContainer { slow_down_in_millis: Some(self.config.miner_slowdown_in_millis.clone()), ..Default::default() }; - + if self.config.start_miner == true { println!("starting Miner on port {}", self.config.p2p_server_port); s.start_miner(miner_config); @@ -251,7 +243,7 @@ impl LocalServerContainer { let timeout = Timer::default().sleep(time::Duration::from_secs(duration_in_seconds)); - event_loop.run(timeout); + event_loop.run(timeout).unwrap(); if self.wallet_is_running{ self.stop_wallet(); @@ -260,15 +252,15 @@ impl LocalServerContainer { s.get_server_stats().unwrap() } - + /// Starts a wallet daemon to receive and returns the /// 
listening server url - - pub fn run_wallet(&mut self, duration_in_seconds: u64) { + + pub fn run_wallet(&mut self, _duration_in_seconds: u64) { //URL on which to start the wallet listener (i.e. api server) let url = format!("{}:{}", self.config.base_addr, self.config.wallet_port); - + //Just use the name of the server for a seed for now let seed = format!("{}", self.config.name); @@ -277,18 +269,18 @@ impl LocalServerContainer { let s = Secp256k1::new(); let key = wallet::ExtendedKey::from_seed(&s, seed.as_bytes()) .expect("Error deriving extended key from seed."); - + println!("Starting the Grin wallet receiving daemon on {} ", self.config.wallet_port ); let mut wallet_config = WalletConfig::default(); - + wallet_config.api_http_addr = format!("http://{}", url); wallet_config.check_node_api_http_addr = self.config.wallet_validating_node_url.clone(); - wallet_config.data_file_dir=self.working_dir.clone(); + wallet_config.data_file_dir=self.working_dir.clone(); let mut api_server = api::ApiServer::new("/v1".to_string()); - - api_server.register_endpoint("/receive".to_string(), wallet::WalletReceiver { + + api_server.register_endpoint("/receive".to_string(), wallet::WalletReceiver { key: key, config: wallet_config, }); @@ -301,9 +293,9 @@ impl LocalServerContainer { self.wallet_is_running = true; } - + /// Stops the running wallet server - + pub fn stop_wallet(&mut self){ let mut api_server = self.api_server.as_mut().unwrap(); api_server.stop(); @@ -314,7 +306,7 @@ impl LocalServerContainer { pub fn add_peer(&mut self, addr:String){ self.peer_list.push(addr); } - + } /// Configuration values for container pool @@ -428,16 +420,16 @@ impl LocalServerContainerPool { self.is_seeding=true; } - let server_address = format!("{}:{}", - server_config.base_addr, - server_config.p2p_server_port); + let _server_address = format!("{}:{}", + server_config.base_addr, + server_config.p2p_server_port); - let mut server_container = LocalServerContainer::new(server_config.clone()).unwrap(); + let server_container = LocalServerContainer::new(server_config.clone()).unwrap(); //self.server_containers.push(server_arc); //Create a future that runs the server for however many seconds //collect them all and run them in the run_all_servers - let run_time = self.config.run_length_in_seconds; + let _run_time = self.config.run_length_in_seconds; self.server_containers.push(server_container); @@ -447,9 +439,9 @@ impl LocalServerContainerPool { /// adds n servers, ready to run /// /// - + #[allow(dead_code)] pub fn create_servers(&mut self, number: u16){ - for n in 0..number { + for _ in 0..number { //self.create_server(); } } @@ -489,7 +481,7 @@ impl LocalServerContainerPool { for handle in handles { match handle.join() { - Ok(v) => {} + Ok(_) => {} Err(e) => { println!("Error starting server thread: {:?}", e); panic!(e); @@ -522,5 +514,4 @@ impl LocalServerContainerPool { } } } - } diff --git a/grin/tests/simulnet.rs b/grin/tests/simulnet.rs index d1c9f2a67..edaedadbd 100644 --- a/grin/tests/simulnet.rs +++ b/grin/tests/simulnet.rs @@ -25,9 +25,6 @@ extern crate futures; extern crate tokio_core; extern crate tokio_timer; -use std::sync::{Arc, Mutex, RwLock}; -use std::fs; - mod framework; use std::thread; @@ -35,7 +32,7 @@ use std::time; use std::default::Default; use futures::{Future, Poll, Async}; -use futures::task::park; +use futures::task::current; use tokio_core::reactor; use tokio_timer::Timer; @@ -51,7 +48,7 @@ use framework::{LocalServerContainer, LocalServerContainerConfig, LocalServerCon /// Block and mining 
into a wallet for a bit #[test] fn basic_genesis_mine() { - env_logger::init(); + let _ = env_logger::init(); global::set_mining_mode(MiningParameterMode::AutomatedTesting); let test_name_dir = "genesis_mine"; @@ -82,7 +79,7 @@ fn basic_genesis_mine() { /// messages they all end up connected. #[test] fn simulate_seeding() { - env_logger::init(); + let _ = env_logger::init(); global::set_mining_mode(MiningParameterMode::AutomatedTesting); let test_name_dir = "simulate_seeding"; @@ -116,13 +113,13 @@ fn simulate_seeding() { server_config.p2p_server_port )); - for i in 0..4 { - pool.create_server(&mut server_config); - } + for _ in 0..4 { + pool.create_server(&mut server_config); + } pool.connect_all_peers(); - let result_vec = pool.run_all_servers(); + let _ = pool.run_all_servers(); } /// Create 1 server, start it mining, then connect 4 other peers mining and @@ -136,8 +133,9 @@ fn simulate_seeding() { // being, // As it's more for actively testing and hurts CI a lot //#[test] +#[allow(dead_code)] fn simulate_parallel_mining() { - env_logger::init(); + let _ = env_logger::init(); global::set_mining_mode(MiningParameterMode::AutomatedTesting); let test_name_dir = "simulate_parallel_mining"; @@ -179,7 +177,7 @@ fn simulate_parallel_mining() { pool.connect_all_peers(); - let result_vec = pool.run_all_servers(); + let _ = pool.run_all_servers(); // Check mining difficulty here?, though I'd think it's more valuable // to simply output it. Can at least see the evolution of the difficulty target @@ -335,7 +333,7 @@ impl<'a> Future for HeadChange<'a> { Ok(Async::Ready(new_head)) } else { // egregious polling, asking the task to schedule us every iteration - park().unpark(); + current().notify(); Ok(Async::NotReady) } } diff --git a/p2p/src/conn.rs b/p2p/src/conn.rs index e7523d4f2..cc69cb41e 100644 --- a/p2p/src/conn.rs +++ b/p2p/src/conn.rs @@ -24,12 +24,12 @@ use futures; use futures::{Stream, Future}; use futures::stream; use futures::sync::mpsc::{Sender, UnboundedSender, UnboundedReceiver}; -use tokio_core::io::{WriteHalf, ReadHalf, write_all, read_exact}; use tokio_core::net::TcpStream; +use tokio_io::{AsyncRead, AsyncWrite}; +use tokio_io::io::{read_exact, write_all}; use tokio_timer::{Timer, TimerError}; -use tokio_io::*; -use core::core::hash::{Hash, ZERO_HASH}; +use core::core::hash::Hash; use core::ser; use msg::*; use types::Error; @@ -65,6 +65,7 @@ impl Handler for F /// A higher level connection wrapping the TcpStream. Maintains the amount of /// data transmitted and deals with the low-level task of sending and /// receiving data, parsing message headers and timeouts. 
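The conn.rs import swap just above is the "tokio_core -> tokio_io" item from the commit message: the read_exact/write_all combinators and the AsyncRead/AsyncWrite traits moved into the tokio_io crate, while tokio_core keeps the reactor and TcpStream. (The same pass replaced the deprecated futures task::park().unpark() with task::current().notify() in simulnet.rs above.) A compile-only sketch of the relocated imports, assuming the tokio-core 0.1 and tokio-io 0.1 crates of this era:

    #![allow(unused_imports)]

    extern crate tokio_core;
    extern crate tokio_io;

    // Before (deprecated location, removed above):
    // use tokio_core::io::{read_exact, write_all, ReadHalf, WriteHalf};

    // After: combinators and the async traits live in tokio_io;
    // tokio_core keeps the event loop and the TCP types.
    use tokio_core::net::TcpStream;
    use tokio_io::{AsyncRead, AsyncWrite};
    use tokio_io::io::{read_exact, write_all};

    fn main() {}
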
+#[allow(dead_code)] pub struct Connection { // Channel to push bytes to the remote peer outbound_chan: UnboundedSender>, @@ -150,7 +151,7 @@ impl Connection { }) // write the data and make sure the future returns the right types .fold(writer, |writer, data| { - write_all(writer, data).map_err(|e| Error::Connection(e)).map(|(writer, buf)| writer) + write_all(writer, data).map_err(|e| Error::Connection(e)).map(|(writer, _)| writer) }); Box::new(send_data) } @@ -287,7 +288,7 @@ impl TimeoutConnection { underlying: conn, expected_responses: expects, }; - (me, Box::new(fut.select(timer).map(|_| ()).map_err(|(e1, e2)| e1))) + (me, Box::new(fut.select(timer).map(|_| ()).map_err(|(e1, _)| e1))) } /// Sends a request and registers a timer on the provided message type and @@ -298,7 +299,7 @@ impl TimeoutConnection { body: &W, expect_h: Option<(Hash)>) -> Result<(), Error> { - let sent = try!(self.underlying.send_msg(t, body)); + let _sent = try!(self.underlying.send_msg(t, body)); let mut expects = self.expected_responses.lock().unwrap(); expects.push((rt, expect_h, Instant::now())); diff --git a/p2p/src/lib.rs b/p2p/src/lib.rs index e42bf2f91..c61aaf0e1 100644 --- a/p2p/src/lib.rs +++ b/p2p/src/lib.rs @@ -31,7 +31,6 @@ extern crate grin_util as util; #[macro_use] extern crate log; extern crate futures; -#[macro_use] extern crate tokio_core; extern crate tokio_io; extern crate bytes; diff --git a/p2p/src/msg.rs b/p2p/src/msg.rs index b053fe41d..9ae6a5cb9 100644 --- a/p2p/src/msg.rs +++ b/p2p/src/msg.rs @@ -19,7 +19,7 @@ use num::FromPrimitive; use futures::future::{Future, ok}; use tokio_core::net::TcpStream; -use tokio_core::io::{write_all, read_exact}; +use tokio_io::io::{read_exact, write_all}; use core::consensus::MAX_MSG_LEN; use core::core::BlockHeader; @@ -42,6 +42,7 @@ const MAGIC: [u8; 2] = [0x1e, 0xc5]; pub const HEADER_LEN: u64 = 11; /// Codes for each error that can be produced reading a message. 
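Connection above (and ErrCodes just below) gain #[allow(dead_code)]: they carry fields or variants that nothing reads yet, kept for protocol completeness, and a scoped allow silences the lint without disabling it crate-wide. A minimal sketch with illustrative names:

    // dead_code fires on items never constructed or read; a scoped
    // allow documents that the dormancy is intentional.
    #[allow(dead_code)]
    enum ErrCode {
        UnsupportedVersion = 100, // reserved wire-protocol code
    }

    #[allow(dead_code)]
    struct Connection {
        bytes_sent: u64, // tracked for later rate limiting
    }

    fn main() {
        println!("compiles cleanly despite the unused items");
    }
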
+#[allow(dead_code)] pub enum ErrCodes { UnsupportedVersion = 100, } @@ -105,12 +106,12 @@ pub fn write_msg(conn: TcpStream, let write_msg = ok((conn)).and_then(move |conn| { // prepare the body first so we know its serialized length let mut body_buf = vec![]; - ser::serialize(&mut body_buf, &msg); + ser::serialize(&mut body_buf, &msg).unwrap(); // build and serialize the header using the body size let mut header_buf = vec![]; let blen = body_buf.len() as u64; - ser::serialize(&mut header_buf, &MsgHeader::new(msg_type, blen)); + ser::serialize(&mut header_buf, &MsgHeader::new(msg_type, blen)).unwrap(); // send the whole thing write_all(conn, header_buf) @@ -202,9 +203,9 @@ impl Writeable for Hand { [write_u32, self.version], [write_u32, self.capabilities.bits()], [write_u64, self.nonce]); - self.total_difficulty.write(writer); - self.sender_addr.write(writer); - self.receiver_addr.write(writer); + self.total_difficulty.write(writer).unwrap(); + self.sender_addr.write(writer).unwrap(); + self.receiver_addr.write(writer).unwrap(); writer.write_bytes(&self.user_agent) } } @@ -250,8 +251,8 @@ impl Writeable for Shake { ser_multiwrite!(writer, [write_u32, self.version], [write_u32, self.capabilities.bits()]); - self.total_difficulty.write(writer); - writer.write_bytes(&self.user_agent); + self.total_difficulty.write(writer).unwrap(); + writer.write_bytes(&self.user_agent).unwrap(); Ok(()) } } @@ -302,7 +303,7 @@ impl Writeable for PeerAddrs { fn write(&self, writer: &mut W) -> Result<(), ser::Error> { try!(writer.write_u32(self.peers.len() as u32)); for p in &self.peers { - p.write(writer); + p.write(writer).unwrap(); } Ok(()) } @@ -464,13 +465,13 @@ impl Readable for Headers { pub struct Empty {} impl Writeable for Empty { - fn write(&self, writer: &mut W) -> Result<(), ser::Error> { + fn write(&self, _: &mut W) -> Result<(), ser::Error> { Ok(()) } } impl Readable for Empty { - fn read(reader: &mut Reader) -> Result { + fn read(_: &mut Reader) -> Result { Ok(Empty {}) } } diff --git a/p2p/src/peer.rs b/p2p/src/peer.rs index dce0a0ff5..b0395da15 100644 --- a/p2p/src/peer.rs +++ b/p2p/src/peer.rs @@ -91,7 +91,7 @@ impl Peer { // handle disconnection, standard disconnections aren't considered an error let mut state = state.write().unwrap(); match res { - Ok(res) => { + Ok(_) => { *state = State::Disconnected; info!("Client {} disconnected.", addr); Ok(()) diff --git a/p2p/src/protocol.rs b/p2p/src/protocol.rs index d7aebcc3f..9cca056e9 100644 --- a/p2p/src/protocol.rs +++ b/p2p/src/protocol.rs @@ -14,9 +14,7 @@ use std::sync::{Mutex, Arc}; -use futures; use futures::Future; -use futures::stream; use futures::sync::mpsc::UnboundedSender; use tokio_core::net::TcpStream; @@ -28,6 +26,7 @@ use msg::*; use types::*; use util::OneTime; +#[allow(dead_code)] pub struct ProtocolV1 { conn: OneTime, @@ -128,7 +127,7 @@ fn handle_payload(adapter: &NetAdapter, match header.msg_type { Type::Ping => { let data = ser::ser_vec(&MsgHeader::new(Type::Pong, 0))?; - sender.send(data); + sender.send(data).unwrap(); Ok(None) } Type::Pong => Ok(None), @@ -148,7 +147,7 @@ fn handle_payload(adapter: &NetAdapter, try!(ser::serialize(&mut data, &MsgHeader::new(Type::Block, body_data.len() as u64))); data.append(&mut body_data); - sender.send(data); + sender.send(data).unwrap(); } Ok(None) } @@ -170,7 +169,7 @@ fn handle_payload(adapter: &NetAdapter, try!(ser::serialize(&mut data, &MsgHeader::new(Type::Headers, body_data.len() as u64))); data.append(&mut body_data); - sender.send(data); + sender.send(data).unwrap(); Ok(None) 
} @@ -193,7 +192,7 @@ fn handle_payload(adapter: &NetAdapter, try!(ser::serialize(&mut data, &MsgHeader::new(Type::PeerAddrs, body_data.len() as u64))); data.append(&mut body_data); - sender.send(data); + sender.send(data).unwrap(); Ok(None) } diff --git a/p2p/src/rate_limit.rs b/p2p/src/rate_limit.rs index 95854351e..7e86d1d1c 100644 --- a/p2p/src/rate_limit.rs +++ b/p2p/src/rate_limit.rs @@ -14,12 +14,11 @@ //! Provides wrappers for throttling readers and writers -use std::time::{Instant, Duration}; +use std::time::Instant; use std::io; use futures::*; use tokio_io::*; -use bytes::{Buf, BytesMut, BufMut}; /// A Rate Limited Reader #[derive(Debug)] @@ -32,6 +31,7 @@ pub struct ThrottledReader { last_check: Instant, } +#[allow(dead_code)] impl ThrottledReader { /// Adds throttling to a reader. /// The resulting reader will read at most `max` amount of bytes per second @@ -105,6 +105,7 @@ pub struct ThrottledWriter { last_check: Instant, } +#[allow(dead_code)] impl ThrottledWriter { /// Adds throttling to a writer. /// The resulting writer will write at most `max` amount of bytes per second @@ -188,7 +189,7 @@ mod test { for _ in 0..16 { let _ = t_buf.write_buf(&mut Cursor::new(vec![1; 8])); } - + let cursor = t_buf.into_inner(); assert_eq!(cursor.position(), 8); } @@ -203,7 +204,7 @@ mod test { for _ in 0..16 { let _ = t_buf.read_buf(&mut dst); } - + assert_eq!(dst.position(), 8); } -} \ No newline at end of file +} diff --git a/p2p/src/server.rs b/p2p/src/server.rs index 7ebca4d96..af23bc004 100644 --- a/p2p/src/server.rs +++ b/p2p/src/server.rs @@ -41,20 +41,20 @@ impl NetAdapter for DummyAdapter { fn total_difficulty(&self) -> Difficulty { Difficulty::one() } - fn transaction_received(&self, tx: core::Transaction) {} - fn block_received(&self, b: core::Block) {} - fn headers_received(&self, bh: Vec) {} - fn locate_headers(&self, locator: Vec) -> Vec { + fn transaction_received(&self, _: core::Transaction) {} + fn block_received(&self, _: core::Block) {} + fn headers_received(&self, _: Vec) {} + fn locate_headers(&self, _: Vec) -> Vec { vec![] } - fn get_block(&self, h: Hash) -> Option { + fn get_block(&self, _: Hash) -> Option { None } - fn find_peer_addrs(&self, capab: Capabilities) -> Vec { + fn find_peer_addrs(&self, _: Capabilities) -> Vec { vec![] } - fn peer_addrs_received(&self, peer_addrs: Vec) {} - fn peer_connected(&self, pi: &PeerInfo) {} + fn peer_addrs_received(&self, _: Vec) {} + fn peer_connected(&self, _: &PeerInfo) {} } /// P2P server implementation, handling bootstrapping to find and connect to @@ -97,7 +97,7 @@ impl Server { // main peer acceptance future handling handshake let hp = h.clone(); - let peers = socket.incoming().map_err(From::from).map(move |(conn, addr)| { + let peers = socket.incoming().map_err(From::from).map(move |(conn, _)| { let adapter = adapter.clone(); let total_diff = adapter.total_difficulty(); let peers = peers.clone(); @@ -275,7 +275,7 @@ impl Server { for p in peers.deref() { p.stop(); } - self.stop.into_inner().unwrap().complete(()); + self.stop.into_inner().unwrap().send(()).unwrap(); } } diff --git a/p2p/src/types.rs b/p2p/src/types.rs index ef2b80e48..dd5337e4f 100644 --- a/p2p/src/types.rs +++ b/p2p/src/types.rs @@ -33,6 +33,7 @@ pub const MAX_LOCATORS: u32 = 10; pub const MAX_BLOCK_HEADERS: u32 = 512; /// Maximum number of block bodies a peer should ever ask for and send +#[allow(dead_code)] pub const MAX_BLOCK_BODIES: u32 = 16; /// Maximum number of peer addresses a peer should ever send @@ -57,7 +58,7 @@ impl From for Error { } } 
impl From for Error { - fn from(e: TimerError) -> Error { + fn from(_: TimerError) -> Error { Error::Timeout } } diff --git a/p2p/tests/peer_handshake.rs b/p2p/tests/peer_handshake.rs index 2d945481e..29718915a 100644 --- a/p2p/tests/peer_handshake.rs +++ b/p2p/tests/peer_handshake.rs @@ -26,7 +26,6 @@ use futures::future::Future; use tokio_core::net::TcpStream; use tokio_core::reactor::{self, Core}; -use core::ser; use core::core::target::Difficulty; use p2p::Peer; diff --git a/pool/src/blockchain.rs b/pool/src/blockchain.rs index 499848d43..a64a55364 100644 --- a/pool/src/blockchain.rs +++ b/pool/src/blockchain.rs @@ -1,7 +1,7 @@ // This file is (hopefully) temporary. // // It contains a trait based on (but not exactly equal to) the trait defined -// for the blockchain UTXO set, discussed at +// for the blockchain UTXO set, discussed at // https://github.com/ignopeverell/grin/issues/29, and a dummy implementation // of said trait. // Notably, UtxoDiff has been left off, and the question of how to handle @@ -20,11 +20,12 @@ use std::sync::RwLock; use types::BlockChain; -/// A DummyUtxoSet for mocking up the chain +/// A DummyUtxoSet for mocking up the chain pub struct DummyUtxoSet { outputs : HashMap } +#[allow(dead_code)] impl DummyUtxoSet { pub fn empty() -> DummyUtxoSet{ DummyUtxoSet{outputs: HashMap::new()} @@ -50,7 +51,7 @@ impl DummyUtxoSet { self.outputs.insert(output.commitment(), output.clone()); } } - pub fn rewind(&self, b: &block::Block) -> DummyUtxoSet { + pub fn rewind(&self, _: &block::Block) -> DummyUtxoSet { DummyUtxoSet{outputs: HashMap::new()} } pub fn get_output(&self, output_ref: &Commitment) -> Option<&transaction::Output> { @@ -75,10 +76,12 @@ impl DummyUtxoSet { /// A DummyChain is the mocked chain for playing with what methods we would /// need +#[allow(dead_code)] pub struct DummyChainImpl { utxo: RwLock } +#[allow(dead_code)] impl DummyChainImpl { pub fn new() -> DummyChainImpl { DummyChainImpl{ diff --git a/pool/src/graph.rs b/pool/src/graph.rs index 019a2fc47..9bd1a3836 100644 --- a/pool/src/graph.rs +++ b/pool/src/graph.rs @@ -15,15 +15,9 @@ //! 
 //! Base types for the transaction pool's Directed Acyclic Graphs

 use std::vec::Vec;
-use std::sync::Arc;
-use std::sync::RwLock;
-use std::sync::Weak;
-use std::cell::RefCell;
 use std::collections::HashMap;

 use secp::pedersen::Commitment;
-use secp::{Secp256k1, ContextFlag};
-use secp::key;

 use time;
 use rand;
@@ -36,24 +30,28 @@ use core::core;
 /// These are the vertices of both of the graph structures
 pub struct PoolEntry {
 	// Core data
-	// Unique identifier of this pool entry and the corresponding transaction
+	/// Unique identifier of this pool entry and the corresponding transaction
 	pub transaction_hash: core::hash::Hash,

-	// Metadata 
-	size_estimate: u64,
+	// Metadata
+	/// Size estimate
+	pub size_estimate: u64,
+	/// Receive timestamp
 	pub receive_ts: time::Tm,
 }

 impl PoolEntry {
+	/// Create new transaction pool entry
 	pub fn new(tx: &core::transaction::Transaction) -> PoolEntry {
 		PoolEntry{
 			transaction_hash: transaction_identifier(tx),
 			size_estimate : estimate_transaction_size(tx),
-			receive_ts: time::now()} 
+			receive_ts: time::now()}
 	}
 }

-fn estimate_transaction_size(tx: &core::transaction::Transaction) -> u64 {
+/// TODO guessing this needs implementing
+fn estimate_transaction_size(_tx: &core::transaction::Transaction) -> u64 {
 	0
 }

@@ -72,24 +70,32 @@ pub struct Edge {
 }

 impl Edge{
+	/// Create new edge
 	pub fn new(source: Option<core::hash::Hash>, destination: Option<core::hash::Hash>, output: Commitment) -> Edge {
 		Edge{source: source, destination: destination, output: output}
 	}

+	/// Create new edge with a source
 	pub fn with_source(&self, src: Option<core::hash::Hash>) -> Edge {
 		Edge{source: src, destination: self.destination, output: self.output}
 	}

+	/// Create new edge with destination
 	pub fn with_destination(&self, dst: Option<core::hash::Hash>) -> Edge {
 		Edge{source: self.source, destination: dst, output: self.output}
 	}

+	/// The output commitment of the edge
 	pub fn output_commitment(&self) -> Commitment {
 		self.output
 	}
+
+	/// The destination hash of the edge
 	pub fn destination_hash(&self) -> Option<core::hash::Hash> {
 		self.destination
 	}
+
+	/// The source hash of the edge
 	pub fn source_hash(&self) -> Option<core::hash::Hash> {
 		self.source
 	}
@@ -108,13 +114,14 @@ pub struct DirectedGraph {
 	edges: HashMap<Commitment, Edge>,
 	vertices: Vec<PoolEntry>,

-	// A small optimization: keeping roots (vertices with in-degree 0) in a 
+	// A small optimization: keeping roots (vertices with in-degree 0) in a
 	// separate list makes topological sort a bit faster. (This is true for
 	// Kahn's, not sure about other implementations)
 	roots: Vec<PoolEntry>,
 }

 impl DirectedGraph {
+	/// Create an empty directed graph
 	pub fn empty() -> DirectedGraph {
 		DirectedGraph{
 			edges: HashMap::new(),
@@ -123,14 +130,17 @@ impl DirectedGraph {
 		}
 	}

+	/// Get an edge by its commitment
 	pub fn get_edge_by_commitment(&self, output_commitment: &Commitment) -> Option<&Edge> {
 		self.edges.get(output_commitment)
 	}

+	/// Remove an edge by its commitment
 	pub fn remove_edge_by_commitment(&mut self, output_commitment: &Commitment) -> Option<Edge> {
 		self.edges.remove(output_commitment)
 	}

+	/// Remove a vertex by its hash
 	pub fn remove_vertex(&mut self, tx_hash: core::hash::Hash) -> Option<PoolEntry> {
 		match self.roots.iter().position(|x| x.transaction_hash == tx_hash) {
 			Some(i) => Some(self.roots.swap_remove(i)),
@@ -163,8 +173,8 @@ impl DirectedGraph {
 		}
 	}

-	// add_vertex_only adds a vertex, meant to be complemented by add_edge_only
-	// in cases where delivering a vector of edges is not feasible or efficient
+	/// add_vertex_only adds a vertex, meant to be complemented by add_edge_only
+	/// in cases where delivering a vector of edges is not feasible or efficient
 	pub fn add_vertex_only(&mut self, vertex: PoolEntry, is_root: bool) {
 		if is_root {
 			self.roots.push(vertex);
@@ -173,6 +183,7 @@ impl DirectedGraph {
 		}
 	}

+	/// add_edge_only adds an edge
 	pub fn add_edge_only(&mut self, edge: Edge) {
 		self.edges.insert(edge.output_commitment(), edge);
 	}
@@ -181,7 +192,7 @@ impl DirectedGraph {
 	pub fn len_vertices(&self) -> usize {
 		self.vertices.len() + self.roots.len()
 	}
-	
+
 	/// Number of root vertices only
 	pub fn len_roots(&self) -> usize {
 		self.roots.len()
@@ -209,6 +220,8 @@ pub fn transaction_identifier(tx: &core::transaction::Transaction) -> core::hash
 #[cfg(test)]
 mod tests {
 	use super::*;
+	use secp::{Secp256k1, ContextFlag};
+	use secp::key;

 	#[test]
 	fn test_add_entry() {
@@ -243,7 +256,7 @@ mod tests {
 }

 /// For testing/debugging: a random tx hash
-fn random_hash() -> core::hash::Hash {
+pub fn random_hash() -> core::hash::Hash {
 	let hash_bytes: [u8;32]= rand::random();
 	core::hash::Hash(hash_bytes)
 }
diff --git a/pool/src/lib.rs b/pool/src/lib.rs
index e3478f9ee..406626b47 100644
--- a/pool/src/lib.rs
+++ b/pool/src/lib.rs
@@ -28,7 +28,6 @@ mod pool;
 extern crate time;
 extern crate rand;

-#[macro_use]
 extern crate log;

 extern crate grin_core as core;
diff --git a/pool/src/pool.rs b/pool/src/pool.rs
index b46891f7f..ebf588be6 100644
--- a/pool/src/pool.rs
+++ b/pool/src/pool.rs
@@ -20,22 +20,22 @@ pub use graph;
 use core::core::transaction;
 use core::core::block;
 use core::core::hash;
-// Temporary blockchain dummy impls
-use blockchain::{DummyChain, DummyChainImpl, DummyUtxoSet};

 use secp;
 use secp::pedersen::Commitment;

-use std::sync::{Arc, RwLock, Weak};
+use std::sync::Arc;
 use std::collections::HashMap;

 /// The pool itself.
 /// The transactions HashMap holds ownership of all transactions in the pool,
 /// keyed by their transaction hash.
 pub struct TransactionPool<T> {
+	/// All transactions in the pool
 	pub transactions: HashMap<hash::Hash, Box<transaction::Transaction>>,
-	
+	/// The pool itself
 	pub pool : Pool,
+	/// Orphans in the pool
 	pub orphans: Orphans,

 	// blockchain is a DummyChain, for now, which mimics what the future
@@ -44,6 +44,7 @@ pub struct TransactionPool<T> {
 }

 impl<T> TransactionPool<T> where T: BlockChain {
+	/// Create a new transaction pool
 	pub fn new(chain: Arc<T>) -> TransactionPool<T> {
 		TransactionPool{
 			transactions: HashMap::new(),
@@ -53,15 +54,15 @@ impl<T> TransactionPool<T> where T: BlockChain {
 		}
 	}

-	/// Searches for an output, designated by its commitment, from the current 
+	/// Searches for an output, designated by its commitment, from the current
 	/// best UTXO view, presented by taking the best blockchain UTXO set (as
 	/// determined by the blockchain component) and rectifying pool spent and
 	/// unspents.
-	/// Detects double spends and unknown references from the pool and 
+	/// Detects double spends and unknown references from the pool and
 	/// blockchain only; any conflicts with entries in the orphans set must
 	/// be accounted for separately, if relevant.
 	pub fn search_for_best_output(&self, output_commitment: &Commitment) -> Parent {
-		// The current best unspent set is: 
+		// The current best unspent set is:
 		// Pool unspent + (blockchain unspent - pool->blockchain spent)
 		// Pool unspents are unconditional so we check those first
 		self.pool.get_available_output(output_commitment).
@@ -76,7 +77,7 @@ impl<T> TransactionPool<T> where T: BlockChain {
 	// output designated by output_commitment.
 	fn search_blockchain_unspents(&self, output_commitment: &Commitment) -> Option<Parent> {
 		self.blockchain.get_unspent(output_commitment).
-			map(|o| match self.pool.get_blockchain_spent(output_commitment) {
+			map(|_| match self.pool.get_blockchain_spent(output_commitment) {
 				Some(x) => Parent::AlreadySpent{other_tx: x.destination_hash().unwrap()},
 				None => Parent::BlockTransaction,
 			})
@@ -96,10 +97,12 @@ impl<T> TransactionPool<T> where T: BlockChain {
 		self.pool.num_transactions()
 	}

+	/// Get the number of orphans in the pool
 	pub fn orphans_size(&self) -> usize {
 		self.orphans.num_transactions()
 	}

+	/// Get the total size (transactions + orphans) of the pool
 	pub fn total_size(&self) -> usize {
 		self.pool.num_transactions() + self.orphans.num_transactions()
 	}
@@ -110,15 +113,15 @@ impl<T> TransactionPool<T> where T: BlockChain {
 	/// if necessary, and performing any connection-related validity checks.
 	/// Happens under an exclusive mutable reference gated by the write portion
 	/// of a RWLock.
-	pub fn add_to_memory_pool(&mut self, source: TxSource, tx: transaction::Transaction) -> Result<(), PoolError> {
+	pub fn add_to_memory_pool(&mut self, _: TxSource, tx: transaction::Transaction) -> Result<(), PoolError> {
 		// Making sure the transaction is valid before anything else.
 		let secp = secp::Secp256k1::with_caps(secp::ContextFlag::Commit);
 		tx.validate(&secp).map_err(|_| PoolError::Invalid)?;

 		// The first check involves ensuring that an identical transaction is
 		// not already in the pool's transaction set.
-		// A non-authoritative similar check should be performed under the 
-		// pool's read lock before we get to this point, which would catch the 
+		// A non-authoritative similar check should be performed under the
+		// pool's read lock before we get to this point, which would catch the
 		// majority of duplicate cases. The race condition is caught here.
 		// TODO: When the transaction identifier is finalized, the assumptions
 		// here may change depending on the exact coverage of the identifier.
@@ -156,7 +159,7 @@ impl<T> TransactionPool<T> where T: BlockChain {

 		// Next we examine the outputs this transaction creates and ensure
 		// that they do not already exist.
-		// I believe its worth preventing duplicate outputs from being 
+		// I believe it's worth preventing duplicate outputs from being
 		// accepted, even though it is possible for them to be mined
 		// with strict ordering. In the future, if desirable, this could
 		// be node policy config or more intelligent.
@@ -182,8 +185,8 @@
 		// output is unique. No further checks are necessary.
 		self.pool.add_pool_transaction(pool_entry, blockchain_refs,
 			pool_refs, new_unspents);
-		
-		self.reconcile_orphans();
+
+		self.reconcile_orphans().unwrap();

 		self.transactions.insert(tx_hash, Box::new(tx));
 		Ok(())
@@ -194,11 +197,11 @@
 		// checking above.
 		// First, any references resolved to the pool need to be compared
 		// against active orphan pool_connections.
-		// Note that pool_connections here also does double duty to 
+		// Note that pool_connections here also does double duty to
 		// account for blockchain connections.
 		for pool_ref in pool_refs.iter().chain(blockchain_refs.iter()) {
 			match self.orphans.get_external_spent_output(&pool_ref.output_commitment()){
-				// Should the below err be subtyped to orphans somehow? 
+				// Should the below err be subtyped to orphans somehow?
 				Some(x) => return Err(PoolError::DoubleSpend{other_tx: x.destination_hash().unwrap(), spent_output: x.output_commitment()}),
 				None => {},
 			}
@@ -223,8 +226,8 @@
 	/// Check the output for a conflict with an existing output.
 	///
 	/// Checks the output (by commitment) against outputs in the blockchain
-	/// or in the pool. If the transaction is destined for orphans, the 
-	/// orphans set is checked as well. 
+	/// or in the pool. If the transaction is destined for orphans, the
+	/// orphans set is checked as well.
 	fn check_duplicate_outputs(&self, output : &transaction::Output, is_orphan: bool) -> Result<(), PoolError> {
 		// Checking against current blockchain unspent outputs
 		// We want outputs even if they're spent by pool txs, so we ignore
@@ -249,7 +252,7 @@
 		};

-		// If the transaction might go into orphans, perform the same 
+		// If the transaction might go into orphans, perform the same
 		// checks as above but against the orphan set instead.
 		if is_orphan {
 			// Checking against orphan outputs
@@ -295,7 +298,7 @@
 			None => {
 				// The reference does not resolve to anything.
 				// Make sure this missing_output has not already
-				// been claimed, then add this entry to 
+				// been claimed, then add this entry to
 				// missing_refs
 				match self.orphans.get_unknown_output(&orphan_commitment) {
 					Some(x) => return Err(PoolError::DoubleSpend{
@@ -311,7 +314,7 @@
 		Ok(missing_refs)
 	}

-	/// The primary goal of the reconcile_orphans method is to eliminate any 
+	/// The primary goal of the reconcile_orphans method is to eliminate any
 	/// orphans who conflict with the recently accepted pool transaction.
 	/// TODO: How do we handle fishing orphans out that look like they could
 	/// be freed? Current thought is to do so under a different lock domain
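// [Editor's note] A standalone sketch (not part of the patch) of the lookup
// order that search_for_best_output documents earlier in this file: pool
// unspents are checked first, then the blockchain UTXO set minus any outputs
// the pool has already spent. Strings and u64s are hypothetical stand-ins
// for commitments and transaction hashes.
use std::collections::{HashMap, HashSet};

#[derive(Debug, PartialEq)]
enum Parent {
	Unknown,
	PoolTransaction { tx_ref: u64 },
	BlockTransaction,
	AlreadySpent { other_tx: u64 },
}

fn search_for_best_output(
	commit: &str,
	pool_unspent: &HashMap<String, u64>,     // pool output -> producing tx
	chain_unspent: &HashSet<String>,         // blockchain UTXO set
	pool_spent_chain: &HashMap<String, u64>, // chain output -> spending pool tx
) -> Parent {
	// Pool unspents are unconditional, so check those first.
	if let Some(tx) = pool_unspent.get(commit) {
		return Parent::PoolTransaction { tx_ref: *tx };
	}
	// A blockchain output only counts if no pool transaction already spends it.
	if chain_unspent.contains(commit) {
		return match pool_spent_chain.get(commit) {
			Some(tx) => Parent::AlreadySpent { other_tx: *tx },
			None => Parent::BlockTransaction,
		};
	}
	Parent::Unknown
}

fn main() {
	let pool = HashMap::new();
	let mut chain = HashSet::new();
	chain.insert("c1".to_string());
	assert_eq!(
		search_for_best_output("c1", &pool, &chain, &HashMap::new()),
		Parent::BlockTransaction
	);
}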
@@ -332,9 +335,9 @@
 	///
 	/// Returns a list of transactions which have been evicted from the pool
 	/// due to the recent block. Because transaction association information is
-	/// irreversibly lost in the blockchain, we must keep track of these 
+	/// irreversibly lost in the blockchain, we must keep track of these
 	/// evicted transactions elsewhere so that we can make a best effort at
-	/// returning them to the pool in the event of a reorg that invalidates 
+	/// returning them to the pool in the event of a reorg that invalidates
 	/// this block.
 	pub fn reconcile_block(&mut self, block: &block::Block) -> Result<Vec<Box<transaction::Transaction>>, PoolError> {
 		// If this pool has been kept in sync correctly, serializing all
@@ -350,7 +353,7 @@
 		// consumes the same blockchain output.
 		// If one exists, we mark the transaction and then examine its
 		// children. Recursively, we mark each child until a child is
-		// fully satisfied by outputs in the updated utxo view (after 
+		// fully satisfied by outputs in the updated utxo view (after
 		// reconciliation of the block), or there are no more children.
 		//
 		// Additionally, to protect our invariant dictating no duplicate
@@ -358,11 +361,11 @@
 		// against outputs generated by the pool and the corresponding
 		// transactions are also marked.
 		//
-		// After marking concludes, sweeping begins. In order, the marked 
+		// After marking concludes, sweeping begins. In order, the marked
 		// transactions are removed, the vertexes corresponding to the
 		// transactions are removed, all the marked transactions' outputs are
 		// removed, and all remaining non-blockchain inputs are returned to the
-		// unspent_outputs set. 
+		// unspent_outputs set.
 		//
 		// After the pool has been successfully processed, an orphans
 		// reconciliation job is triggered.
@@ -389,7 +392,7 @@
 		}

 		let freed_txs = self.sweep_transactions(marked_transactions);
-		self.reconcile_orphans();
+		self.reconcile_orphans().unwrap();

 		Ok(freed_txs)
 	}
@@ -397,9 +400,9 @@
 	/// The mark portion of our mark-and-sweep pool cleanup.
 	///
 	/// The transaction designated by conflicting_tx is immediately marked.
-	/// Each output of this transaction is then examined; if a transaction in 
-	/// the pool spends this output and the output is not replaced by an 
-	/// identical output included in the updated UTXO set, the child is marked 
+	/// Each output of this transaction is then examined; if a transaction in
+	/// the pool spends this output and the output is not replaced by an
+	/// identical output included in the updated UTXO set, the child is marked
 	/// as well and the process continues recursively.
 	///
 	/// Marked transactions are added to the mutable marked_txs HashMap which
@@ -466,6 +469,8 @@ mod tests {
 	use secp::{Secp256k1, ContextFlag, constants};
 	use secp::key;
 	use core::core::build;
+	use blockchain::{DummyChain, DummyChainImpl, DummyUtxoSet};
+	use std::sync::{Arc, RwLock};

 	macro_rules! expect_output_parent {
 		($pool:expr, $expected:pat, $( $output:expr ),+ ) => {
@@ -478,7 +483,6 @@
 		}
 	}

-
 	#[test]
 	/// A basic test; add a pair of transactions to the pool.
 	fn test_basic_pool_add() {
@@ -542,7 +546,8 @@
 		}
 	}

-	#[test]
+
+	#[test]
 	/// Testing various expected error conditions
 	pub fn test_pool_add_error() {
 		let mut dummy_chain = DummyChainImpl::new();
@@ -595,7 +600,7 @@
 			Ok(_) => panic!("Expected error when adding double spend, got Ok"),
 			Err(x) => {
 				match x {
-					PoolError::DoubleSpend{other_tx, spent_output} => {
+					PoolError::DoubleSpend{other_tx: _, spent_output} => {
 						if spent_output != test_output(6).commitment() {
 							panic!("Unexpected parameter in DoubleSpend: {:?}", x);
 						}
@@ -647,7 +652,7 @@
 		let pool = RwLock::new(test_setup(&chain_ref));

 		// Preparation: We will introduce three root pool transactions.
-		// 1. A transaction that should be invalidated because it is exactly 
+		// 1. A transaction that should be invalidated because it is exactly
 		// contained in the block.
 		// 2. A transaction that should be invalidated because the input is
 		// consumed in the block, although it is not exactly consumed.
@@ -657,7 +662,7 @@
 		let valid_transaction = test_transaction(vec![30], vec![14,15]);

 		// We will also introduce a few children:
-		// 4. A transaction that descends from transaction 1, that is in 
+		// 4. A transaction that descends from transaction 1, that is in
 		// turn exactly contained in the block.
 		let block_child = test_transaction(vec![8], vec![4,3]);
 		// 5. A transaction that descends from transaction 4, that is not
@@ -681,9 +686,9 @@
 		// transaction 9
 		let mixed_child = test_transaction(vec![11,13], vec![2]);

-		// Add transactions. 
+		// Add transactions.
 		// Note: There are some ordering constraints that must be followed here
-		// until orphans is 100% implemented. Once the orphans process has 
+		// until orphans is 100% implemented. Once the orphans process has
 		// stabilized, we can mix these up to exercise that path a bit.
 		let mut txs_to_add = vec![block_transaction, conflict_transaction,
 			valid_transaction, block_child, pool_child, conflict_child,
@@ -755,7 +760,7 @@
 		expect_output_parent!(read_pool,
 			Parent::AlreadySpent{other_tx: _}, 15);

-		// We should have unspent pool references at 1, 13, 14 
+		// We should have unspent pool references at 1, 13, 14
 		expect_output_parent!(read_pool,
 			Parent::PoolTransaction{tx_ref: _}, 1, 13, 14);

@@ -765,9 +770,8 @@
 		// Evicted transactions should have unknown outputs
 		expect_output_parent!(read_pool, Parent::Unknown, 2, 11);
 	}
-
-
 	}
+
 	#[test]
 	/// Test transaction selection and block building.
 	fn test_block_building() {
@@ -819,7 +823,7 @@
 		txs = read_pool.prepare_mineable_transactions(3);
 		assert_eq!(txs.len(), 3);
 		// TODO: This is ugly, either make block::new take owned
-		// txs instead of mut refs, or change 
+		// txs instead of mut refs, or change
 		// prepare_mineable_transactions to return mut refs
 		let block_txs: Vec<transaction::Transaction> = txs.drain(..).map(|x| *x).collect();
 		let tx_refs = block_txs.iter().collect();
@@ -840,7 +844,7 @@
 		assert_eq!(write_pool.total_size(), 2);
 	}
-	
+
 }
@@ -856,7 +860,7 @@
 	/// Cobble together a test transaction for testing the transaction pool.
 	///
 	/// Connectivity here is the most important element.
-	/// Every output is given a blinding key equal to its value, so that the 
+	/// Every output is given a blinding key equal to its value, so that the
 	/// entire commitment can be derived deterministically from just the value.
 	///
 	/// Fees are the remainder between input and output values, so the numbers
diff --git a/pool/src/types.rs b/pool/src/types.rs
index a496c93ad..b62364384 100644
--- a/pool/src/types.rs
+++ b/pool/src/types.rs
@@ -16,10 +16,6 @@
 //! and its top-level members.

 use std::vec::Vec;
-use std::sync::Arc;
-use std::sync::RwLock;
-use std::sync::Weak;
-use std::cell::RefCell;
 use std::collections::HashMap;
 use std::iter::Iterator;
 use std::fmt;
@@ -28,19 +24,16 @@ use secp::pedersen::Commitment;

 pub use graph;

-use time;
-
 use core::core::transaction;
-use core::core::block;
 use core::core::hash;

 /// Placeholder: the data representing where we heard about a tx from.
 ///
-/// Used to make decisions based on transaction acceptance priority from 
+/// Used to make decisions based on transaction acceptance priority from
 /// various sources. For example, a node may want to bypass pool size
 /// restrictions when accepting a transaction from a local wallet.
 ///
-/// Most likely this will evolve to contain some sort of network identifier, 
+/// Most likely this will evolve to contain some sort of network identifier,
 /// once we get a better sense of what transaction building might look like.
 pub struct TxSource {
 	/// Human-readable name used for logging and errors.
@@ -71,14 +64,31 @@ impl fmt::Debug for Parent {
 	}
 }

+// TODO document this enum more accurately
+/// Enum of errors
 #[derive(Debug)]
 pub enum PoolError {
+	/// An invalid pool entry
 	Invalid,
+	/// An entry already in the pool
 	AlreadyInPool,
-	DuplicateOutput{other_tx: Option<hash::Hash>, in_chain: bool,
-		output: Commitment},
-	DoubleSpend{other_tx: hash::Hash, spent_output: Commitment},
-	// An orphan successfully added to the orphans set
+	/// A duplicate output
+	DuplicateOutput{
+		/// The other transaction
+		other_tx: Option<hash::Hash>,
+		/// Is in chain?
+		in_chain: bool,
+		/// The output
+		output: Commitment
+	},
+	/// A double spend
+	DoubleSpend{
+		/// The other transaction
+		other_tx: hash::Hash,
+		/// The spent output
+		spent_output: Commitment
+	},
+	/// An orphan successfully added to the orphans set
 	OrphanTransaction,
 }

@@ -95,9 +105,9 @@ pub trait BlockChain {
 /// the blockchain.
 /// Reservations of outputs by orphan transactions (not fully connected) are
 /// not respected.
-/// Spending references (input -> output) exist in two structures: internal 
-/// graph references are contained in the pool edge sets, while references 
-/// sourced from the blockchain's UTXO set are contained in the 
+/// Spending references (input -> output) exist in two structures: internal
+/// graph references are contained in the pool edge sets, while references
+/// sourced from the blockchain's UTXO set are contained in the
 /// blockchain_connections set.
 /// Spent by references (output-> input) exist in two structures: pool-pool
 /// connections are in the pool edge set, while unspent (dangling) references
@@ -105,12 +115,12 @@ pub trait BlockChain {
 pub struct Pool {
 	graph : graph::DirectedGraph,

-	// available_outputs are unspent outputs of the current pool set, 
-	// maintained as edges with empty destinations, keyed by the 
+	// available_outputs are unspent outputs of the current pool set,
+	// maintained as edges with empty destinations, keyed by the
 	// output's hash.
 	available_outputs: HashMap<Commitment, graph::Edge>,

-	// Consumed blockchain utxo's are kept in a separate map. 
+	// Consumed blockchain utxos are kept in a separate map.
 	consumed_blockchain_outputs: HashMap<Commitment, graph::Edge>
 }

@@ -209,7 +219,7 @@ impl Pool {
 	}
 }

-impl TransactionGraphContainer for Pool { 
+impl TransactionGraphContainer for Pool {
 	fn get_graph(&self) -> &graph::DirectedGraph {
 		&self.graph
 	}
@@ -225,21 +235,21 @@ impl TransactionGraphContainer for Pool {
 }

 /// Orphans contains the elements of the transaction graph that have not been
-/// connected in full to the blockchain. 
+/// connected in full to the blockchain.
 pub struct Orphans {
 	graph : graph::DirectedGraph,

-	// available_outputs are unspent outputs of the current orphan set, 
+	// available_outputs are unspent outputs of the current orphan set,
 	// maintained as edges with empty destinations.
 	available_outputs: HashMap<Commitment, graph::Edge>,

-	// missing_outputs are spending references (inputs) with missing 
+	// missing_outputs are spending references (inputs) with missing
 	// corresponding outputs, maintained as edges with empty sources.
 	missing_outputs: HashMap<Commitment, graph::Edge>,

 	// pool_connections are bidirectional edges which connect to the pool
-	// graph. They should map one-to-one to pool graph available_outputs. 
-	// pool_connections should not be viewed authoritatively, they are 
+	// graph. They should map one-to-one to pool graph available_outputs.
+	// pool_connections should not be viewed authoritatively, they are
 	// merely informational until the transaction is officially connected to
 	// the pool.
 	pool_connections: HashMap<Commitment, graph::Edge>,
@@ -255,12 +265,12 @@ impl Orphans {
 		}
 	}

-	/// Checks for a double spent output, given the hash of the output, 
+	/// Checks for a double spent output, given the hash of the output,
 	/// ONLY in the data maintained by the orphans set. This includes links
 	/// to the pool as well as links internal to orphan transactions.
 	/// Returns the transaction hash corresponding to the conflicting
 	/// transaction.
-	fn check_double_spend(&self, o: transaction::Output) -> Option<hash::Hash> {
+	pub fn check_double_spend(&self, o: transaction::Output) -> Option<hash::Hash> {
 		self.graph.get_edge_by_commitment(&o.commitment()).or(self.pool_connections.get(&o.commitment())).map(|x| x.destination_hash().unwrap())
 	}

@@ -340,14 +350,14 @@ impl TransactionGraphContainer for Orphans {
 /// consumed by another transaction in this graph,
 /// 3) [External] Unspent: An output produced by a transaction in this graph
 /// that is not yet spent.
-/// 
+///
 /// There is no concept of an external "spent by" reference (output produced by
-/// a transaction in the graph spent by a transaction in another source), as 
+/// a transaction in the graph spent by a transaction in another source), as
 /// these references are expected to be maintained by descendent graph. Outputs
-/// follow a heirarchy (Blockchain -> Pool -> Orphans) where each descendent 
-/// exists at a lower priority than their parent. An output consumed by a 
+/// follow a hierarchy (Blockchain -> Pool -> Orphans) where each descendent
+/// exists at a lower priority than their parent. An output consumed by a
 /// child graph is marked as unspent in the parent graph and an external spent
-/// in the child. This ensures that no descendent set must modify state in a 
+/// in the child. This ensures that no descendent set must modify state in a
 /// set of higher priority.
 pub trait TransactionGraphContainer {
 	/// Accessor for graph object
@@ -365,7 +375,7 @@ pub trait TransactionGraphContainer {
 		self.get_available_output(c).is_some()
 	}

-	/// Checks if the pool has anything by this output already, between 
+	/// Checks if the pool has anything by this output already, between
 	/// available outputs and internal ones.
 	fn find_output(&self, c: &Commitment) -> Option<hash::Hash> {
 		self.get_available_output(c).
diff --git a/secp256k1zkp/src/pedersen.rs b/secp256k1zkp/src/pedersen.rs
index c95a5e158..fb4c29928 100644
--- a/secp256k1zkp/src/pedersen.rs
+++ b/secp256k1zkp/src/pedersen.rs
@@ -143,6 +143,7 @@ impl AsRef<[u8]> for RangeProof {
 }

 impl RangeProof {
+	/// Create the zero range proof
 	pub fn zero() -> RangeProof {
 		RangeProof {
 			proof: [0; constants::MAX_PROOF_SIZE],
diff --git a/store/src/lib.rs b/store/src/lib.rs
index 2df5e4bbd..819b62796 100644
--- a/store/src/lib.rs
+++ b/store/src/lib.rs
@@ -187,6 +187,8 @@ impl<'a> Batch<'a> {
 		}
 	}

+	/// Delete a single key from the batch. The write function
+	/// must be called to "commit" the batch to storage.
 	pub fn delete(mut self, key: &[u8]) -> Result<Batch<'a>, Error> {
 		self.batch.delete(key)?;
 		Ok(self)
diff --git a/wallet/src/checker.rs b/wallet/src/checker.rs
index 0fe8c3e69..5bd29bb65 100644
--- a/wallet/src/checker.rs
+++ b/wallet/src/checker.rs
@@ -29,7 +29,7 @@ pub fn refresh_outputs(config: &WalletConfig, ext_key: &ExtendedKey) {
 	let secp = secp::Secp256k1::with_caps(secp::ContextFlag::Commit);

 	// operate within a lock on wallet data
-	WalletData::with_wallet(&config.data_file_dir, |wallet_data| {
+	let _ = WalletData::with_wallet(&config.data_file_dir, |wallet_data| {

 		// check each output that's not spent
 		for out in &mut wallet_data.outputs {
@@ -41,7 +41,7 @@ pub fn refresh_outputs(config: &WalletConfig, ext_key: &ExtendedKey) {
 			// TODO check the pool for unconfirmed
 			let out_res = get_output_by_commitment(config, commitment);
-			
+
 			if out_res.is_ok() {
 				// output is known, it's a new utxo
 				out.status = OutputStatus::Unspent;
diff --git a/wallet/src/receiver.rs b/wallet/src/receiver.rs
index 6f66329c5..b9b95b1d6 100644
--- a/wallet/src/receiver.rs
+++ b/wallet/src/receiver.rs
@@ -50,13 +50,13 @@
 //! So we may as well have it in place already.

 use std::convert::From;
-use secp::{self, Secp256k1};
+use secp::{self};
 use secp::key::SecretKey;

 use core::core::{Block, Transaction, TxKernel, Output, build};
 use core::ser;
 use api::{self, ApiEndpoint, Operation, ApiResult};
-use extkey::{self, ExtendedKey};
+use extkey::ExtendedKey;
 use types::*;
 use util;

@@ -70,13 +70,13 @@ struct TxWrapper {
 /// transaction, adding our receiving output, to broadcast to the rest of the
 /// network.
 pub fn receive_json_tx(config: &WalletConfig, ext_key: &ExtendedKey, partial_tx_str: &str) -> Result<(), Error> {
-	
 	let (amount, blinding, partial_tx) = partial_tx_from_json(partial_tx_str)?;
 	let final_tx = receive_transaction(&config, ext_key, amount, blinding, partial_tx)?;
 	let tx_hex = util::to_hex(ser::ser_vec(&final_tx).unwrap());

 	let url = format!("{}/v1/pool/push", config.check_node_api_http_addr.as_str());
-	api::client::post(url.as_str(), &TxWrapper { tx_hex: tx_hex })?;
+	let _: TxWrapper = api::client::post(url.as_str(), &TxWrapper { tx_hex: tx_hex })?;
 	Ok(())
 }

@@ -135,9 +135,9 @@ impl ApiEndpoint for WalletReceiver {
 			WalletReceiveRequest::PartialTransaction(partial_tx_str) => {
 				debug!("Operation {} with transaction {}", op, &partial_tx_str);
 				receive_json_tx(&self.config, &self.key, &partial_tx_str).map_err(|e| {
-					api::Error::Internal(format!("Error processing partial transaction: {:?}", e))
-				});
-				
+					api::Error::Internal(format!("Error processing partial transaction: {:?}", e))
+				}).unwrap();
+
 				//TODO: Return emptiness for now, should be a proper enum return type
 				Ok(CbData {
 					output: String::from(""),
diff --git a/wallet/src/sender.rs b/wallet/src/sender.rs
index df9b63790..dda4edeff 100644
--- a/wallet/src/sender.rs
+++ b/wallet/src/sender.rs
@@ -13,7 +13,7 @@
 // limitations under the License.

 use std::convert::From;
-use secp::{self, Secp256k1};
+use secp::{self};
 use secp::key::SecretKey;

 use checker;
@@ -29,21 +29,18 @@ use api;
 /// recipients wallet receiver (to be implemented).
 pub fn issue_send_tx(config: &WalletConfig, ext_key: &ExtendedKey, amount: u64, dest: String) -> Result<(), Error> {
 	checker::refresh_outputs(&config, ext_key);
-	
+
 	let (tx, blind_sum) = build_send_tx(config, ext_key, amount)?;
 	let json_tx = partial_tx_to_json(amount, blind_sum, tx);
-	
+
 	if dest == "stdout" {
 		println!("{}", json_tx);
 	} else if &dest[..4] == "http" {
-		let url = format!("{}/v1/receive/receive_json_tx",
-		                  &dest);
+		let url = format!("{}/v1/receive/receive_json_tx", &dest);
 		debug!("Posting partial transaction to {}", url);
 		let request = WalletReceiveRequest::PartialTransaction(json_tx);
-		let res: CbData = api::client::post(url.as_str(),
-		                                    &request)
-			.expect(&format!("Wallet receiver at {} unreachable, could not send transaction. Is it running?", url));
-		
+		let _: CbData = api::client::post(url.as_str(), &request)
+			.expect(&format!("Wallet receiver at {} unreachable, could not send transaction. Is it running?", url));
 	}
 	Ok(())
 }
diff --git a/wallet/src/types.rs b/wallet/src/types.rs
index ad019c393..df1e5cac7 100644
--- a/wallet/src/types.rs
+++ b/wallet/src/types.rs
@@ -65,7 +65,7 @@ impl From<secp::Error> for Error {
 }

 impl From<num::ParseIntError> for Error {
-	fn from(e: num::ParseIntError) -> Error {
+	fn from(_: num::ParseIntError) -> Error {
 		Error::Format("Invalid hex".to_string())
 	}
 }

@@ -91,7 +91,7 @@ pub struct WalletConfig {

 impl Default for WalletConfig {
 	fn default() -> WalletConfig {
-		WalletConfig { 
+		WalletConfig {
 			enable_wallet: false,
 			api_http_addr: "http://127.0.0.1:13415".to_string(),
 			check_node_api_http_addr: "http://127.0.0.1:13415".to_string(),
@@ -161,12 +161,12 @@ impl WalletData {
 		fs::create_dir_all(data_file_dir).unwrap_or_else(|why| {
 			info!("! {:?}", why.kind());
{:?}", why.kind()); }); - + let data_file_path = &format!("{}{}{}", data_file_dir, MAIN_SEPARATOR, DAT_FILE); let lock_file_path = &format!("{}{}{}", data_file_dir, MAIN_SEPARATOR, LOCK_FILE); // create the lock files, if it already exists, will produce an error - OpenOptions::new().write(true).create_new(true).open(lock_file_path).map_err(|e| { + OpenOptions::new().write(true).create_new(true).open(lock_file_path).map_err(|_| { Error::WalletData(format!("Could not create wallet lock file. Either \ some other process is using the wallet or there's a write access \ issue.")) @@ -178,7 +178,7 @@ impl WalletData { wdat.write(data_file_path)?; // delete the lock file - fs::remove_file(lock_file_path).map_err(|e| { + fs::remove_file(lock_file_path).map_err(|_| { Error::WalletData(format!("Could not remove wallet lock file. Maybe insufficient \ rights?")) })?;