Cleanup build warnings (#87)

* minor cleanup - unused imports
* cleanup build warnings - unused vars
* make structs pub to get rid of the private_in_public lint warning
* missing docs on RangeProof
* add missing docs to store delete function
* cleaned up deprecation warning -
tokio_core -> tokio_io
complete() -> send()
This commit is contained in:
AntiochP 2017-08-09 20:54:10 -04:00 committed by Ignotus Peverell
parent 131ea2f799
commit 3b4a48b2fd
48 changed files with 418 additions and 362 deletions

View file

@ -21,11 +21,10 @@
// }
// }
use std::sync::{Arc, Mutex, RwLock};
use std::sync::{Arc, RwLock};
use std::thread;
use core::core::{Transaction, Output};
use core::core::hash::Hash;
use core::ser;
use chain::{self, Tip};
use pool;
@ -51,7 +50,7 @@ impl ApiEndpoint for ChainApi {
vec![Operation::Get]
}
fn get(&self, id: String) -> ApiResult<Tip> {
fn get(&self, _: String) -> ApiResult<Tip> {
self.chain.head().map_err(|e| Error::Internal(format!("{:?}", e)))
}
}
@ -75,7 +74,7 @@ impl ApiEndpoint for OutputApi {
fn get(&self, id: String) -> ApiResult<Output> {
debug!("GET output {}", id);
let c = util::from_hex(id.clone()).map_err(|e| Error::Argument(format!("Not a valid commitment: {}", id)))?;
let c = util::from_hex(id.clone()).map_err(|_| Error::Argument(format!("Not a valid commitment: {}", id)))?;
match self.chain.get_unspent(&Commitment::from_vec(c)) {
Some(utxo) => Ok(utxo),
@ -93,7 +92,7 @@ pub struct PoolApi<T> {
}
#[derive(Serialize, Deserialize)]
struct PoolInfo {
pub struct PoolInfo {
pool_size: usize,
orphans_size: usize,
total_size: usize,
@ -111,7 +110,7 @@ impl<T> ApiEndpoint for PoolApi<T>
vec![Operation::Get, Operation::Custom("push".to_string())]
}
fn get(&self, id: String) -> ApiResult<PoolInfo> {
fn get(&self, _: String) -> ApiResult<PoolInfo> {
let pool = self.tx_pool.read().unwrap();
Ok(PoolInfo {
pool_size: pool.pool_size(),
@ -120,9 +119,9 @@ impl<T> ApiEndpoint for PoolApi<T>
})
}
fn operation(&self, op: String, input: TxWrapper) -> ApiResult<()> {
fn operation(&self, _: String, input: TxWrapper) -> ApiResult<()> {
let tx_bin = util::from_hex(input.tx_hex)
.map_err(|e| Error::Argument(format!("Invalid hex in transaction wrapper.")))?;
.map_err(|_| Error::Argument(format!("Invalid hex in transaction wrapper.")))?;
let tx: Transaction = ser::deserialize(&mut &tx_bin[..]).map_err(|_| {
Error::Argument("Could not deserialize transaction, invalid format.".to_string())
@ -146,7 +145,7 @@ impl<T> ApiEndpoint for PoolApi<T>
/// Dummy wrapper for the hex-encoded serialized transaction.
#[derive(Serialize, Deserialize)]
struct TxWrapper {
pub struct TxWrapper {
tx_hex: String,
}

View file

@ -30,9 +30,8 @@ use iron::{Iron, Request, Response, IronResult, IronError, status, headers, List
use iron::method::Method;
use iron::modifiers::Header;
use iron::middleware::Handler;
use iron::error::HttpResult;
use router::Router;
use serde::{Serialize, Deserialize};
use serde::Serialize;
use serde::de::DeserializeOwned;
use serde_json;
@ -331,7 +330,6 @@ impl ApiServer {
#[cfg(test)]
mod test {
use super::*;
use rest::*;
#[derive(Serialize, Deserialize)]
pub struct Animal {

View file

@ -97,7 +97,8 @@ impl Chain {
Err(e) => return Err(Error::StoreErr(e)),
};
let head = chain_store.head()?;
// TODO - confirm this was safe to remove based on code above?
// let head = chain_store.head()?;
Ok(Chain {
store: Arc::new(chain_store),
@ -176,7 +177,7 @@ impl Chain {
fn check_orphans(&self) {
// first check how many we have to retry, unfort. we can't extend the lock
// in the loop as it needs to be freed before going in process_block
let mut orphan_count = 0;
let orphan_count;
{
let orphans = self.orphans.lock().unwrap();
orphan_count = orphans.len();
@ -184,13 +185,13 @@ impl Chain {
// pop each orphan and retry, if still orphaned, will be pushed again
for _ in 0..orphan_count {
let mut popped = None;
let popped;
{
let mut orphans = self.orphans.lock().unwrap();
popped = orphans.pop_back();
}
if let Some((opts, orphan)) = popped {
self.process_block(orphan, opts);
let _process_result = self.process_block(orphan, opts);
}
}
}

View file

@ -14,7 +14,6 @@
//! Implementation of the chain block acceptance (or refusal) pipeline.
use std::convert::From;
use std::sync::{Arc, Mutex};
use secp;
@ -22,22 +21,24 @@ use time;
use core::consensus;
use core::core::hash::{Hash, Hashed};
use core::core::target::Difficulty;
use core::core::{BlockHeader, Block, Proof};
use core::core::{BlockHeader, Block};
use core::pow;
use core::ser;
use types::*;
use store;
use core::global;
use core::global::{MiningParameterMode,MINING_PARAMETER_MODE};
/// Contextual information required to process a new block and either reject or
/// accept it.
pub struct BlockContext {
/// The options
pub opts: Options,
/// The store
pub store: Arc<ChainStore>,
/// The adapter
pub adapter: Arc<ChainAdapter>,
/// The head
pub head: Tip,
/// The lock
pub lock: Arc<Mutex<bool>>,
}
@ -68,11 +69,12 @@ pub fn process_block(b: &Block, mut ctx: BlockContext) -> Result<Option<Tip>, Er
b.hash()
);
ctx.lock.lock();
let _ = ctx.lock.lock().unwrap();
add_block(b, &mut ctx)?;
update_head(b, &mut ctx)
}
/// Process the block header
pub fn process_block_header(bh: &BlockHeader, mut ctx: BlockContext) -> Result<Option<Tip>, Error> {
info!(
@ -84,7 +86,7 @@ pub fn process_block_header(bh: &BlockHeader, mut ctx: BlockContext) -> Result<O
validate_header(&bh, &mut ctx)?;
add_block_header(bh, &mut ctx)?;
ctx.lock.lock();
let _ = ctx.lock.lock().unwrap();
update_header_head(bh, &mut ctx)
}
@ -149,7 +151,6 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), E
return Err(Error::DifficultyTooLow);
}
let param_ref=MINING_PARAMETER_MODE.read().unwrap();
let cycle_size = if ctx.opts.intersects(EASY_POW) {
global::sizeshift()
} else {

View file

@ -41,6 +41,7 @@ pub struct ChainKVStore {
}
impl ChainKVStore {
/// Create new chain store
pub fn new(root_path: String) -> Result<ChainKVStore, Error> {
let db = grin_store::Store::open(format!("{}/{}", root_path, STORE_SUBPATH).as_str())?;
Ok(ChainKVStore { db: db })
@ -152,7 +153,7 @@ impl ChainStore for ChainKVStore {
self.db.put_ser(
&u64_to_key(HEADER_HEIGHT_PREFIX, real_prev.height),
&real_prev,
);
).unwrap();
prev_h = real_prev.previous;
prev_height = real_prev.height - 1;
} else {

View file

@ -27,6 +27,7 @@ use grin_store;
bitflags! {
/// Options for block validation
pub flags Options: u32 {
/// None flag
const NONE = 0b00000001,
/// Runs without checking the Proof of Work, mostly to make testing easier.
const SKIP_POW = 0b00000010,
@ -37,6 +38,7 @@ bitflags! {
}
}
/// Errors
#[derive(Debug)]
pub enum Error {
/// The block doesn't fit anywhere in our chain
@ -202,5 +204,5 @@ pub trait ChainAdapter {
/// Dummy adapter used as a placeholder for real implementations
pub struct NoopAdapter {}
impl ChainAdapter for NoopAdapter {
fn block_accepted(&self, b: &Block) {}
fn block_accepted(&self, _: &Block) {}
}

View file

@ -26,7 +26,6 @@ use std::thread;
use rand::os::OsRng;
use grin_chain::types::*;
use grin_chain::store;
use grin_core::core::hash::Hashed;
use grin_core::core::target::Difficulty;
use grin_core::pow;
@ -34,15 +33,13 @@ use grin_core::core;
use grin_core::consensus;
use grin_core::pow::cuckoo;
use grin_core::global;
use grin_core::global::{MiningParameterMode,MINING_PARAMETER_MODE};
use grin::{ServerConfig, MinerConfig};
use grin_core::global::MiningParameterMode;
use grin_core::pow::MiningWorker;
#[test]
fn mine_empty_chain() {
env_logger::init();
let _ = env_logger::init();
global::set_mining_mode(MiningParameterMode::AutomatedTesting);
let mut rng = OsRng::new().unwrap();
let chain = grin_chain::Chain::init(".grin".to_string(), Arc::new(NoopAdapter {}))
@ -52,7 +49,6 @@ fn mine_empty_chain() {
let secp = secp::Secp256k1::with_caps(secp::ContextFlag::Commit);
let reward_key = secp::key::SecretKey::new(&secp, &mut rng);
let server_config = ServerConfig::default();
let mut miner_config = grin::MinerConfig {
enable_mining: true,
burn_reward: true,
@ -88,7 +84,7 @@ fn mine_empty_chain() {
#[test]
fn mine_forks() {
env_logger::init();
let _ = env_logger::init();
let mut rng = OsRng::new().unwrap();
let chain = grin_chain::Chain::init(".grin2".to_string(), Arc::new(NoopAdapter {}))
.unwrap();

View file

@ -22,7 +22,6 @@ use std::fs::File;
use toml;
use grin::{ServerConfig,
MinerConfig};
use wallet::WalletConfig;
use types::{ConfigMembers,
GlobalConfig,
@ -102,7 +101,7 @@ impl GlobalConfig {
if let Some(fp) = file_path {
return_value.config_file_path = Some(PathBuf::from(&fp));
} else {
return_value.derive_config_location();
return_value.derive_config_location().unwrap();
}
//No attempt at a config file, just return defaults
@ -124,6 +123,7 @@ impl GlobalConfig {
return_value.read_config()
}
/// Read config
pub fn read_config(mut self) -> Result<GlobalConfig, ConfigError> {
let mut file = File::open(self.config_file_path.as_mut().unwrap())?;
let mut contents = String::new();
@ -149,6 +149,7 @@ impl GlobalConfig {
}
}
/// Serialize config
pub fn ser_config(&mut self) -> Result<String, ConfigError> {
let encoded:Result<String, toml::ser::Error> = toml::to_string(self.members.as_mut().unwrap());
match encoded {
@ -169,6 +170,7 @@ impl GlobalConfig {
return self.members.as_mut().unwrap().wallet.as_mut().unwrap().enable_wallet;
}*/
/// Enable mining
pub fn mining_enabled(&mut self) -> bool {
return self.members.as_mut().unwrap().mining.as_mut().unwrap().enable_mining;
}

View file

@ -20,7 +20,6 @@ use std::fmt;
use grin::{ServerConfig,
MinerConfig};
use wallet::WalletConfig;
/// Error type wrapping config errors.
@ -77,12 +76,13 @@ impl From<io::Error> for ConfigError {
#[derive(Debug, Serialize, Deserialize)]
pub struct GlobalConfig {
//Keep track of the file we've read
///Keep track of the file we've read
pub config_file_path: Option<PathBuf>,
//keep track of whether we're using
//a config file or just the defaults
//for each member
/// keep track of whether we're using
/// a config file or just the defaults
/// for each member
pub using_config_file: bool,
/// Global member config
pub members: Option<ConfigMembers>,
}
@ -93,7 +93,9 @@ pub struct GlobalConfig {
#[derive(Debug, Serialize, Deserialize)]
pub struct ConfigMembers {
/// Server config
pub server: ServerConfig,
/// Mining config
pub mining: Option<MinerConfig>,
//removing wallet from here for now,
//as its concerns are separate from the server's, really

View file

@ -24,7 +24,6 @@ use core::{Input, Output, Proof, TxKernel, Transaction, COINBASE_KERNEL, COINBAS
use core::transaction::merkle_inputs_outputs;
use consensus::REWARD;
use consensus::MINIMUM_DIFFICULTY;
use consensus::PROOFSIZE;
use core::hash::{Hash, Hashed, ZERO_HASH};
use core::target::Difficulty;
use ser::{self, Readable, Reader, Writeable, Writer};

View file

@ -83,7 +83,10 @@ pub trait Committed {
/// Proof of work
pub struct Proof {
/// The nonces
pub nonces:Vec<u32>,
/// The proof size
pub proof_size: usize,
}

View file

@ -38,7 +38,7 @@
use std::clone::Clone;
use std::fmt::Debug;
use std::marker::PhantomData;
use std::ops::{self, Deref};
use std::ops::{self};
use core::hash::{Hash, Hashed};
use ser::{self, Readable, Reader, Writeable, Writer};
@ -96,11 +96,14 @@ impl<T> Summable for NoSum<T> {
/// of two HashSums is the (Hash(h1|h2), h1 + h2) HashSum.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct HashSum<T> where T: Summable {
/// The hash
pub hash: Hash,
/// The sum
pub sum: T::Sum,
}
impl<T> HashSum<T> where T: Summable + Writeable {
/// Create a hash sum from a summable
pub fn from_summable(idx: u64, elmt: T) -> HashSum<T> {
let hash = Hashed::hash(&elmt);
let sum = elmt.sum();
@ -156,7 +159,7 @@ pub trait Backend<T> where T: Summable {
/// Heavily relies on navigation operations within a binary tree. In particular,
/// all the implementation needs to keep track of the MMR structure is how far
/// we are in the sequence of nodes making up the MMR.
struct PMMR<T, B> where T: Summable, B: Backend<T> {
pub struct PMMR<T, B> where T: Summable, B: Backend<T> {
last_pos: u64,
backend: B,
// only needed for parameterizing Backend
@ -237,7 +240,6 @@ impl<T, B> PMMR<T, B> where T: Summable + Writeable + Debug + Clone, B: Backend<
let mut to_prune = vec![];
let mut current = position;
while current+1 < self.last_pos {
let current_height = bintree_postorder_height(current);
let next_height = bintree_postorder_height(current+1);
// compare the node's height to the next height, if the next is higher
@ -454,8 +456,10 @@ fn most_significant_pos(num: u64) -> u64 {
#[cfg(test)]
mod test {
use super::*;
use core::hash::{Hash, Hashed};
use core::hash::Hashed;
use std::sync::{Arc, Mutex};
use std::ops::Deref;
#[test]
fn some_all_ones() {

View file

@ -698,6 +698,7 @@ where
}
#[allow(dead_code)]
#[allow(missing_docs)]
pub fn print_tree<T>(tree: &SumTree<T>)
where
T: Summable + Writeable,

View file

@ -21,11 +21,9 @@
use std::fmt;
use std::ops::{Add, Mul, Div, Sub};
use std::io::Cursor;
use std::u64::MAX;
use serde::{Serialize, Serializer, Deserialize, Deserializer, de};
use byteorder::{ByteOrder, ReadBytesExt, BigEndian};
use byteorder::{ByteOrder, BigEndian};
use core::hash::Hash;
use ser::{self, Reader, Writer, Writeable, Readable};
@ -150,7 +148,7 @@ impl<'de> de::Visitor<'de> for DiffVisitor {
where E: de::Error
{
let num_in = s.parse::<u64>();
if let Err(e)=num_in {
if let Err(_)=num_in {
return Err(de::Error::invalid_value(de::Unexpected::Str(s), &"a value number"));
};
Ok(Difficulty { num: num_in.unwrap() })

View file

@ -18,7 +18,6 @@ use time;
use core;
use consensus::MINIMUM_DIFFICULTY;
use consensus::PROOFSIZE;
use core::hash::Hashed;
use core::target::Difficulty;
use global;

View file

@ -28,14 +28,19 @@ use consensus::DEFAULT_SIZESHIFT;
/// Define these here, as they should be developer-set, not really tweakable
/// by users
/// Automated testing sizeshift
pub const AUTOMATED_TESTING_SIZESHIFT:u8 = 10;
/// Automated testing proof size
pub const AUTOMATED_TESTING_PROOF_SIZE:usize = 4;
/// User testing sizeshift
pub const USER_TESTING_SIZESHIFT:u8 = 16;
/// User testing proof size
pub const USER_TESTING_PROOF_SIZE:usize = 42;
/// Mining parameter modes
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum MiningParameterMode {
/// For CI testing
@ -49,14 +54,17 @@ pub enum MiningParameterMode {
}
lazy_static!{
/// The mining parameter mode
pub static ref MINING_PARAMETER_MODE: RwLock<MiningParameterMode> = RwLock::new(MiningParameterMode::Production);
}
/// Set the mining mode
pub fn set_mining_mode(mode:MiningParameterMode){
let mut param_ref=MINING_PARAMETER_MODE.write().unwrap();
*param_ref=mode;
}
/// The sizeshift
pub fn sizeshift() -> u8 {
let param_ref=MINING_PARAMETER_MODE.read().unwrap();
match *param_ref {
@ -66,6 +74,7 @@ pub fn sizeshift() -> u8 {
}
}
/// The proofsize
pub fn proofsize() -> usize {
let param_ref=MINING_PARAMETER_MODE.read().unwrap();
match *param_ref {
@ -75,6 +84,7 @@ pub fn proofsize() -> usize {
}
}
/// Are we in automated testing mode?
pub fn is_automated_testing_mode() -> bool {
let param_ref=MINING_PARAMETER_MODE.read().unwrap();
if let MiningParameterMode::AutomatedTesting=*param_ref {
@ -83,4 +93,3 @@ pub fn is_automated_testing_mode() -> bool {
return false;
}
}

View file

@ -39,6 +39,7 @@ macro_rules! try_map_vec {
/// Eliminates some of the verbosity in having iter and collect
/// around every filter_map call.
#[macro_export]
macro_rules! filter_map_vec {
($thing:expr, $mapfn:expr ) => {
$thing.iter()
@ -52,6 +53,7 @@ macro_rules! filter_map_vec {
/// Example:
/// let foo = vec![1,2,3]
/// println!(tee!(foo, foo.append(vec![3,4,5]))
#[macro_export]
macro_rules! tee {
($thing:ident, $thing_expr:expr) => {
{

View file

@ -31,8 +31,6 @@ use consensus::EASINESS;
use core::BlockHeader;
use core::hash::Hashed;
use core::Proof;
use global;
use global::{MiningParameterMode, MINING_PARAMETER_MODE};
use core::target::Difficulty;
use pow::cuckoo::{Cuckoo, Error};
@ -71,7 +69,7 @@ pub fn pow20<T: MiningWorker>(miner:&mut T, bh: &mut BlockHeader, diff: Difficul
/// Runs a proof of work computation over the provided block using the provided Mining Worker,
/// until the required difficulty target is reached. May take a while for a low target...
pub fn pow_size<T: MiningWorker>(miner:&mut T, bh: &mut BlockHeader,
diff: Difficulty, sizeshift: u32) -> Result<(), Error> {
diff: Difficulty, _: u32) -> Result<(), Error> {
let start_nonce = bh.nonce;
// try to find a cuckoo cycle on that header hash
@ -104,9 +102,12 @@ pub fn pow_size<T: MiningWorker>(miner:&mut T, bh: &mut BlockHeader,
#[cfg(test)]
mod test {
use super::*;
use global;
use core::target::Difficulty;
use genesis;
use consensus::MINIMUM_DIFFICULTY;
use global::MiningParameterMode;
#[test]
fn genesis_pow() {

View file

@ -388,6 +388,7 @@ impl Writeable for [u8; 4] {
/// Useful marker trait on types that can be sized byte slices
pub trait AsFixedBytes: Sized + AsRef<[u8]> {
/// The length in bytes
fn len(&self) -> usize;
}

View file

@ -13,21 +13,19 @@
// limitations under the License.
use std::net::SocketAddr;
use std::ops::Deref;
use std::sync::{Arc, Mutex, RwLock};
use std::sync::{Arc, RwLock};
use std::thread;
use chain::{self, ChainAdapter};
use core::core::{self, Output};
use core::core::hash::{Hash, Hashed};
use core::core::target::Difficulty;
use p2p::{self, NetAdapter, Server, PeerStore, PeerData, Capabilities, State};
use p2p::{self, NetAdapter, Server, PeerStore, PeerData, State};
use pool;
use secp::pedersen::Commitment;
use util::OneTime;
use store;
use sync;
use core::global;
use core::global::{MiningParameterMode,MINING_PARAMETER_MODE};
/// Implementation of the NetAdapter for the blockchain. Gets notified when new
@ -210,9 +208,17 @@ impl NetToChainAdapter {
pub fn start_sync(&self, sync: sync::Syncer) {
let arc_sync = Arc::new(sync);
self.syncer.init(arc_sync.clone());
thread::Builder::new().name("syncer".to_string()).spawn(move || {
arc_sync.run();
let spawn_result = thread::Builder::new().name("syncer".to_string()).spawn(move || {
let sync_run_result = arc_sync.run();
match sync_run_result {
Ok(_) => {}
Err(_) => {}
}
});
match spawn_result {
Ok(_) => {}
Err(_) => {}
}
}
/// Prepare options for the chain pipeline

View file

@ -22,9 +22,7 @@ use std::env;
use core::pow::cuckoo;
use core::pow::cuckoo::Error;
use core::pow::MiningWorker;
use core::consensus::DEFAULT_SIZESHIFT;
use core::global;
use std::collections::HashMap;
use core::core::Proof;
use types::{MinerConfig, ServerConfig};
@ -35,9 +33,7 @@ use cuckoo_miner::{
CuckooMiner,
CuckooPluginManager,
CuckooMinerConfig,
CuckooMinerError,
CuckooMinerSolution,
CuckooPluginCapabilities};
CuckooMinerSolution};
//For now, we're just going to keep a static reference around to the loaded config
//And not allow querying the plugin directory twice once a plugin has been selected
@ -48,7 +44,9 @@ lazy_static!{
static ref LOADED_CONFIG: Mutex<Option<CuckooMinerConfig>> = Mutex::new(None);
}
/// plugin miner
pub struct PluginMiner {
/// the miner
pub miner:Option<CuckooMiner>,
last_solution: CuckooMinerSolution,
config: CuckooMinerConfig,
@ -65,7 +63,8 @@ impl Default for PluginMiner {
}
impl PluginMiner {
pub fn init(&mut self, miner_config: MinerConfig, server_config: ServerConfig){
/// Init the plugin miner
pub fn init(&mut self, miner_config: MinerConfig, _server_config: ServerConfig){
//Get directory of executable
let mut exe_path=env::current_exe().unwrap();
exe_path.pop();
@ -100,7 +99,7 @@ impl PluginMiner {
let mut plugin_manager = CuckooPluginManager::new().unwrap();
let result=plugin_manager.load_plugin_dir(plugin_install_path);
if let Err(e) = result {
if let Err(_) = result {
error!("Unable to load cuckoo-miner plugin directory, either from configuration or [exe_path]/deps.");
panic!("Unable to load plugin directory... Please check configuration values");
}
@ -138,6 +137,7 @@ impl PluginMiner {
self.miner=Some(result.unwrap());
}
/// Get the miner
pub fn get_consumable(&mut self)->CuckooMiner{
//this will load the associated plugin
@ -158,9 +158,9 @@ impl MiningWorker for PluginMiner {
/// version of the miner for now, though this should become
/// configurable somehow
fn new(ease: u32,
sizeshift: u32,
proof_size: usize) -> Self {
fn new(_ease: u32,
_sizeshift: u32,
_proof_size: usize) -> Self {
PluginMiner::default()
}
@ -175,4 +175,3 @@ impl MiningWorker for PluginMiner {
Err(Error::NoSolution)
}
}

View file

@ -19,10 +19,8 @@
use rand::{thread_rng, Rng};
use std::cmp::min;
use std::net::SocketAddr;
use std::ops::Deref;
use std::str::{self, FromStr};
use std::sync::Arc;
use std::thread;
use std::time;
use cpupool;
@ -93,7 +91,12 @@ impl Seeder {
for p in disconnected {
if p.is_banned() {
debug!("Marking peer {} as banned.", p.info.addr);
peer_store.update_state(p.info.addr, p2p::State::Banned);
let update_result = peer_store.update_state(
p.info.addr, p2p::State::Banned);
match update_result {
Ok(()) => {}
Err(_) => {}
}
}
}
@ -240,11 +243,19 @@ fn connect_and_req(capab: p2p::Capabilities,
.then(move |p| {
match p {
Ok(Some(p)) => {
p.send_peer_request(capab);
let peer_result = p.send_peer_request(capab);
match peer_result {
Ok(()) => {}
Err(_) => {}
}
}
Err(e) => {
error!("Peer request error: {:?}", e);
peer_store.update_state(addr, p2p::State::Defunct);
let update_result = peer_store.update_state(addr, p2p::State::Defunct);
match update_result {
Ok(()) => {}
Err(_) => {}
}
}
_ => {}
}

View file

@ -17,36 +17,31 @@
//! as a facade.
use std::net::SocketAddr;
use std::sync::{Arc, Mutex, RwLock};
use std::sync::{Arc, RwLock};
use std::thread;
use std::time;
use futures::{future, Future, Stream};
use futures::{Future, Stream};
use tokio_core::reactor;
use tokio_timer::Timer;
use adapters::*;
use api;
use chain;
use chain::ChainStore;
use core::{self, consensus};
use core::core::hash::Hashed;
use core::pow::cuckoo;
use core::pow::MiningWorker;
use miner;
use p2p;
use pool;
use seed;
use store;
use sync;
use types::*;
use plugin::PluginMiner;
use core::global;
/// Grin server holding internal structures.
pub struct Server {
/// server config
pub config: ServerConfig,
/// event handle
evt_handle: reactor::Handle,
/// handle to our network server
p2p: Arc<p2p::Server>,
@ -137,6 +132,7 @@ impl Server {
Ok(())
}
/// Number of peers
pub fn peer_count(&self) -> u32 {
self.p2p.peer_count()
}
@ -156,6 +152,7 @@ impl Server {
});
}
/// The chain head
pub fn head(&self) -> chain::Tip {
self.chain.head().unwrap()
}

View file

@ -20,7 +20,6 @@
/// How many block bodies to download in parallel
const MAX_BODY_DOWNLOADS: usize = 8;
use std::ops::Deref;
use std::sync::{Arc, Mutex};
use std::thread;
use std::time::{Instant, Duration};
@ -152,7 +151,11 @@ impl Syncer {
while blocks_to_download.len() > 0 && blocks_downloading.len() < MAX_BODY_DOWNLOADS {
let h = blocks_to_download.pop().unwrap();
let peer = self.p2p.random_peer().unwrap();
peer.send_block_request(h);
let send_result = peer.send_block_request(h);
match send_result {
Ok(_) => {}
Err(_) => {}
}
blocks_downloading.push((h, Instant::now()));
}
debug!("Requesting more full block hashes to download, total: {}.",
@ -199,7 +202,7 @@ impl Syncer {
}
// ask for more headers if we got as many as required
if hs_len == (p2p::MAX_BLOCK_HEADERS as usize) {
self.request_headers();
self.request_headers().unwrap();
}
}

View file

@ -173,6 +173,8 @@ impl Default for MinerConfig {
#[derive(Clone)]
pub struct ServerStats {
/// Number of peers
pub peer_count:u32,
/// Chain head
pub head: chain::Tip,
}

View file

@ -30,23 +30,15 @@ extern crate futures_cpupool;
use std::thread;
use std::time;
use std::default::Default;
use std::mem;
use std::fs;
use std::sync::{Arc, Mutex, RwLock};
use std::ops::Deref;
use std::sync::{Arc, Mutex};
use futures::{Future};
use futures::future::join_all;
use futures::task::park;
use tokio_core::reactor;
use tokio_core::reactor::Remote;
use tokio_core::reactor::Handle;
use tokio_timer::Timer;
use secp::Secp256k1;
use wallet::WalletConfig;
use core::consensus;
/// Just removes all results from previous runs
@ -61,6 +53,7 @@ pub fn clean_all_output(test_name_dir:&str){
/// Errors that can be returned by LocalServerContainer
#[derive(Debug)]
#[allow(dead_code)]
pub enum Error {
Internal(String),
Argument(String),
@ -193,8 +186,7 @@ impl LocalServerContainer {
}))
}
pub fn run_server(&mut self,
duration_in_seconds: u64) -> grin::ServerStats
pub fn run_server(&mut self, duration_in_seconds: u64) -> grin::ServerStats
{
let mut event_loop = reactor::Core::new().unwrap();
@ -227,7 +219,7 @@ impl LocalServerContainer {
thread::sleep(time::Duration::from_millis(1000));
}
let mut miner_config = grin::MinerConfig {
let miner_config = grin::MinerConfig {
enable_mining: self.config.start_miner,
burn_reward: self.config.burn_mining_rewards,
use_cuckoo_miner: false,
@ -251,7 +243,7 @@ impl LocalServerContainer {
let timeout = Timer::default().sleep(time::Duration::from_secs(duration_in_seconds));
event_loop.run(timeout);
event_loop.run(timeout).unwrap();
if self.wallet_is_running{
self.stop_wallet();
@ -264,7 +256,7 @@ impl LocalServerContainer {
/// Starts a wallet daemon to receive and returns the
/// listening server url
pub fn run_wallet(&mut self, duration_in_seconds: u64) {
pub fn run_wallet(&mut self, _duration_in_seconds: u64) {
//URL on which to start the wallet listener (i.e. api server)
let url = format!("{}:{}", self.config.base_addr, self.config.wallet_port);
@ -428,16 +420,16 @@ impl LocalServerContainerPool {
self.is_seeding=true;
}
let server_address = format!("{}:{}",
let _server_address = format!("{}:{}",
server_config.base_addr,
server_config.p2p_server_port);
let mut server_container = LocalServerContainer::new(server_config.clone()).unwrap();
let server_container = LocalServerContainer::new(server_config.clone()).unwrap();
//self.server_containers.push(server_arc);
//Create a future that runs the server for however many seconds
//collect them all and run them in the run_all_servers
let run_time = self.config.run_length_in_seconds;
let _run_time = self.config.run_length_in_seconds;
self.server_containers.push(server_container);
@ -447,9 +439,9 @@ impl LocalServerContainerPool {
/// adds n servers, ready to run
///
///
#[allow(dead_code)]
pub fn create_servers(&mut self, number: u16){
for n in 0..number {
for _ in 0..number {
//self.create_server();
}
}
@ -489,7 +481,7 @@ impl LocalServerContainerPool {
for handle in handles {
match handle.join() {
Ok(v) => {}
Ok(_) => {}
Err(e) => {
println!("Error starting server thread: {:?}", e);
panic!(e);
@ -522,5 +514,4 @@ impl LocalServerContainerPool {
}
}
}
}

View file

@ -25,9 +25,6 @@ extern crate futures;
extern crate tokio_core;
extern crate tokio_timer;
use std::sync::{Arc, Mutex, RwLock};
use std::fs;
mod framework;
use std::thread;
@ -35,7 +32,7 @@ use std::time;
use std::default::Default;
use futures::{Future, Poll, Async};
use futures::task::park;
use futures::task::current;
use tokio_core::reactor;
use tokio_timer::Timer;
@ -51,7 +48,7 @@ use framework::{LocalServerContainer, LocalServerContainerConfig, LocalServerCon
/// Block and mining into a wallet for a bit
#[test]
fn basic_genesis_mine() {
env_logger::init();
let _ = env_logger::init();
global::set_mining_mode(MiningParameterMode::AutomatedTesting);
let test_name_dir = "genesis_mine";
@ -82,7 +79,7 @@ fn basic_genesis_mine() {
/// messages they all end up connected.
#[test]
fn simulate_seeding() {
env_logger::init();
let _ = env_logger::init();
global::set_mining_mode(MiningParameterMode::AutomatedTesting);
let test_name_dir = "simulate_seeding";
@ -116,13 +113,13 @@ fn simulate_seeding() {
server_config.p2p_server_port
));
for i in 0..4 {
for _ in 0..4 {
pool.create_server(&mut server_config);
}
pool.connect_all_peers();
let result_vec = pool.run_all_servers();
let _ = pool.run_all_servers();
}
/// Create 1 server, start it mining, then connect 4 other peers mining and
@ -136,8 +133,9 @@ fn simulate_seeding() {
// being,
// As it's more for actively testing and hurts CI a lot
//#[test]
#[allow(dead_code)]
fn simulate_parallel_mining() {
env_logger::init();
let _ = env_logger::init();
global::set_mining_mode(MiningParameterMode::AutomatedTesting);
let test_name_dir = "simulate_parallel_mining";
@ -179,7 +177,7 @@ fn simulate_parallel_mining() {
pool.connect_all_peers();
let result_vec = pool.run_all_servers();
let _ = pool.run_all_servers();
// Check mining difficulty here?, though I'd think it's more valuable
// to simply output it. Can at least see the evolution of the difficulty target
@ -335,7 +333,7 @@ impl<'a> Future for HeadChange<'a> {
Ok(Async::Ready(new_head))
} else {
// egregious polling, asking the task to schedule us every iteration
park().unpark();
current().notify();
Ok(Async::NotReady)
}
}

View file

@ -24,12 +24,12 @@ use futures;
use futures::{Stream, Future};
use futures::stream;
use futures::sync::mpsc::{Sender, UnboundedSender, UnboundedReceiver};
use tokio_core::io::{WriteHalf, ReadHalf, write_all, read_exact};
use tokio_core::net::TcpStream;
use tokio_io::{AsyncRead, AsyncWrite};
use tokio_io::io::{read_exact, write_all};
use tokio_timer::{Timer, TimerError};
use tokio_io::*;
use core::core::hash::{Hash, ZERO_HASH};
use core::core::hash::Hash;
use core::ser;
use msg::*;
use types::Error;
@ -65,6 +65,7 @@ impl<F> Handler for F
/// A higher level connection wrapping the TcpStream. Maintains the amount of
/// data transmitted and deals with the low-level task of sending and
/// receiving data, parsing message headers and timeouts.
#[allow(dead_code)]
pub struct Connection {
// Channel to push bytes to the remote peer
outbound_chan: UnboundedSender<Vec<u8>>,
@ -150,7 +151,7 @@ impl Connection {
})
// write the data and make sure the future returns the right types
.fold(writer, |writer, data| {
write_all(writer, data).map_err(|e| Error::Connection(e)).map(|(writer, buf)| writer)
write_all(writer, data).map_err(|e| Error::Connection(e)).map(|(writer, _)| writer)
});
Box::new(send_data)
}
@ -287,7 +288,7 @@ impl TimeoutConnection {
underlying: conn,
expected_responses: expects,
};
(me, Box::new(fut.select(timer).map(|_| ()).map_err(|(e1, e2)| e1)))
(me, Box::new(fut.select(timer).map(|_| ()).map_err(|(e1, _)| e1)))
}
/// Sends a request and registers a timer on the provided message type and
@ -298,7 +299,7 @@ impl TimeoutConnection {
body: &W,
expect_h: Option<(Hash)>)
-> Result<(), Error> {
let sent = try!(self.underlying.send_msg(t, body));
let _sent = try!(self.underlying.send_msg(t, body));
let mut expects = self.expected_responses.lock().unwrap();
expects.push((rt, expect_h, Instant::now()));

View file

@ -31,7 +31,6 @@ extern crate grin_util as util;
#[macro_use]
extern crate log;
extern crate futures;
#[macro_use]
extern crate tokio_core;
extern crate tokio_io;
extern crate bytes;

View file

@ -19,7 +19,7 @@ use num::FromPrimitive;
use futures::future::{Future, ok};
use tokio_core::net::TcpStream;
use tokio_core::io::{write_all, read_exact};
use tokio_io::io::{read_exact, write_all};
use core::consensus::MAX_MSG_LEN;
use core::core::BlockHeader;
@ -42,6 +42,7 @@ const MAGIC: [u8; 2] = [0x1e, 0xc5];
pub const HEADER_LEN: u64 = 11;
/// Codes for each error that can be produced reading a message.
#[allow(dead_code)]
pub enum ErrCodes {
UnsupportedVersion = 100,
}
@ -105,12 +106,12 @@ pub fn write_msg<T>(conn: TcpStream,
let write_msg = ok((conn)).and_then(move |conn| {
// prepare the body first so we know its serialized length
let mut body_buf = vec![];
ser::serialize(&mut body_buf, &msg);
ser::serialize(&mut body_buf, &msg).unwrap();
// build and serialize the header using the body size
let mut header_buf = vec![];
let blen = body_buf.len() as u64;
ser::serialize(&mut header_buf, &MsgHeader::new(msg_type, blen));
ser::serialize(&mut header_buf, &MsgHeader::new(msg_type, blen)).unwrap();
// send the whole thing
write_all(conn, header_buf)
@ -202,9 +203,9 @@ impl Writeable for Hand {
[write_u32, self.version],
[write_u32, self.capabilities.bits()],
[write_u64, self.nonce]);
self.total_difficulty.write(writer);
self.sender_addr.write(writer);
self.receiver_addr.write(writer);
self.total_difficulty.write(writer).unwrap();
self.sender_addr.write(writer).unwrap();
self.receiver_addr.write(writer).unwrap();
writer.write_bytes(&self.user_agent)
}
}
@ -250,8 +251,8 @@ impl Writeable for Shake {
ser_multiwrite!(writer,
[write_u32, self.version],
[write_u32, self.capabilities.bits()]);
self.total_difficulty.write(writer);
writer.write_bytes(&self.user_agent);
self.total_difficulty.write(writer).unwrap();
writer.write_bytes(&self.user_agent).unwrap();
Ok(())
}
}
@ -302,7 +303,7 @@ impl Writeable for PeerAddrs {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
try!(writer.write_u32(self.peers.len() as u32));
for p in &self.peers {
p.write(writer);
p.write(writer).unwrap();
}
Ok(())
}
@ -464,13 +465,13 @@ impl Readable for Headers {
pub struct Empty {}
impl Writeable for Empty {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
fn write<W: Writer>(&self, _: &mut W) -> Result<(), ser::Error> {
Ok(())
}
}
impl Readable for Empty {
fn read(reader: &mut Reader) -> Result<Empty, ser::Error> {
fn read(_: &mut Reader) -> Result<Empty, ser::Error> {
Ok(Empty {})
}
}

View file

@ -91,7 +91,7 @@ impl Peer {
// handle disconnection, standard disconnections aren't considered an error
let mut state = state.write().unwrap();
match res {
Ok(res) => {
Ok(_) => {
*state = State::Disconnected;
info!("Client {} disconnected.", addr);
Ok(())

View file

@ -14,9 +14,7 @@
use std::sync::{Mutex, Arc};
use futures;
use futures::Future;
use futures::stream;
use futures::sync::mpsc::UnboundedSender;
use tokio_core::net::TcpStream;
@ -28,6 +26,7 @@ use msg::*;
use types::*;
use util::OneTime;
#[allow(dead_code)]
pub struct ProtocolV1 {
conn: OneTime<TimeoutConnection>,
@ -128,7 +127,7 @@ fn handle_payload(adapter: &NetAdapter,
match header.msg_type {
Type::Ping => {
let data = ser::ser_vec(&MsgHeader::new(Type::Pong, 0))?;
sender.send(data);
sender.send(data).unwrap();
Ok(None)
}
Type::Pong => Ok(None),
@ -148,7 +147,7 @@ fn handle_payload(adapter: &NetAdapter,
try!(ser::serialize(&mut data,
&MsgHeader::new(Type::Block, body_data.len() as u64)));
data.append(&mut body_data);
sender.send(data);
sender.send(data).unwrap();
}
Ok(None)
}
@ -170,7 +169,7 @@ fn handle_payload(adapter: &NetAdapter,
try!(ser::serialize(&mut data,
&MsgHeader::new(Type::Headers, body_data.len() as u64)));
data.append(&mut body_data);
sender.send(data);
sender.send(data).unwrap();
Ok(None)
}
@ -193,7 +192,7 @@ fn handle_payload(adapter: &NetAdapter,
try!(ser::serialize(&mut data,
&MsgHeader::new(Type::PeerAddrs, body_data.len() as u64)));
data.append(&mut body_data);
sender.send(data);
sender.send(data).unwrap();
Ok(None)
}

View file

@ -14,12 +14,11 @@
//! Provides wrappers for throttling readers and writers
use std::time::{Instant, Duration};
use std::time::Instant;
use std::io;
use futures::*;
use tokio_io::*;
use bytes::{Buf, BytesMut, BufMut};
/// A Rate Limited Reader
#[derive(Debug)]
@ -32,6 +31,7 @@ pub struct ThrottledReader<R: AsyncRead> {
last_check: Instant,
}
#[allow(dead_code)]
impl<R: AsyncRead> ThrottledReader<R> {
/// Adds throttling to a reader.
/// The resulting reader will read at most `max` amount of bytes per second
@ -105,6 +105,7 @@ pub struct ThrottledWriter<W: AsyncWrite> {
last_check: Instant,
}
#[allow(dead_code)]
impl<W: AsyncWrite> ThrottledWriter<W> {
/// Adds throttling to a writer.
/// The resulting writer will write at most `max` amount of bytes per second

View file

@ -41,20 +41,20 @@ impl NetAdapter for DummyAdapter {
fn total_difficulty(&self) -> Difficulty {
Difficulty::one()
}
fn transaction_received(&self, tx: core::Transaction) {}
fn block_received(&self, b: core::Block) {}
fn headers_received(&self, bh: Vec<core::BlockHeader>) {}
fn locate_headers(&self, locator: Vec<Hash>) -> Vec<core::BlockHeader> {
fn transaction_received(&self, _: core::Transaction) {}
fn block_received(&self, _: core::Block) {}
fn headers_received(&self, _: Vec<core::BlockHeader>) {}
fn locate_headers(&self, _: Vec<Hash>) -> Vec<core::BlockHeader> {
vec![]
}
fn get_block(&self, h: Hash) -> Option<core::Block> {
fn get_block(&self, _: Hash) -> Option<core::Block> {
None
}
fn find_peer_addrs(&self, capab: Capabilities) -> Vec<SocketAddr> {
fn find_peer_addrs(&self, _: Capabilities) -> Vec<SocketAddr> {
vec![]
}
fn peer_addrs_received(&self, peer_addrs: Vec<SocketAddr>) {}
fn peer_connected(&self, pi: &PeerInfo) {}
fn peer_addrs_received(&self, _: Vec<SocketAddr>) {}
fn peer_connected(&self, _: &PeerInfo) {}
}
/// P2P server implementation, handling bootstrapping to find and connect to
@ -97,7 +97,7 @@ impl Server {
// main peer acceptance future handling handshake
let hp = h.clone();
let peers = socket.incoming().map_err(From::from).map(move |(conn, addr)| {
let peers = socket.incoming().map_err(From::from).map(move |(conn, _)| {
let adapter = adapter.clone();
let total_diff = adapter.total_difficulty();
let peers = peers.clone();
@ -275,7 +275,7 @@ impl Server {
for p in peers.deref() {
p.stop();
}
self.stop.into_inner().unwrap().complete(());
self.stop.into_inner().unwrap().send(()).unwrap();
}
}

View file

@ -33,6 +33,7 @@ pub const MAX_LOCATORS: u32 = 10;
pub const MAX_BLOCK_HEADERS: u32 = 512;
/// Maximum number of block bodies a peer should ever ask for and send
#[allow(dead_code)]
pub const MAX_BLOCK_BODIES: u32 = 16;
/// Maximum number of peer addresses a peer should ever send
@ -57,7 +58,7 @@ impl From<io::Error> for Error {
}
}
impl From<TimerError> for Error {
fn from(e: TimerError) -> Error {
fn from(_: TimerError) -> Error {
Error::Timeout
}
}

View file

@ -26,7 +26,6 @@ use futures::future::Future;
use tokio_core::net::TcpStream;
use tokio_core::reactor::{self, Core};
use core::ser;
use core::core::target::Difficulty;
use p2p::Peer;

View file

@ -25,6 +25,7 @@ pub struct DummyUtxoSet {
outputs : HashMap<Commitment, transaction::Output>
}
#[allow(dead_code)]
impl DummyUtxoSet {
pub fn empty() -> DummyUtxoSet{
DummyUtxoSet{outputs: HashMap::new()}
@ -50,7 +51,7 @@ impl DummyUtxoSet {
self.outputs.insert(output.commitment(), output.clone());
}
}
pub fn rewind(&self, b: &block::Block) -> DummyUtxoSet {
pub fn rewind(&self, _: &block::Block) -> DummyUtxoSet {
DummyUtxoSet{outputs: HashMap::new()}
}
pub fn get_output(&self, output_ref: &Commitment) -> Option<&transaction::Output> {
@ -75,10 +76,12 @@ impl DummyUtxoSet {
/// A DummyChain is the mocked chain for playing with what methods we would
/// need
#[allow(dead_code)]
pub struct DummyChainImpl {
utxo: RwLock<DummyUtxoSet>
}
#[allow(dead_code)]
impl DummyChainImpl {
pub fn new() -> DummyChainImpl {
DummyChainImpl{

View file

@ -15,15 +15,9 @@
//! Base types for the transaction pool's Directed Acyclic Graphs
use std::vec::Vec;
use std::sync::Arc;
use std::sync::RwLock;
use std::sync::Weak;
use std::cell::RefCell;
use std::collections::HashMap;
use secp::pedersen::Commitment;
use secp::{Secp256k1, ContextFlag};
use secp::key;
use time;
use rand;
@ -36,15 +30,18 @@ use core::core;
/// These are the vertices of both of the graph structures
pub struct PoolEntry {
// Core data
// Unique identifier of this pool entry and the corresponding transaction
/// Unique identifier of this pool entry and the corresponding transaction
pub transaction_hash: core::hash::Hash,
// Metadata
size_estimate: u64,
/// Size estimate
pub size_estimate: u64,
/// Receive timestamp
pub receive_ts: time::Tm,
}
impl PoolEntry {
/// Create new transaction pool entry
pub fn new(tx: &core::transaction::Transaction) -> PoolEntry {
PoolEntry{
transaction_hash: transaction_identifier(tx),
@ -53,7 +50,8 @@ impl PoolEntry {
}
}
fn estimate_transaction_size(tx: &core::transaction::Transaction) -> u64 {
/// TODO guessing this needs implementing
fn estimate_transaction_size(_tx: &core::transaction::Transaction) -> u64 {
0
}
@ -72,24 +70,32 @@ pub struct Edge {
}
impl Edge{
/// Create new edge
pub fn new(source: Option<core::hash::Hash>, destination: Option<core::hash::Hash>, output: Commitment) -> Edge {
Edge{source: source, destination: destination, output: output}
}
/// Create new edge with a source
pub fn with_source(&self, src: Option<core::hash::Hash>) -> Edge {
Edge{source: src, destination: self.destination, output: self.output}
}
/// Create new edge with destination
pub fn with_destination(&self, dst: Option<core::hash::Hash>) -> Edge {
Edge{source: self.source, destination: dst, output: self.output}
}
/// The output commitment of the edge
pub fn output_commitment(&self) -> Commitment {
self.output
}
/// The destination hash of the edge
pub fn destination_hash(&self) -> Option<core::hash::Hash> {
self.destination
}
/// The source hash of the edge
pub fn source_hash(&self) -> Option<core::hash::Hash> {
self.source
}
@ -115,6 +121,7 @@ pub struct DirectedGraph {
}
impl DirectedGraph {
/// Create an empty directed graph
pub fn empty() -> DirectedGraph {
DirectedGraph{
edges: HashMap::new(),
@ -123,14 +130,17 @@ impl DirectedGraph {
}
}
/// Get an edge by its commitment
pub fn get_edge_by_commitment(&self, output_commitment: &Commitment) -> Option<&Edge> {
self.edges.get(output_commitment)
}
/// Remove an edge by its commitment
pub fn remove_edge_by_commitment(&mut self, output_commitment: &Commitment) -> Option<Edge> {
self.edges.remove(output_commitment)
}
/// Remove a vertex by its hash
pub fn remove_vertex(&mut self, tx_hash: core::hash::Hash) -> Option<PoolEntry> {
match self.roots.iter().position(|x| x.transaction_hash == tx_hash) {
Some(i) => Some(self.roots.swap_remove(i)),
@ -163,8 +173,8 @@ impl DirectedGraph {
}
}
// add_vertex_only adds a vertex, meant to be complemented by add_edge_only
// in cases where delivering a vector of edges is not feasible or efficient
/// add_vertex_only adds a vertex, meant to be complemented by add_edge_only
/// in cases where delivering a vector of edges is not feasible or efficient
pub fn add_vertex_only(&mut self, vertex: PoolEntry, is_root: bool) {
if is_root {
self.roots.push(vertex);
@ -173,6 +183,7 @@ impl DirectedGraph {
}
}
/// add_edge_only adds an edge
pub fn add_edge_only(&mut self, edge: Edge) {
self.edges.insert(edge.output_commitment(), edge);
}
@ -209,6 +220,8 @@ pub fn transaction_identifier(tx: &core::transaction::Transaction) -> core::hash
#[cfg(test)]
mod tests {
use super::*;
use secp::{Secp256k1, ContextFlag};
use secp::key;
#[test]
fn test_add_entry() {
@ -243,7 +256,7 @@ mod tests {
}
/// For testing/debugging: a random tx hash
fn random_hash() -> core::hash::Hash {
pub fn random_hash() -> core::hash::Hash {
let hash_bytes: [u8;32]= rand::random();
core::hash::Hash(hash_bytes)
}

View file

@ -28,7 +28,6 @@ mod pool;
extern crate time;
extern crate rand;
#[macro_use]
extern crate log;
extern crate grin_core as core;

View file

@ -20,22 +20,22 @@ pub use graph;
use core::core::transaction;
use core::core::block;
use core::core::hash;
// Temporary blockchain dummy impls
use blockchain::{DummyChain, DummyChainImpl, DummyUtxoSet};
use secp;
use secp::pedersen::Commitment;
use std::sync::{Arc, RwLock, Weak};
use std::sync::Arc;
use std::collections::HashMap;
/// The pool itself.
/// The transactions HashMap holds ownership of all transactions in the pool,
/// keyed by their transaction hash.
pub struct TransactionPool<T> {
/// All transactions in the pool
pub transactions: HashMap<hash::Hash, Box<transaction::Transaction>>,
/// The pool itself
pub pool : Pool,
/// Orphans in the pool
pub orphans: Orphans,
// blockchain is a DummyChain, for now, which mimics what the future
@ -44,6 +44,7 @@ pub struct TransactionPool<T> {
}
impl<T> TransactionPool<T> where T: BlockChain {
/// Create a new transaction pool
pub fn new(chain: Arc<T>) -> TransactionPool<T> {
TransactionPool{
transactions: HashMap::new(),
@ -76,7 +77,7 @@ impl<T> TransactionPool<T> where T: BlockChain {
// output designated by output_commitment.
fn search_blockchain_unspents(&self, output_commitment: &Commitment) -> Option<Parent> {
self.blockchain.get_unspent(output_commitment).
map(|o| match self.pool.get_blockchain_spent(output_commitment) {
map(|_| match self.pool.get_blockchain_spent(output_commitment) {
Some(x) => Parent::AlreadySpent{other_tx: x.destination_hash().unwrap()},
None => Parent::BlockTransaction,
})
@ -96,10 +97,12 @@ impl<T> TransactionPool<T> where T: BlockChain {
self.pool.num_transactions()
}
/// Get the number of orphans in the pool
pub fn orphans_size(&self) -> usize {
self.orphans.num_transactions()
}
/// Get the total size (transactions + orphans) of the pool
pub fn total_size(&self) -> usize {
self.pool.num_transactions() + self.orphans.num_transactions()
}
@ -110,7 +113,7 @@ impl<T> TransactionPool<T> where T: BlockChain {
/// if necessary, and performing any connection-related validity checks.
/// Happens under an exclusive mutable reference gated by the write portion
/// of a RWLock.
pub fn add_to_memory_pool(&mut self, source: TxSource, tx: transaction::Transaction) -> Result<(), PoolError> {
pub fn add_to_memory_pool(&mut self, _: TxSource, tx: transaction::Transaction) -> Result<(), PoolError> {
// Making sure the transaction is valid before anything else.
let secp = secp::Secp256k1::with_caps(secp::ContextFlag::Commit);
tx.validate(&secp).map_err(|_| PoolError::Invalid)?;
@ -183,7 +186,7 @@ impl<T> TransactionPool<T> where T: BlockChain {
self.pool.add_pool_transaction(pool_entry, blockchain_refs,
pool_refs, new_unspents);
self.reconcile_orphans();
self.reconcile_orphans().unwrap();
self.transactions.insert(tx_hash, Box::new(tx));
Ok(())
@ -389,7 +392,7 @@ impl<T> TransactionPool<T> where T: BlockChain {
}
let freed_txs = self.sweep_transactions(marked_transactions);
self.reconcile_orphans();
self.reconcile_orphans().unwrap();
Ok(freed_txs)
}
@ -466,6 +469,8 @@ mod tests {
use secp::{Secp256k1, ContextFlag, constants};
use secp::key;
use core::core::build;
use blockchain::{DummyChain, DummyChainImpl, DummyUtxoSet};
use std::sync::{Arc, RwLock};
macro_rules! expect_output_parent {
($pool:expr, $expected:pat, $( $output:expr ),+ ) => {
@ -478,7 +483,6 @@ mod tests {
}
}
#[test]
/// A basic test; add a pair of transactions to the pool.
fn test_basic_pool_add() {
@ -542,6 +546,7 @@ mod tests {
}
}
#[test]
/// Testing various expected error conditions
pub fn test_pool_add_error() {
@ -595,7 +600,7 @@ mod tests {
Ok(_) => panic!("Expected error when adding double spend, got Ok"),
Err(x) => {
match x {
PoolError::DoubleSpend{other_tx, spent_output} => {
PoolError::DoubleSpend{other_tx: _, spent_output} => {
if spent_output != test_output(6).commitment() {
panic!("Unexpected parameter in DoubleSpend: {:?}", x);
}
@ -765,9 +770,8 @@ mod tests {
// Evicted transactions should have unknown outputs
expect_output_parent!(read_pool, Parent::Unknown, 2, 11);
}
}
#[test]
/// Test transaction selection and block building.
fn test_block_building() {

View file

@ -16,10 +16,6 @@
//! and its top-level members.
use std::vec::Vec;
use std::sync::Arc;
use std::sync::RwLock;
use std::sync::Weak;
use std::cell::RefCell;
use std::collections::HashMap;
use std::iter::Iterator;
use std::fmt;
@ -28,10 +24,7 @@ use secp::pedersen::Commitment;
pub use graph;
use time;
use core::core::transaction;
use core::core::block;
use core::core::hash;
/// Placeholder: the data representing where we heard about a tx from.
@ -71,14 +64,31 @@ impl fmt::Debug for Parent {
}
}
// TODO document this enum more accurately
/// Enum of errors
#[derive(Debug)]
pub enum PoolError {
/// An invalid pool entry
Invalid,
/// An entry already in the pool
AlreadyInPool,
DuplicateOutput{other_tx: Option<hash::Hash>, in_chain: bool,
output: Commitment},
DoubleSpend{other_tx: hash::Hash, spent_output: Commitment},
// An orphan successfully added to the orphans set
/// A duplicate output
DuplicateOutput{
/// The other transaction
other_tx: Option<hash::Hash>,
/// Is in chain?
in_chain: bool,
/// The output
output: Commitment
},
/// A double spend
DoubleSpend{
/// The other transaction
other_tx: hash::Hash,
/// The spent output
spent_output: Commitment
},
/// An orphan successfully added to the orphans set
OrphanTransaction,
}
@ -260,7 +270,7 @@ impl Orphans {
/// to the pool as well as links internal to orphan transactions.
/// Returns the transaction hash corresponding to the conflicting
/// transaction.
fn check_double_spend(&self, o: transaction::Output) -> Option<hash::Hash> {
pub fn check_double_spend(&self, o: transaction::Output) -> Option<hash::Hash> {
self.graph.get_edge_by_commitment(&o.commitment()).or(self.pool_connections.get(&o.commitment())).map(|x| x.destination_hash().unwrap())
}

View file

@ -143,6 +143,7 @@ impl AsRef<[u8]> for RangeProof {
}
impl RangeProof {
/// Create the zero range proof
pub fn zero() -> RangeProof {
RangeProof {
proof: [0; constants::MAX_PROOF_SIZE],

View file

@ -187,6 +187,8 @@ impl<'a> Batch<'a> {
}
}
/// Delete a single key from the batch. The write function
/// must be called to "commit" the batch to storage.
pub fn delete(mut self, key: &[u8]) -> Result<Batch<'a>, Error> {
self.batch.delete(key)?;
Ok(self)

View file

@ -29,7 +29,7 @@ pub fn refresh_outputs(config: &WalletConfig, ext_key: &ExtendedKey) {
let secp = secp::Secp256k1::with_caps(secp::ContextFlag::Commit);
// operate within a lock on wallet data
WalletData::with_wallet(&config.data_file_dir, |wallet_data| {
let _ = WalletData::with_wallet(&config.data_file_dir, |wallet_data| {
// check each output that's not spent
for out in &mut wallet_data.outputs {

View file

@ -50,13 +50,13 @@
//! So we may as well have it in place already.
use std::convert::From;
use secp::{self, Secp256k1};
use secp::{self};
use secp::key::SecretKey;
use core::core::{Block, Transaction, TxKernel, Output, build};
use core::ser;
use api::{self, ApiEndpoint, Operation, ApiResult};
use extkey::{self, ExtendedKey};
use extkey::ExtendedKey;
use types::*;
use util;
@ -76,7 +76,7 @@ pub fn receive_json_tx(config: &WalletConfig, ext_key: &ExtendedKey, partial_tx_
let tx_hex = util::to_hex(ser::ser_vec(&final_tx).unwrap());
let url = format!("{}/v1/pool/push", config.check_node_api_http_addr.as_str());
api::client::post(url.as_str(), &TxWrapper { tx_hex: tx_hex })?;
let _: TxWrapper = api::client::post(url.as_str(), &TxWrapper { tx_hex: tx_hex })?;
Ok(())
}
@ -136,7 +136,7 @@ impl ApiEndpoint for WalletReceiver {
debug!("Operation {} with transaction {}", op, &partial_tx_str);
receive_json_tx(&self.config, &self.key, &partial_tx_str).map_err(|e| {
api::Error::Internal(format!("Error processing partial transaction: {:?}", e))
});
}).unwrap();
//TODO: Return emptiness for now, should be a proper enum return type
Ok(CbData {

View file

@ -13,7 +13,7 @@
// limitations under the License.
use std::convert::From;
use secp::{self, Secp256k1};
use secp::{self};
use secp::key::SecretKey;
use checker;
@ -36,14 +36,11 @@ pub fn issue_send_tx(config: &WalletConfig, ext_key: &ExtendedKey, amount: u64,
if dest == "stdout" {
println!("{}", json_tx);
} else if &dest[..4] == "http" {
let url = format!("{}/v1/receive/receive_json_tx",
&dest);
let url = format!("{}/v1/receive/receive_json_tx", &dest);
debug!("Posting partial transaction to {}", url);
let request = WalletReceiveRequest::PartialTransaction(json_tx);
let res: CbData = api::client::post(url.as_str(),
&request)
let _: CbData = api::client::post(url.as_str(), &request)
.expect(&format!("Wallet receiver at {} unreachable, could not send transaction. Is it running?", url));
}
Ok(())
}

View file

@ -65,7 +65,7 @@ impl From<serde_json::Error> for Error {
}
impl From<num::ParseIntError> for Error {
fn from(e: num::ParseIntError) -> Error {
fn from(_: num::ParseIntError) -> Error {
Error::Format("Invalid hex".to_string())
}
}
@ -166,7 +166,7 @@ impl WalletData {
let lock_file_path = &format!("{}{}{}", data_file_dir, MAIN_SEPARATOR, LOCK_FILE);
// create the lock files, if it already exists, will produce an error
OpenOptions::new().write(true).create_new(true).open(lock_file_path).map_err(|e| {
OpenOptions::new().write(true).create_new(true).open(lock_file_path).map_err(|_| {
Error::WalletData(format!("Could not create wallet lock file. Either \
some other process is using the wallet or there's a write access \
issue."))
@ -178,7 +178,7 @@ impl WalletData {
wdat.write(data_file_path)?;
// delete the lock file
fs::remove_file(lock_file_path).map_err(|e| {
fs::remove_file(lock_file_path).map_err(|_| {
Error::WalletData(format!("Could not remove wallet lock file. Maybe insufficient \
rights?"))
})?;