mirror of https://github.com/mimblewimble/grin.git
Cleanup build warnings (#87)
* minor cleanup - unused imports
* cleanup build warnings - unused vars
* make structs pub to get rid of the private_in_public lint warning
* missing docs on RangeProof
* add missing docs to store delete function
* cleaned up deprecation warning - tokio_core -> tokio_io, complete() -> send()
parent 131ea2f799
commit 3b4a48b2fd
48 changed files with 418 additions and 362 deletions
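The same few warning fixes recur throughout the diff below. As a minimal, self-contained sketch (illustrative only, not code from this commit; the names are hypothetical):

// Illustrative only: the three warning classes this commit cleans up.

// private_in_public: a type returned from a `pub` API must itself be `pub`,
// so `struct PoolInfo` becomes `pub struct PoolInfo` (fields can stay private).
pub struct PoolInfo {
    pool_size: usize,
}

// unused_variables: arguments required by a trait signature but not used
// are bound to `_` (or given a leading underscore).
pub fn get(_: String) -> PoolInfo {
    PoolInfo { pool_size: 0 }
}

fn main() {
    // unused results: a Result that is deliberately ignored is bound with
    // `let _ =` rather than silently dropped.
    let _ = "17".parse::<u64>();
    println!("pool_size = {}", get(String::new()).pool_size);
}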
@@ -21,11 +21,10 @@
 // }
 // }

-use std::sync::{Arc, Mutex, RwLock};
+use std::sync::{Arc, RwLock};
 use std::thread;

 use core::core::{Transaction, Output};
-use core::core::hash::Hash;
 use core::ser;
 use chain::{self, Tip};
 use pool;
@@ -51,7 +50,7 @@ impl ApiEndpoint for ChainApi {
 vec![Operation::Get]
 }

-fn get(&self, id: String) -> ApiResult<Tip> {
+fn get(&self, _: String) -> ApiResult<Tip> {
 self.chain.head().map_err(|e| Error::Internal(format!("{:?}", e)))
 }
 }
@@ -75,13 +74,13 @@ impl ApiEndpoint for OutputApi {

 fn get(&self, id: String) -> ApiResult<Output> {
 debug!("GET output {}", id);
-let c = util::from_hex(id.clone()).map_err(|e| Error::Argument(format!("Not a valid commitment: {}", id)))?;
+let c = util::from_hex(id.clone()).map_err(|_| Error::Argument(format!("Not a valid commitment: {}", id)))?;

 match self.chain.get_unspent(&Commitment::from_vec(c)) {
 Some(utxo) => Ok(utxo),
 None => Err(Error::NotFound),
 }

 }
 }

@@ -93,7 +92,7 @@ pub struct PoolApi<T> {
 }

 #[derive(Serialize, Deserialize)]
-struct PoolInfo {
+pub struct PoolInfo {
 pool_size: usize,
 orphans_size: usize,
 total_size: usize,
@@ -111,7 +110,7 @@ impl<T> ApiEndpoint for PoolApi<T>
 vec![Operation::Get, Operation::Custom("push".to_string())]
 }

-fn get(&self, id: String) -> ApiResult<PoolInfo> {
+fn get(&self, _: String) -> ApiResult<PoolInfo> {
 let pool = self.tx_pool.read().unwrap();
 Ok(PoolInfo {
 pool_size: pool.pool_size(),
@@ -120,9 +119,9 @@ impl<T> ApiEndpoint for PoolApi<T>
 })
 }

-fn operation(&self, op: String, input: TxWrapper) -> ApiResult<()> {
+fn operation(&self, _: String, input: TxWrapper) -> ApiResult<()> {
 let tx_bin = util::from_hex(input.tx_hex)
-.map_err(|e| Error::Argument(format!("Invalid hex in transaction wrapper.")))?;
+.map_err(|_| Error::Argument(format!("Invalid hex in transaction wrapper.")))?;

 let tx: Transaction = ser::deserialize(&mut &tx_bin[..]).map_err(|_| {
 Error::Argument("Could not deserialize transaction, invalid format.".to_string())
@@ -146,7 +145,7 @@ impl<T> ApiEndpoint for PoolApi<T>

 /// Dummy wrapper for the hex-encoded serialized transaction.
 #[derive(Serialize, Deserialize)]
-struct TxWrapper {
+pub struct TxWrapper {
 tx_hex: String,
 }

@@ -30,9 +30,8 @@ use iron::{Iron, Request, Response, IronResult, IronError, status, headers, List
 use iron::method::Method;
 use iron::modifiers::Header;
 use iron::middleware::Handler;
-use iron::error::HttpResult;
 use router::Router;
-use serde::{Serialize, Deserialize};
+use serde::Serialize;
 use serde::de::DeserializeOwned;
 use serde_json;

@@ -331,7 +330,6 @@ impl ApiServer {
 #[cfg(test)]
 mod test {
 use super::*;
-use rest::*;

 #[derive(Serialize, Deserialize)]
 pub struct Animal {

@@ -97,7 +97,8 @@ impl Chain {
 Err(e) => return Err(Error::StoreErr(e)),
 };

-let head = chain_store.head()?;
+// TODO - confirm this was safe to remove based on code above?
+// let head = chain_store.head()?;

 Ok(Chain {
 store: Arc::new(chain_store),
@@ -172,11 +173,11 @@ impl Chain {
 }
 }

 /// Pop orphans out of the queue and check if we can now accept them.
 fn check_orphans(&self) {
 // first check how many we have to retry, unfort. we can't extend the lock
 // in the loop as it needs to be freed before going in process_block
-let mut orphan_count = 0;
+let orphan_count;
 {
 let orphans = self.orphans.lock().unwrap();
 orphan_count = orphans.len();
@@ -184,13 +185,13 @@ impl Chain {

 // pop each orphan and retry, if still orphaned, will be pushed again
 for _ in 0..orphan_count {
-let mut popped = None;
+let popped;
 {
 let mut orphans = self.orphans.lock().unwrap();
 popped = orphans.pop_back();
 }
 if let Some((opts, orphan)) = popped {
-self.process_block(orphan, opts);
+let _process_result = self.process_block(orphan, opts);
 }
 }
 }

@@ -14,7 +14,6 @@

 //! Implementation of the chain block acceptance (or refusal) pipeline.

-use std::convert::From;
 use std::sync::{Arc, Mutex};

 use secp;
@@ -22,22 +21,24 @@ use time;

 use core::consensus;
 use core::core::hash::{Hash, Hashed};
-use core::core::target::Difficulty;
-use core::core::{BlockHeader, Block, Proof};
+use core::core::{BlockHeader, Block};
 use core::pow;
-use core::ser;
 use types::*;
 use store;
 use core::global;
-use core::global::{MiningParameterMode,MINING_PARAMETER_MODE};

 /// Contextual information required to process a new block and either reject or
 /// accept it.
 pub struct BlockContext {
+/// The options
 pub opts: Options,
+/// The store
 pub store: Arc<ChainStore>,
+/// The adapter
 pub adapter: Arc<ChainAdapter>,
+/// The head
 pub head: Tip,
+/// The lock
 pub lock: Arc<Mutex<bool>>,
 }

@@ -68,11 +69,12 @@ pub fn process_block(b: &Block, mut ctx: BlockContext) -> Result<Option<Tip>, Er
 b.hash()
 );

-ctx.lock.lock();
+let _ = ctx.lock.lock().unwrap();
 add_block(b, &mut ctx)?;
 update_head(b, &mut ctx)
 }

+/// Process the block header
 pub fn process_block_header(bh: &BlockHeader, mut ctx: BlockContext) -> Result<Option<Tip>, Error> {

 info!(
@@ -84,7 +86,7 @@ pub fn process_block_header(bh: &BlockHeader, mut ctx: BlockContext) -> Result<O
 validate_header(&bh, &mut ctx)?;
 add_block_header(bh, &mut ctx)?;

-ctx.lock.lock();
+let _ = ctx.lock.lock().unwrap();
 update_header_head(bh, &mut ctx)
 }

@@ -149,11 +151,10 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), E
 return Err(Error::DifficultyTooLow);
 }

-let param_ref=MINING_PARAMETER_MODE.read().unwrap();
 let cycle_size = if ctx.opts.intersects(EASY_POW) {
 global::sizeshift()
 } else {
 consensus::DEFAULT_SIZESHIFT
 };
 debug!("Validating block with cuckoo size {}", cycle_size);
 if !pow::verify_size(header, cycle_size as u32) {

@@ -41,6 +41,7 @@ pub struct ChainKVStore {
 }

 impl ChainKVStore {
+/// Create new chain store
 pub fn new(root_path: String) -> Result<ChainKVStore, Error> {
 let db = grin_store::Store::open(format!("{}/{}", root_path, STORE_SUBPATH).as_str())?;
 Ok(ChainKVStore { db: db })
@@ -152,7 +153,7 @@ impl ChainStore for ChainKVStore {
 self.db.put_ser(
 &u64_to_key(HEADER_HEIGHT_PREFIX, real_prev.height),
 &real_prev,
-);
+).unwrap();
 prev_h = real_prev.previous;
 prev_height = real_prev.height - 1;
 } else {

@@ -27,6 +27,7 @@ use grin_store;
 bitflags! {
 /// Options for block validation
 pub flags Options: u32 {
+/// None flag
 const NONE = 0b00000001,
 /// Runs without checking the Proof of Work, mostly to make testing easier.
 const SKIP_POW = 0b00000010,
@@ -37,6 +38,7 @@ bitflags! {
 }
 }

+/// Errors
 #[derive(Debug)]
 pub enum Error {
 /// The block doesn't fit anywhere in our chain
@@ -202,5 +204,5 @@ pub trait ChainAdapter {
 /// Dummy adapter used as a placeholder for real implementations
 pub struct NoopAdapter {}
 impl ChainAdapter for NoopAdapter {
-fn block_accepted(&self, b: &Block) {}
+fn block_accepted(&self, _: &Block) {}
 }

@@ -26,7 +26,6 @@ use std::thread;
 use rand::os::OsRng;

 use grin_chain::types::*;
-use grin_chain::store;
 use grin_core::core::hash::Hashed;
 use grin_core::core::target::Difficulty;
 use grin_core::pow;
@@ -34,15 +33,13 @@ use grin_core::core;
 use grin_core::consensus;
 use grin_core::pow::cuckoo;
 use grin_core::global;
-use grin_core::global::{MiningParameterMode,MINING_PARAMETER_MODE};
+use grin_core::global::MiningParameterMode;

-use grin::{ServerConfig, MinerConfig};

 use grin_core::pow::MiningWorker;

 #[test]
 fn mine_empty_chain() {
-env_logger::init();
+let _ = env_logger::init();
 global::set_mining_mode(MiningParameterMode::AutomatedTesting);
 let mut rng = OsRng::new().unwrap();
 let chain = grin_chain::Chain::init(".grin".to_string(), Arc::new(NoopAdapter {}))
@@ -52,7 +49,6 @@ fn mine_empty_chain() {
 let secp = secp::Secp256k1::with_caps(secp::ContextFlag::Commit);
 let reward_key = secp::key::SecretKey::new(&secp, &mut rng);

-let server_config = ServerConfig::default();
 let mut miner_config = grin::MinerConfig {
 enable_mining: true,
 burn_reward: true,
@@ -60,7 +56,7 @@ fn mine_empty_chain() {
 };
 miner_config.cuckoo_miner_plugin_dir = Some(String::from("../target/debug/deps"));

 let mut cuckoo_miner = cuckoo::Miner::new(consensus::EASINESS, global::sizeshift() as u32, global::proofsize());
 for n in 1..4 {
 let prev = chain.head_header().unwrap();
 let mut b = core::Block::new(&prev, vec![], reward_key).unwrap();
@@ -88,7 +84,7 @@ fn mine_empty_chain() {

 #[test]
 fn mine_forks() {
-env_logger::init();
+let _ = env_logger::init();
 let mut rng = OsRng::new().unwrap();
 let chain = grin_chain::Chain::init(".grin2".to_string(), Arc::new(NoopAdapter {}))
 .unwrap();

@@ -22,7 +22,6 @@ use std::fs::File;
 use toml;
 use grin::{ServerConfig,
 MinerConfig};
-use wallet::WalletConfig;

 use types::{ConfigMembers,
 GlobalConfig,
@@ -87,10 +86,10 @@ impl GlobalConfig {
 return Ok(())
 }
 }

 // Give up
 Err(ConfigError::FileNotFoundError(String::from("")))

 }

 /// Takes the path to a config file, or if NONE, tries
@@ -102,7 +101,7 @@ impl GlobalConfig {
 if let Some(fp) = file_path {
 return_value.config_file_path = Some(PathBuf::from(&fp));
 } else {
-return_value.derive_config_location();
+return_value.derive_config_location().unwrap();
 }

 //No attempt at a config file, just return defaults
@@ -124,6 +123,7 @@ impl GlobalConfig {
 return_value.read_config()
 }

+/// Read config
 pub fn read_config(mut self) -> Result<GlobalConfig, ConfigError> {
 let mut file = File::open(self.config_file_path.as_mut().unwrap())?;
 let mut contents = String::new();
@@ -149,6 +149,7 @@ impl GlobalConfig {
 }
 }

+/// Serialize config
 pub fn ser_config(&mut self) -> Result<String, ConfigError> {
 let encoded:Result<String, toml::ser::Error> = toml::to_string(self.members.as_mut().unwrap());
 match encoded {
@@ -169,6 +170,7 @@ impl GlobalConfig {
 return self.members.as_mut().unwrap().wallet.as_mut().unwrap().enable_wallet;
 }*/

+/// Enable mining
 pub fn mining_enabled(&mut self) -> bool {
 return self.members.as_mut().unwrap().mining.as_mut().unwrap().enable_mining;
 }
@@ -186,11 +188,11 @@ fn test_read_config() {
 test_mode = false
 #7 = FULL_NODE, not sure how to serialise this properly to use constants
 capabilities = [7]

 [server.p2p_config]
 host = "127.0.0.1"
 port = 13414

 #Mining section is optional, if it's not here it will default to not mining
 [mining]
 enable_mining = true
@@ -206,4 +208,4 @@ fn test_read_config() {
 println!("Decoded.server: {:?}", decoded.server);
 println!("Decoded wallet: {:?}", decoded.wallet);
 panic!("panic");
 }

@@ -20,7 +20,6 @@ use std::fmt;

 use grin::{ServerConfig,
 MinerConfig};
-use wallet::WalletConfig;


 /// Error type wrapping config errors.
@@ -54,7 +53,7 @@ impl fmt::Display for ConfigError {
 ConfigError::SerializationError(ref message) => {
 write!(f, "Error serializing configuration: {}", message)
 }
 }
 }
 }

@@ -67,9 +66,9 @@ impl From<io::Error> for ConfigError {
 }
 }

 /// Going to hold all of the various configuration types
 /// separately for now, then put them together as a single
 /// ServerConfig object afterwards. This is to flatten
 /// out the configuration file into logical sections,
 /// as they tend to be quite nested in the code
 /// Most structs optional, as they may or may not
@@ -77,12 +76,13 @@ impl From<io::Error> for ConfigError {

 #[derive(Debug, Serialize, Deserialize)]
 pub struct GlobalConfig {
-//Keep track of the file we've read
+///Keep track of the file we've read
 pub config_file_path: Option<PathBuf>,
-//keep track of whether we're using
-//a config file or just the defaults
-//for each member
+/// keep track of whether we're using
+/// a config file or just the defaults
+/// for each member
 pub using_config_file: bool,
+/// Global member config
 pub members: Option<ConfigMembers>,
 }

@@ -93,11 +93,13 @@ pub struct GlobalConfig {

 #[derive(Debug, Serialize, Deserialize)]
 pub struct ConfigMembers {
+/// Server config
 pub server: ServerConfig,
+/// Mining config
 pub mining: Option<MinerConfig>,
 //removing wallet from here for now,
 //as its concerns are separate from the server's, really
 //given it needs to manage keys. It should probably
 //stay command line only for the time being
 //pub wallet: Option<WalletConfig>
 }

@@ -24,7 +24,6 @@ use core::{Input, Output, Proof, TxKernel, Transaction, COINBASE_KERNEL, COINBAS
 use core::transaction::merkle_inputs_outputs;
 use consensus::REWARD;
 use consensus::MINIMUM_DIFFICULTY;
-use consensus::PROOFSIZE;
 use core::hash::{Hash, Hashed, ZERO_HASH};
 use core::target::Difficulty;
 use ser::{self, Readable, Reader, Writeable, Writer};
@@ -83,7 +83,10 @@ pub trait Committed {

 /// Proof of work
 pub struct Proof {
+/// The nonces
 pub nonces:Vec<u32>,

+/// The proof size
 pub proof_size: usize,
 }

@@ -124,7 +127,7 @@ impl Clone for Proof {
 }

 impl Proof {

 /// Builds a proof with all bytes zeroed out
 pub fn new(in_nonces:Vec<u32>) -> Proof {
 Proof {

@@ -38,7 +38,7 @@
 use std::clone::Clone;
 use std::fmt::Debug;
 use std::marker::PhantomData;
-use std::ops::{self, Deref};
+use std::ops::{self};

 use core::hash::{Hash, Hashed};
 use ser::{self, Readable, Reader, Writeable, Writer};
@@ -96,11 +96,14 @@ impl<T> Summable for NoSum<T> {
 /// of two HashSums is the (Hash(h1|h2), h1 + h2) HashSum.
 #[derive(Debug, Clone, PartialEq, Eq)]
 pub struct HashSum<T> where T: Summable {
+/// The hash
 pub hash: Hash,
+/// The sum
 pub sum: T::Sum,
 }

 impl<T> HashSum<T> where T: Summable + Writeable {
+/// Create a hash sum from a summable
 pub fn from_summable(idx: u64, elmt: T) -> HashSum<T> {
 let hash = Hashed::hash(&elmt);
 let sum = elmt.sum();
@@ -156,7 +159,7 @@ pub trait Backend<T> where T: Summable {
 /// Heavily relies on navigation operations within a binary tree. In particular,
 /// all the implementation needs to keep track of the MMR structure is how far
 /// we are in the sequence of nodes making up the MMR.
-struct PMMR<T, B> where T: Summable, B: Backend<T> {
+pub struct PMMR<T, B> where T: Summable, B: Backend<T> {
 last_pos: u64,
 backend: B,
 // only needed for parameterizing Backend
@@ -179,7 +182,7 @@ impl<T, B> PMMR<T, B> where T: Summable + Writeable + Debug + Clone, B: Backend<
 pub fn root(&self) -> HashSum<T> {
 let peaks_pos = peaks(self.last_pos);
 let peaks: Vec<Option<HashSum<T>>> = map_vec!(peaks_pos, |&pi| self.backend.get(pi));

 let mut ret = None;
 for peak in peaks {
 ret = match (ret, peak) {
@@ -199,7 +202,7 @@ impl<T, B> PMMR<T, B> where T: Summable + Writeable + Debug + Clone, B: Backend<
 let mut to_append = vec![current_hashsum.clone()];
 let mut height = 0;
 let mut pos = elmt_pos;

 // we look ahead one position in the MMR, if the expected node has a higher
 // height it means we have to build a higher peak by summing with a previous
 // sibling. we do it iteratively in case the new peak itself allows the
@@ -231,13 +234,12 @@ impl<T, B> PMMR<T, B> where T: Summable + Writeable + Debug + Clone, B: Backend<
 // only leaves can be pruned
 return;
 }

 // loop going up the tree, from node to parent, as long as we stay inside
 // the tree.
 let mut to_prune = vec![];
 let mut current = position;
 while current+1 < self.last_pos {
-let current_height = bintree_postorder_height(current);
 let next_height = bintree_postorder_height(current+1);

 // compare the node's height to the next height, if the next is higher
@@ -257,7 +259,7 @@ impl<T, B> PMMR<T, B> where T: Summable + Writeable + Debug + Clone, B: Backend<
 // can't prune when our parent isn't here yet
 break;
 }
 to_prune.push(current);

 // if we have a pruned sibling, we can continue up the tree
 // otherwise we're done
@@ -289,7 +291,7 @@ fn peaks(num: u64) -> Vec<u64> {
 if bintree_postorder_height(num+1) > bintree_postorder_height(num) {
 return vec![];
 }

 // our top peak is always on the leftmost side of the tree and leftmost trees
 // have for index a binary values with all 1s (i.e. 11, 111, 1111, etc.)
 let mut top = 1;
@@ -454,8 +456,10 @@ fn most_significant_pos(num: u64) -> u64 {
 #[cfg(test)]
 mod test {
 use super::*;
-use core::hash::{Hash, Hashed};
+use core::hash::Hashed;
 use std::sync::{Arc, Mutex};
+use std::ops::Deref;


 #[test]
 fn some_all_ones() {
@@ -687,7 +691,7 @@ mod test {
 pmmr.prune(1);
 assert_eq!(orig_root, pmmr.root());
 assert_eq!(ba.used_size(), orig_sz - 7);

 // pruning everything should only leave us the peaks
 for n in 1..16 {
 pmmr.prune(n);

@@ -698,6 +698,7 @@ where
 }

 #[allow(dead_code)]
+#[allow(missing_docs)]
 pub fn print_tree<T>(tree: &SumTree<T>)
 where
 T: Summable + Writeable,

@@ -21,11 +21,9 @@

 use std::fmt;
 use std::ops::{Add, Mul, Div, Sub};
-use std::io::Cursor;
-use std::u64::MAX;

 use serde::{Serialize, Serializer, Deserialize, Deserializer, de};
-use byteorder::{ByteOrder, ReadBytesExt, BigEndian};
+use byteorder::{ByteOrder, BigEndian};

 use core::hash::Hash;
 use ser::{self, Reader, Writer, Writeable, Readable};
@@ -150,7 +148,7 @@ impl<'de> de::Visitor<'de> for DiffVisitor {
 where E: de::Error
 {
 let num_in = s.parse::<u64>();
-if let Err(e)=num_in {
+if let Err(_)=num_in {
 return Err(de::Error::invalid_value(de::Unexpected::Str(s), &"a value number"));
 };
 Ok(Difficulty { num: num_in.unwrap() })

@@ -18,7 +18,6 @@ use time;

 use core;
 use consensus::MINIMUM_DIFFICULTY;
-use consensus::PROOFSIZE;
 use core::hash::Hashed;
 use core::target::Difficulty;
 use global;

@@ -28,14 +28,19 @@ use consensus::DEFAULT_SIZESHIFT;
 /// Define these here, as they should be developer-set, not really tweakable
 /// by users

+/// Automated testing sizeshift
 pub const AUTOMATED_TESTING_SIZESHIFT:u8 = 10;

+/// Automated testing proof size
 pub const AUTOMATED_TESTING_PROOF_SIZE:usize = 4;

+/// User testing sizeshift
 pub const USER_TESTING_SIZESHIFT:u8 = 16;

+/// User testing proof size
 pub const USER_TESTING_PROOF_SIZE:usize = 42;

+/// Mining parameter modes
 #[derive(Debug, Clone, Serialize, Deserialize)]
 pub enum MiningParameterMode {
 /// For CI testing
@@ -49,14 +54,17 @@ pub enum MiningParameterMode {
 }

 lazy_static!{
+/// The mining parameter mode
 pub static ref MINING_PARAMETER_MODE: RwLock<MiningParameterMode> = RwLock::new(MiningParameterMode::Production);
 }

+/// Set the mining mode
 pub fn set_mining_mode(mode:MiningParameterMode){
 let mut param_ref=MINING_PARAMETER_MODE.write().unwrap();
 *param_ref=mode;
 }

+/// The sizeshift
 pub fn sizeshift() -> u8 {
 let param_ref=MINING_PARAMETER_MODE.read().unwrap();
 match *param_ref {
@@ -66,6 +74,7 @@ pub fn sizeshift() -> u8 {
 }
 }

+/// The proofsize
 pub fn proofsize() -> usize {
 let param_ref=MINING_PARAMETER_MODE.read().unwrap();
 match *param_ref {
@@ -75,6 +84,7 @@ pub fn proofsize() -> usize {
 }
 }

+/// Are we in automated testing mode?
 pub fn is_automated_testing_mode() -> bool {
 let param_ref=MINING_PARAMETER_MODE.read().unwrap();
 if let MiningParameterMode::AutomatedTesting=*param_ref {
@@ -83,4 +93,3 @@ pub fn is_automated_testing_mode() -> bool {
 return false;
 }
 }

@@ -39,6 +39,7 @@ macro_rules! try_map_vec {

 /// Eliminates some of the verbosity in having iter and collect
 /// around every fitler_map call.
+#[macro_export]
 macro_rules! filter_map_vec {
 ($thing:expr, $mapfn:expr ) => {
 $thing.iter()
@@ -52,6 +53,7 @@ macro_rules! filter_map_vec {
 /// Example:
 /// let foo = vec![1,2,3]
 /// println!(tee!(foo, foo.append(vec![3,4,5]))
+#[macro_export]
 macro_rules! tee {
 ($thing:ident, $thing_expr:expr) => {
 {

@@ -31,8 +31,6 @@ use consensus::EASINESS;
 use core::BlockHeader;
 use core::hash::Hashed;
 use core::Proof;
-use global;
-use global::{MiningParameterMode, MINING_PARAMETER_MODE};
 use core::target::Difficulty;
 use pow::cuckoo::{Cuckoo, Error};

@@ -41,10 +39,10 @@ use pow::cuckoo::{Cuckoo, Error};
 ///

 pub trait MiningWorker {

 /// This only sets parameters and does initialisation work now
 fn new(ease: u32, sizeshift: u32, proof_size:usize) -> Self;

 /// Actually perform a mining attempt on the given input and
 /// return a proof if found
 fn mine(&mut self, header: &[u8]) -> Result<Proof, Error>;
@@ -70,8 +68,8 @@ pub fn pow20<T: MiningWorker>(miner:&mut T, bh: &mut BlockHeader, diff: Difficul

 /// Runs a proof of work computation over the provided block using the provided Mining Worker,
 /// until the required difficulty target is reached. May take a while for a low target...
 pub fn pow_size<T: MiningWorker>(miner:&mut T, bh: &mut BlockHeader,
-diff: Difficulty, sizeshift: u32) -> Result<(), Error> {
+diff: Difficulty, _: u32) -> Result<(), Error> {
 let start_nonce = bh.nonce;

 // try to find a cuckoo cycle on that header hash
@@ -104,9 +102,12 @@ pub fn pow_size<T: MiningWorker>(miner:&mut T, bh: &mut BlockHeader,
 #[cfg(test)]
 mod test {
 use super::*;
+use global;
 use core::target::Difficulty;
 use genesis;
 use consensus::MINIMUM_DIFFICULTY;
+use global::MiningParameterMode;


 #[test]
 fn genesis_pow() {

@@ -388,6 +388,7 @@ impl Writeable for [u8; 4] {

 /// Useful marker trait on types that can be sized byte slices
 pub trait AsFixedBytes: Sized + AsRef<[u8]> {
+/// The length in bytes
 fn len(&self) -> usize;
 }

@@ -13,21 +13,19 @@
 // limitations under the License.

 use std::net::SocketAddr;
-use std::ops::Deref;
-use std::sync::{Arc, Mutex, RwLock};
+use std::sync::{Arc, RwLock};
 use std::thread;

 use chain::{self, ChainAdapter};
 use core::core::{self, Output};
 use core::core::hash::{Hash, Hashed};
 use core::core::target::Difficulty;
-use p2p::{self, NetAdapter, Server, PeerStore, PeerData, Capabilities, State};
+use p2p::{self, NetAdapter, Server, PeerStore, PeerData, State};
 use pool;
 use secp::pedersen::Commitment;
 use util::OneTime;
 use store;
 use sync;
-use core::global;
 use core::global::{MiningParameterMode,MINING_PARAMETER_MODE};

 /// Implementation of the NetAdapter for the blockchain. Gets notified when new
@@ -210,9 +208,17 @@ impl NetToChainAdapter {
 pub fn start_sync(&self, sync: sync::Syncer) {
 let arc_sync = Arc::new(sync);
 self.syncer.init(arc_sync.clone());
-thread::Builder::new().name("syncer".to_string()).spawn(move || {
-arc_sync.run();
+let spawn_result = thread::Builder::new().name("syncer".to_string()).spawn(move || {
+let sync_run_result = arc_sync.run();
+match sync_run_result {
+Ok(_) => {}
+Err(_) => {}
+}
 });
+match spawn_result {
+Ok(_) => {}
+Err(_) => {}
+}
 }

 /// Prepare options for the chain pipeline

@@ -65,7 +65,7 @@ pub struct HeaderPartWriter {

 impl Default for HeaderPartWriter {
 fn default() -> HeaderPartWriter {
 HeaderPartWriter {
 bytes_written: 0,
 writing_pre: true,
 pre_nonce: Vec::new(),
@@ -91,13 +91,13 @@ impl ser::Writer for HeaderPartWriter {
 fn write_fixed_bytes<T: AsFixedBytes>(&mut self, bytes_in: &T) -> Result<(), ser::Error> {
 if self.writing_pre {
 for i in 0..bytes_in.len() {self.pre_nonce.push(bytes_in.as_ref()[i])};

 } else if self.bytes_written!=0 {
 for i in 0..bytes_in.len() {self.post_nonce.push(bytes_in.as_ref()[i])};
 }

 self.bytes_written+=bytes_in.len();

 if self.bytes_written==PRE_NONCE_SIZE && self.writing_pre {
 self.writing_pre=false;
 self.bytes_written=0;
@@ -140,20 +140,20 @@ impl Miner {
 }

 /// Inner part of the mining loop for cuckoo-miner asynch mode
 pub fn inner_loop_async(&self, plugin_miner:&mut PluginMiner,
 difficulty:Difficulty,
 b:&mut Block,
 cuckoo_size: u32,
 head:&BlockHeader,
 latest_hash:&Hash)
 -> Option<Proof> {

 debug!("(Server ID: {}) Mining at Cuckoo{} for at most 2 secs at height {} and difficulty {}.",
 self.debug_output_id,
 cuckoo_size,
 b.header.height,
 b.header.difficulty);

 // look for a pow for at most 2 sec on the same block (to give a chance to new
 // transactions) and as long as the head hasn't changed
 // Will change this to something else at some point
@@ -189,12 +189,12 @@ impl Miner {
 }

 /// The inner part of mining loop for synchronous mode
 pub fn inner_loop_sync<T: MiningWorker>(&self,
 miner:&mut T,
 b:&mut Block,
 cuckoo_size: u32,
 head:&BlockHeader,
 latest_hash:&mut Hash)
 -> Option<Proof> {
 // look for a pow for at most 2 sec on the same block (to give a chance to new
 // transactions) and as long as the head hasn't changed
@@ -206,7 +206,7 @@ impl Miner {
 latest_hash,
 b.header.difficulty);
 let mut iter_count = 0;

 if self.config.slow_down_in_millis != None && self.config.slow_down_in_millis.unwrap() > 0 {
 debug!("(Server ID: {}) Artificially slowing down loop by {}ms per iteration.",
 self.debug_output_id,
@@ -215,7 +215,7 @@ impl Miner {

 let mut sol=None;
 while head.hash() == *latest_hash && time::get_time().sec < deadline {

 let pow_hash = b.hash();
 if let Ok(proof) = miner.mine(&pow_hash[..]) {
 let proof_diff=proof.clone().to_difficulty();
@@ -250,9 +250,9 @@ impl Miner {

 /// Starts the mining loop, building a new block on top of the existing
 /// chain anytime required and looking for PoW solution.
 pub fn run_loop(&self,
 miner_config:MinerConfig,
 server_config:ServerConfig,
 cuckoo_size:u32,
 proof_size:usize) {

@@ -283,28 +283,28 @@ impl Miner {
 }
 if let Some(mut p) = plugin_miner.as_mut() {
 if use_async {
 sol = self.inner_loop_async(&mut p,
 b.header.difficulty.clone(),
 &mut b,
 cuckoo_size,
 &head,
 &latest_hash);
 } else {
 sol = self.inner_loop_sync(p,
 &mut b,
 cuckoo_size,
 &head,
 &mut latest_hash);
 }
 }
 if let Some(mut m) = miner.as_mut() {
 sol = self.inner_loop_sync(m,
 &mut b,
 cuckoo_size,
 &head,
 &mut latest_hash);
 }

 // if we found a solution, push our block out
 if let Some(proof) = sol {
 info!("(Server ID: {}) Found valid proof of work, adding block {}.",
@@ -322,7 +322,7 @@ impl Miner {
 } else {
 coinbase = self.get_coinbase();
 }
 }
 }
 }

@ -22,9 +22,7 @@ use std::env;
|
||||||
use core::pow::cuckoo;
|
use core::pow::cuckoo;
|
||||||
use core::pow::cuckoo::Error;
|
use core::pow::cuckoo::Error;
|
||||||
use core::pow::MiningWorker;
|
use core::pow::MiningWorker;
|
||||||
use core::consensus::DEFAULT_SIZESHIFT;
|
use core::global;
|
||||||
use core::global;
|
|
||||||
use std::collections::HashMap;
|
|
||||||
|
|
||||||
use core::core::Proof;
|
use core::core::Proof;
|
||||||
use types::{MinerConfig, ServerConfig};
|
use types::{MinerConfig, ServerConfig};
|
||||||
|
@ -35,9 +33,7 @@ use cuckoo_miner::{
|
||||||
CuckooMiner,
|
CuckooMiner,
|
||||||
CuckooPluginManager,
|
CuckooPluginManager,
|
||||||
CuckooMinerConfig,
|
CuckooMinerConfig,
|
||||||
CuckooMinerError,
|
CuckooMinerSolution};
|
||||||
CuckooMinerSolution,
|
|
||||||
CuckooPluginCapabilities};
|
|
||||||
|
|
||||||
//For now, we're just going to keep a static reference around to the loaded config
|
//For now, we're just going to keep a static reference around to the loaded config
|
||||||
//And not allow querying the plugin directory twice once a plugin has been selected
|
//And not allow querying the plugin directory twice once a plugin has been selected
|
||||||
|
@ -48,7 +44,9 @@ lazy_static!{
|
||||||
static ref LOADED_CONFIG: Mutex<Option<CuckooMinerConfig>> = Mutex::new(None);
|
static ref LOADED_CONFIG: Mutex<Option<CuckooMinerConfig>> = Mutex::new(None);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/// plugin miner
|
||||||
pub struct PluginMiner {
|
pub struct PluginMiner {
|
||||||
|
/// the miner
|
||||||
pub miner:Option<CuckooMiner>,
|
pub miner:Option<CuckooMiner>,
|
||||||
last_solution: CuckooMinerSolution,
|
last_solution: CuckooMinerSolution,
|
||||||
config: CuckooMinerConfig,
|
config: CuckooMinerConfig,
|
||||||
|
@ -65,7 +63,8 @@ impl Default for PluginMiner {
|
||||||
}
|
}
|
||||||
|
|
||||||
impl PluginMiner {
|
impl PluginMiner {
|
||||||
pub fn init(&mut self, miner_config: MinerConfig, server_config: ServerConfig){
|
/// Init the plugin miner
|
||||||
|
pub fn init(&mut self, miner_config: MinerConfig, _server_config: ServerConfig){
|
||||||
//Get directory of executable
|
//Get directory of executable
|
||||||
let mut exe_path=env::current_exe().unwrap();
|
let mut exe_path=env::current_exe().unwrap();
|
||||||
exe_path.pop();
|
exe_path.pop();
|
||||||
|
@ -83,8 +82,8 @@ impl PluginMiner {
|
||||||
|
|
||||||
//First, load and query the plugins in the given directory
//These should all be stored in 'deps' at the moment relative
//to the executable path, though they should appear somewhere else
//when packaging is more thought out

let mut loaded_config_ref = LOADED_CONFIG.lock().unwrap();

@@ -100,7 +99,7 @@ impl PluginMiner {
let mut plugin_manager = CuckooPluginManager::new().unwrap();
let result=plugin_manager.load_plugin_dir(plugin_install_path);

-if let Err(e) = result {
+if let Err(_) = result {
error!("Unable to load cuckoo-miner plugin directory, either from configuration or [exe_path]/deps.");
panic!("Unable to load plugin directory... Please check configuration values");
}
@@ -115,7 +114,7 @@ impl PluginMiner {
//insert it into the miner configuration being created below

let mut config = CuckooMinerConfig::new();

info!("Mining using plugin: {}", caps[0].full_path.clone());
config.plugin_full_path = caps[0].full_path.clone();
if let Some(l) = miner_config.cuckoo_miner_parameter_list {
@@ -134,12 +133,13 @@ impl PluginMiner {
panic!("Unable to init mining plugin.");
}

self.config=config.clone();
self.miner=Some(result.unwrap());
}

+/// Get the miner
pub fn get_consumable(&mut self)->CuckooMiner{

//this will load the associated plugin
let result=CuckooMiner::new(self.config.clone());
if let Err(e) = result {
@@ -148,7 +148,7 @@ impl PluginMiner {
}
result.unwrap()
}

}

impl MiningWorker for PluginMiner {
@@ -158,9 +158,9 @@ impl MiningWorker for PluginMiner {
/// version of the miner for now, though this should become
/// configurable somehow

-fn new(ease: u32,
-sizeshift: u32,
-proof_size: usize) -> Self {
+fn new(_ease: u32,
+_sizeshift: u32,
+_proof_size: usize) -> Self {
PluginMiner::default()
}

@@ -175,4 +175,3 @@ impl MiningWorker for PluginMiner {
Err(Error::NoSolution)
}
}
-
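
A note on the recurring pattern above: most of these edits silence rustc's unused-variable warnings by matching with `_` (or an `_`-prefixed name) instead of binding an identifier that is never read. A minimal, std-only sketch of the two idioms; load_plugins is a stand-in, not the cuckoo-miner API:

fn load_plugins(ok: bool) -> Result<(), String> {
    if ok { Ok(()) } else { Err("no plugin directory".to_string()) }
}

fn main() {
    let result = load_plugins(false);

    // `_` binds nothing, so the unused_variables lint stays quiet.
    if let Err(_) = result {
        println!("unable to load plugin directory");
    }

    // An `_`-prefixed name still binds the value for ad-hoc debugging
    // but is likewise exempt from the lint.
    if let Err(_e) = result {
        println!("still an error");
    }
}
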
@@ -19,10 +19,8 @@
use rand::{thread_rng, Rng};
use std::cmp::min;
use std::net::SocketAddr;
-use std::ops::Deref;
use std::str::{self, FromStr};
use std::sync::Arc;
-use std::thread;
use std::time;

use cpupool;
@@ -93,7 +91,12 @@ impl Seeder {
for p in disconnected {
if p.is_banned() {
debug!("Marking peer {} as banned.", p.info.addr);
-peer_store.update_state(p.info.addr, p2p::State::Banned);
+let update_result = peer_store.update_state(
+p.info.addr, p2p::State::Banned);
+match update_result {
+Ok(()) => {}
+Err(_) => {}
+}
}
}

@@ -240,11 +243,19 @@ fn connect_and_req(capab: p2p::Capabilities,
.then(move |p| {
match p {
Ok(Some(p)) => {
-p.send_peer_request(capab);
+let peer_result = p.send_peer_request(capab);
+match peer_result {
+Ok(()) => {}
+Err(_) => {}
+}
}
Err(e) => {
error!("Peer request error: {:?}", e);
-peer_store.update_state(addr, p2p::State::Defunct);
+let update_result = peer_store.update_state(addr, p2p::State::Defunct);
+match update_result {
+Ok(()) => {}
+Err(_) => {}
+}
}
_ => {}
}
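
The deliberately empty match on a must-use Result, repeated throughout this file, acknowledges both outcomes without acting on them. A std-only sketch; update_state here is a stand-in for the peer-store call:

fn update_state(addr: &str) -> Result<(), String> {
    if addr.is_empty() {
        Err("no address given".to_string())
    } else {
        Ok(())
    }
}

fn main() {
    // Dropping the Result on the floor would trigger the unused-result
    // warning; the explicit match handles both arms and does nothing.
    match update_state("10.0.0.1:13414") {
        Ok(()) => {}
        Err(_) => {}
    }

    // A terser idiom with the same effect:
    let _ = update_state("");
}
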
@@ -17,36 +17,31 @@
//! as a facade.

use std::net::SocketAddr;
-use std::sync::{Arc, Mutex, RwLock};
+use std::sync::{Arc, RwLock};
use std::thread;
use std::time;

-use futures::{future, Future, Stream};
+use futures::{Future, Stream};
use tokio_core::reactor;
use tokio_timer::Timer;

use adapters::*;
use api;
use chain;
-use chain::ChainStore;
-use core::{self, consensus};
-use core::core::hash::Hashed;
-use core::pow::cuckoo;
-use core::pow::MiningWorker;
use miner;
use p2p;
use pool;
use seed;
-use store;
use sync;
use types::*;

-use plugin::PluginMiner;
use core::global;

/// Grin server holding internal structures.
pub struct Server {
+/// server config
pub config: ServerConfig,
+/// event handle
evt_handle: reactor::Handle,
/// handle to our network server
p2p: Arc<p2p::Server>,
@@ -137,6 +132,7 @@ impl Server {
Ok(())
}

+/// Number of peers
pub fn peer_count(&self) -> u32 {
self.p2p.peer_count()
}
@@ -152,10 +148,11 @@ impl Server {
miner.set_debug_output_id(format!("Port {}",self.config.p2p_config.unwrap().port));
let server_config = self.config.clone();
thread::spawn(move || {
miner.run_loop(config.clone(), server_config, cuckoo_size as u32, proof_size);
});
}

+/// The chain head
pub fn head(&self) -> chain::Tip {
self.chain.head().unwrap()
}
@@ -20,7 +20,6 @@
/// How many block bodies to download in parallel
const MAX_BODY_DOWNLOADS: usize = 8;

-use std::ops::Deref;
use std::sync::{Arc, Mutex};
use std::thread;
use std::time::{Instant, Duration};
@@ -152,7 +151,11 @@ impl Syncer {
while blocks_to_download.len() > 0 && blocks_downloading.len() < MAX_BODY_DOWNLOADS {
let h = blocks_to_download.pop().unwrap();
let peer = self.p2p.random_peer().unwrap();
-peer.send_block_request(h);
+let send_result = peer.send_block_request(h);
+match send_result {
+Ok(_) => {}
+Err(_) => {}
+}
blocks_downloading.push((h, Instant::now()));
}
debug!("Requesting more full block hashes to download, total: {}.",
@@ -199,7 +202,7 @@ impl Syncer {
}
// ask for more headers if we got as many as required
if hs_len == (p2p::MAX_BLOCK_HEADERS as usize) {
-self.request_headers();
+self.request_headers().unwrap();
}
}
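
Where the cleanup opts for .unwrap() instead of an empty match, as with request_headers() above, an Err now aborts the sync thread loudly rather than disappearing. A sketch of that trade-off with a stand-in function:

fn request_headers(have_peer: bool) -> Result<usize, String> {
    if have_peer {
        Ok(512)
    } else {
        Err("no peer available".to_string())
    }
}

fn main() {
    // Quietly discards any error:
    let _ = request_headers(true);

    // Panics with the error message on Err, so a failure surfaces
    // during testing instead of stalling the sync silently:
    let n = request_headers(true).unwrap();
    println!("requested {} more headers", n);
}
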
@@ -84,7 +84,7 @@ pub struct ServerConfig {

/// Method used to get the list of seed nodes for initial bootstrap.
pub seeding_type: Seeding,

/// The list of seed nodes, if using Seeding as a seed type
pub seeds: Option<Vec<String>>,

@@ -173,6 +173,8 @@ impl Default for MinerConfig {

#[derive(Clone)]
pub struct ServerStats {
+/// Number of peers
pub peer_count:u32,
+/// Chain head
pub head: chain::Tip,
}
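
The field-level doc comments added to ServerStats are what the missing_docs lint asks for: it covers public struct fields as well as items, and plain // comments do not count. A minimal sketch with the lint denied crate-wide:

//! Crate-level docs are required too once missing_docs is denied.
#![deny(missing_docs)]

/// A snapshot of a running server, mirroring the shape above.
pub struct ServerStats {
    /// Number of peers
    pub peer_count: u32,
}

fn main() {
    let stats = ServerStats { peer_count: 8 };
    println!("{} peers", stats.peer_count);
}
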
@@ -30,23 +30,15 @@ extern crate futures_cpupool;
use std::thread;
use std::time;
use std::default::Default;
-use std::mem;
use std::fs;
-use std::sync::{Arc, Mutex, RwLock};
-use std::ops::Deref;
+use std::sync::{Arc, Mutex};

-use futures::{Future};
-use futures::future::join_all;
-use futures::task::park;
use tokio_core::reactor;
-use tokio_core::reactor::Remote;
-use tokio_core::reactor::Handle;
use tokio_timer::Timer;

use secp::Secp256k1;

use wallet::WalletConfig;
-use core::consensus;


/// Just removes all results from previous runs
@@ -61,18 +53,19 @@ pub fn clean_all_output(test_name_dir:&str){

/// Errors that can be returned by LocalServerContainer
#[derive(Debug)]
+#[allow(dead_code)]
pub enum Error {
Internal(String),
Argument(String),
NotFound,
}

/// All-in-one server configuration struct, for convenience
///

#[derive(Clone)]
pub struct LocalServerContainerConfig {

//user friendly name for the server, also denotes what dir
//the data files will appear in
pub name: String,
@@ -81,8 +74,8 @@ pub struct LocalServerContainerConfig {
pub base_addr: String,

//Port the server (p2p) is running on
pub p2p_server_port: u16,

//Port the API server is running on
pub api_server_port: u16,

@@ -113,7 +106,7 @@ pub struct LocalServerContainerConfig {
pub coinbase_wallet_address: String,

//When running a wallet, the address to check inputs and send
//finalised transactions to,
pub wallet_validating_node_url:String,


@@ -168,7 +161,7 @@ pub struct LocalServerContainer {

//the list of peers to connect to
pub peer_list: Vec<String>,

//base directory for the server instance
working_dir: String,

@@ -193,8 +186,7 @@ impl LocalServerContainer {
}))
}

-pub fn run_server(&mut self,
-duration_in_seconds: u64) -> grin::ServerStats
+pub fn run_server(&mut self, duration_in_seconds: u64) -> grin::ServerStats
{
let mut event_loop = reactor::Core::new().unwrap();

@@ -208,7 +200,7 @@ impl LocalServerContainer {
seeds=vec![self.config.seed_addr.to_string()];
}


let s = grin::Server::future(
grin::ServerConfig{
api_http_addr: api_addr,
@@ -227,7 +219,7 @@ impl LocalServerContainer {
thread::sleep(time::Duration::from_millis(1000));
}

-let mut miner_config = grin::MinerConfig {
+let miner_config = grin::MinerConfig {
enable_mining: self.config.start_miner,
burn_reward: self.config.burn_mining_rewards,
use_cuckoo_miner: false,
@@ -238,7 +230,7 @@ impl LocalServerContainer {
slow_down_in_millis: Some(self.config.miner_slowdown_in_millis.clone()),
..Default::default()
};

if self.config.start_miner == true {
println!("starting Miner on port {}", self.config.p2p_server_port);
s.start_miner(miner_config);
@@ -251,7 +243,7 @@ impl LocalServerContainer {

let timeout = Timer::default().sleep(time::Duration::from_secs(duration_in_seconds));

-event_loop.run(timeout);
+event_loop.run(timeout).unwrap();

if self.wallet_is_running{
self.stop_wallet();
@@ -260,15 +252,15 @@ impl LocalServerContainer {
s.get_server_stats().unwrap()

}

/// Starts a wallet daemon to receive and returns the
/// listening server url

-pub fn run_wallet(&mut self, duration_in_seconds: u64) {
+pub fn run_wallet(&mut self, _duration_in_seconds: u64) {

//URL on which to start the wallet listener (i.e. api server)
let url = format!("{}:{}", self.config.base_addr, self.config.wallet_port);

//Just use the name of the server for a seed for now
let seed = format!("{}", self.config.name);

@@ -277,18 +269,18 @@ impl LocalServerContainer {
let s = Secp256k1::new();
let key = wallet::ExtendedKey::from_seed(&s, seed.as_bytes())
.expect("Error deriving extended key from seed.");

println!("Starting the Grin wallet receiving daemon on {} ", self.config.wallet_port );

let mut wallet_config = WalletConfig::default();

wallet_config.api_http_addr = format!("http://{}", url);
wallet_config.check_node_api_http_addr = self.config.wallet_validating_node_url.clone();
wallet_config.data_file_dir=self.working_dir.clone();

let mut api_server = api::ApiServer::new("/v1".to_string());

api_server.register_endpoint("/receive".to_string(), wallet::WalletReceiver {
key: key,
config: wallet_config,
});
@@ -301,9 +293,9 @@ impl LocalServerContainer {
self.wallet_is_running = true;

}

/// Stops the running wallet server

pub fn stop_wallet(&mut self){
let mut api_server = self.api_server.as_mut().unwrap();
api_server.stop();
@@ -314,7 +306,7 @@ impl LocalServerContainer {
pub fn add_peer(&mut self, addr:String){
self.peer_list.push(addr);
}

}

/// Configuration values for container pool
@@ -428,16 +420,16 @@ impl LocalServerContainerPool {
self.is_seeding=true;
}

-let server_address = format!("{}:{}",
+let _server_address = format!("{}:{}",
server_config.base_addr,
server_config.p2p_server_port);

-let mut server_container = LocalServerContainer::new(server_config.clone()).unwrap();
+let server_container = LocalServerContainer::new(server_config.clone()).unwrap();
//self.server_containers.push(server_arc);

//Create a future that runs the server for however many seconds
//collect them all and run them in the run_all_servers
-let run_time = self.config.run_length_in_seconds;
+let _run_time = self.config.run_length_in_seconds;

self.server_containers.push(server_container);

@@ -447,9 +439,9 @@ impl LocalServerContainerPool {
/// adds n servers, ready to run
///
///
+#[allow(dead_code)]
pub fn create_servers(&mut self, number: u16){
-for n in 0..number {
+for _ in 0..number {
//self.create_server();
}
}
@@ -489,7 +481,7 @@ impl LocalServerContainerPool {

for handle in handles {
match handle.join() {
-Ok(v) => {}
+Ok(_) => {}
Err(e) => {
println!("Error starting server thread: {:?}", e);
panic!(e);
@@ -522,5 +514,4 @@ impl LocalServerContainerPool {
}
}
}
-
}
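
#[allow(dead_code)], used several times in this test harness, scopes the lint exemption to a single item rather than deleting helpers that only some test binaries exercise. A sketch:

// Only Internal is constructed below; the attribute keeps the other
// variants around without dead_code warnings.
#[derive(Debug)]
#[allow(dead_code)]
pub enum Error {
    Internal(String),
    Argument(String),
    NotFound,
}

#[allow(dead_code)]
fn helper_used_by_other_tests() -> Error {
    Error::NotFound
}

fn main() {
    println!("{:?}", Error::Internal("example".to_string()));
}
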
@@ -25,9 +25,6 @@ extern crate futures;
extern crate tokio_core;
extern crate tokio_timer;

-use std::sync::{Arc, Mutex, RwLock};
-use std::fs;
-
mod framework;

use std::thread;
@@ -35,7 +32,7 @@ use std::time;
use std::default::Default;

use futures::{Future, Poll, Async};
-use futures::task::park;
+use futures::task::current;
use tokio_core::reactor;
use tokio_timer::Timer;

@@ -51,7 +48,7 @@ use framework::{LocalServerContainer, LocalServerContainerConfig, LocalServerCon
/// Block and mining into a wallet for a bit
#[test]
fn basic_genesis_mine() {
-env_logger::init();
+let _ = env_logger::init();
global::set_mining_mode(MiningParameterMode::AutomatedTesting);

let test_name_dir = "genesis_mine";
@@ -82,7 +79,7 @@ fn basic_genesis_mine() {
/// messages they all end up connected.
#[test]
fn simulate_seeding() {
-env_logger::init();
+let _ = env_logger::init();
global::set_mining_mode(MiningParameterMode::AutomatedTesting);

let test_name_dir = "simulate_seeding";
@@ -116,13 +113,13 @@ fn simulate_seeding() {
server_config.p2p_server_port
));

-for i in 0..4 {
+for _ in 0..4 {
pool.create_server(&mut server_config);
}

pool.connect_all_peers();

-let result_vec = pool.run_all_servers();
+let _ = pool.run_all_servers();
}

/// Create 1 server, start it mining, then connect 4 other peers mining and
@@ -136,8 +133,9 @@ fn simulate_seeding() {
// being,
// As it's more for actively testing and hurts CI a lot
//#[test]
+#[allow(dead_code)]
fn simulate_parallel_mining() {
-env_logger::init();
+let _ = env_logger::init();
global::set_mining_mode(MiningParameterMode::AutomatedTesting);

let test_name_dir = "simulate_parallel_mining";
@@ -179,7 +177,7 @@ fn simulate_parallel_mining() {

pool.connect_all_peers();

-let result_vec = pool.run_all_servers();
+let _ = pool.run_all_servers();

// Check mining difficulty here?, though I'd think it's more valuable
// to simply output it. Can at least see the evolution of the difficulty target
@@ -335,7 +333,7 @@ impl<'a> Future for HeadChange<'a> {
Ok(Async::Ready(new_head))
} else {
// egregious polling, asking the task to schedule us every iteration
-park().unpark();
+current().notify();
Ok(Async::NotReady)
}
}
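
The park()/unpark() to current()/notify() switch tracks a rename in the futures 0.1 task API. A self-contained sketch of the same egregious-polling trick, assuming futures = "0.1" in Cargo.toml:

extern crate futures;

use futures::task;
use futures::{Async, Future, Poll};

// Counts down, asking to be polled again after every call.
struct Busy(u32);

impl Future for Busy {
    type Item = u32;
    type Error = ();

    fn poll(&mut self) -> Poll<u32, ()> {
        if self.0 == 0 {
            Ok(Async::Ready(42))
        } else {
            self.0 -= 1;
            // Old API: task::park().unpark();
            task::current().notify();
            Ok(Async::NotReady)
        }
    }
}

fn main() {
    assert_eq!(Busy(3).wait(), Ok(42));
}
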
@@ -24,12 +24,12 @@ use futures;
use futures::{Stream, Future};
use futures::stream;
use futures::sync::mpsc::{Sender, UnboundedSender, UnboundedReceiver};
-use tokio_core::io::{WriteHalf, ReadHalf, write_all, read_exact};
use tokio_core::net::TcpStream;
+use tokio_io::{AsyncRead, AsyncWrite};
+use tokio_io::io::{read_exact, write_all};
use tokio_timer::{Timer, TimerError};
-use tokio_io::*;

-use core::core::hash::{Hash, ZERO_HASH};
+use core::core::hash::Hash;
use core::ser;
use msg::*;
use types::Error;
@@ -65,6 +65,7 @@ impl<F> Handler for F
/// A higher level connection wrapping the TcpStream. Maintains the amount of
/// data transmitted and deals with the low-level task of sending and
/// receiving data, parsing message headers and timeouts.
+#[allow(dead_code)]
pub struct Connection {
// Channel to push bytes to the remote peer
outbound_chan: UnboundedSender<Vec<u8>>,
@@ -150,7 +151,7 @@ impl Connection {
})
// write the data and make sure the future returns the right types
.fold(writer, |writer, data| {
-write_all(writer, data).map_err(|e| Error::Connection(e)).map(|(writer, buf)| writer)
+write_all(writer, data).map_err(|e| Error::Connection(e)).map(|(writer, _)| writer)
});
Box::new(send_data)
}
@@ -287,7 +288,7 @@ impl TimeoutConnection {
underlying: conn,
expected_responses: expects,
};
-(me, Box::new(fut.select(timer).map(|_| ()).map_err(|(e1, e2)| e1)))
+(me, Box::new(fut.select(timer).map(|_| ()).map_err(|(e1, _)| e1)))
}

/// Sends a request and registers a timer on the provided message type and
@@ -298,7 +299,7 @@ impl TimeoutConnection {
body: &W,
expect_h: Option<(Hash)>)
-> Result<(), Error> {
-let sent = try!(self.underlying.send_msg(t, body));
+let _sent = try!(self.underlying.send_msg(t, body));

let mut expects = self.expected_responses.lock().unwrap();
expects.push((rt, expect_h, Instant::now()));
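
The import churn above is the deprecation noted in the commit message: the I/O helpers moved from tokio_core::io to the tokio-io crate. The mapping, assuming the 0.1-era crates:

#![allow(unused_imports)]
extern crate tokio_io;

// Before (deprecated):
// use tokio_core::io::{WriteHalf, ReadHalf, write_all, read_exact};

// After: traits for splittable async byte streams...
use tokio_io::{AsyncRead, AsyncWrite};
// ...and the future-returning helpers plus the split halves.
use tokio_io::io::{read_exact, write_all, ReadHalf, WriteHalf};

fn main() {}
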
@@ -31,7 +31,6 @@ extern crate grin_util as util;
#[macro_use]
extern crate log;
extern crate futures;
-#[macro_use]
extern crate tokio_core;
extern crate tokio_io;
extern crate bytes;
@@ -19,7 +19,7 @@ use num::FromPrimitive;

use futures::future::{Future, ok};
use tokio_core::net::TcpStream;
-use tokio_core::io::{write_all, read_exact};
+use tokio_io::io::{read_exact, write_all};

use core::consensus::MAX_MSG_LEN;
use core::core::BlockHeader;
@@ -42,6 +42,7 @@ const MAGIC: [u8; 2] = [0x1e, 0xc5];
pub const HEADER_LEN: u64 = 11;

/// Codes for each error that can be produced reading a message.
+#[allow(dead_code)]
pub enum ErrCodes {
UnsupportedVersion = 100,
}
@@ -105,12 +106,12 @@ pub fn write_msg<T>(conn: TcpStream,
let write_msg = ok((conn)).and_then(move |conn| {
// prepare the body first so we know its serialized length
let mut body_buf = vec![];
-ser::serialize(&mut body_buf, &msg);
+ser::serialize(&mut body_buf, &msg).unwrap();

// build and serialize the header using the body size
let mut header_buf = vec![];
let blen = body_buf.len() as u64;
-ser::serialize(&mut header_buf, &MsgHeader::new(msg_type, blen));
+ser::serialize(&mut header_buf, &MsgHeader::new(msg_type, blen)).unwrap();

// send the whole thing
write_all(conn, header_buf)
@@ -202,9 +203,9 @@ impl Writeable for Hand {
[write_u32, self.version],
[write_u32, self.capabilities.bits()],
[write_u64, self.nonce]);
-self.total_difficulty.write(writer);
-self.sender_addr.write(writer);
-self.receiver_addr.write(writer);
+self.total_difficulty.write(writer).unwrap();
+self.sender_addr.write(writer).unwrap();
+self.receiver_addr.write(writer).unwrap();
writer.write_bytes(&self.user_agent)
}
}
@@ -250,8 +251,8 @@ impl Writeable for Shake {
ser_multiwrite!(writer,
[write_u32, self.version],
[write_u32, self.capabilities.bits()]);
-self.total_difficulty.write(writer);
-writer.write_bytes(&self.user_agent);
+self.total_difficulty.write(writer).unwrap();
+writer.write_bytes(&self.user_agent).unwrap();
Ok(())
}
}
@@ -302,7 +303,7 @@ impl Writeable for PeerAddrs {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
try!(writer.write_u32(self.peers.len() as u32));
for p in &self.peers {
-p.write(writer);
+p.write(writer).unwrap();
}
Ok(())
}
@@ -464,13 +465,13 @@ impl Readable for Headers {
pub struct Empty {}

impl Writeable for Empty {
-fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
+fn write<W: Writer>(&self, _: &mut W) -> Result<(), ser::Error> {
Ok(())
}
}

impl Readable for Empty {
-fn read(reader: &mut Reader) -> Result<Empty, ser::Error> {
+fn read(_: &mut Reader) -> Result<Empty, ser::Error> {
Ok(Empty {})
}
}
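
These Writeable impls already return Result<(), ser::Error>, so the era's try! (or ?) could propagate the inner write errors; the closures in write_msg have no such return type, which is where unwrap() is the pragmatic fix. A toy sketch of the propagating form, with an invented encoding:

#[derive(Debug)]
enum Error {
    TooLarge,
}

// Toy stand-in for ser::serialize.
fn serialize(buf: &mut Vec<u8>, value: u64) -> Result<(), Error> {
    if value > 0xff {
        return Err(Error::TooLarge);
    }
    buf.push(value as u8);
    Ok(())
}

fn write_msg(value: u64) -> Result<Vec<u8>, Error> {
    let mut body = vec![];
    serialize(&mut body, value)?; // propagate instead of unwrap()
    Ok(body)
}

fn main() {
    assert!(write_msg(7).is_ok());
    assert!(write_msg(512).is_err());
}
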
@@ -91,7 +91,7 @@ impl Peer {
// handle disconnection, standard disconnections aren't considered an error
let mut state = state.write().unwrap();
match res {
-Ok(res) => {
+Ok(_) => {
*state = State::Disconnected;
info!("Client {} disconnected.", addr);
Ok(())
@@ -14,9 +14,7 @@

use std::sync::{Mutex, Arc};

-use futures;
use futures::Future;
-use futures::stream;
use futures::sync::mpsc::UnboundedSender;
use tokio_core::net::TcpStream;

@@ -28,6 +26,7 @@ use msg::*;
use types::*;
use util::OneTime;

+#[allow(dead_code)]
pub struct ProtocolV1 {
conn: OneTime<TimeoutConnection>,

@@ -128,7 +127,7 @@ fn handle_payload(adapter: &NetAdapter,
match header.msg_type {
Type::Ping => {
let data = ser::ser_vec(&MsgHeader::new(Type::Pong, 0))?;
-sender.send(data);
+sender.send(data).unwrap();
Ok(None)
}
Type::Pong => Ok(None),
@@ -148,7 +147,7 @@ fn handle_payload(adapter: &NetAdapter,
try!(ser::serialize(&mut data,
&MsgHeader::new(Type::Block, body_data.len() as u64)));
data.append(&mut body_data);
-sender.send(data);
+sender.send(data).unwrap();
}
Ok(None)
}
@@ -170,7 +169,7 @@ fn handle_payload(adapter: &NetAdapter,
try!(ser::serialize(&mut data,
&MsgHeader::new(Type::Headers, body_data.len() as u64)));
data.append(&mut body_data);
-sender.send(data);
+sender.send(data).unwrap();

Ok(None)
}
@@ -193,7 +192,7 @@ fn handle_payload(adapter: &NetAdapter,
try!(ser::serialize(&mut data,
&MsgHeader::new(Type::PeerAddrs, body_data.len() as u64)));
data.append(&mut body_data);
-sender.send(data);
+sender.send(data).unwrap();

Ok(None)
}
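
sender.send(data).unwrap() is reasonable here because a channel send only fails once the receiving end has gone away. std's mpsc channel has the same contract, so the behaviour can be sketched without the futures runtime:

use std::sync::mpsc::channel;

fn main() {
    let (sender, receiver) = channel();

    // Receiver alive: send succeeds.
    sender.send(vec![0u8; 4]).unwrap();
    println!("got {} bytes", receiver.recv().unwrap().len());

    // Receiver dropped: send returns Err; it is the unwrap() that
    // turns a torn-down connection into a panic.
    drop(receiver);
    assert!(sender.send(vec![1u8]).is_err());
}
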
@@ -14,12 +14,11 @@

//! Provides wrappers for throttling readers and writers

-use std::time::{Instant, Duration};
+use std::time::Instant;
use std::io;

use futures::*;
use tokio_io::*;
-use bytes::{Buf, BytesMut, BufMut};

/// A Rate Limited Reader
#[derive(Debug)]
@@ -32,6 +31,7 @@ pub struct ThrottledReader<R: AsyncRead> {
last_check: Instant,
}

+#[allow(dead_code)]
impl<R: AsyncRead> ThrottledReader<R> {
/// Adds throttling to a reader.
/// The resulting reader will read at most `max` amount of bytes per second
@@ -105,6 +105,7 @@ pub struct ThrottledWriter<W: AsyncWrite> {
last_check: Instant,
}

+#[allow(dead_code)]
impl<W: AsyncWrite> ThrottledWriter<W> {
/// Adds throttling to a writer.
/// The resulting writer will write at most `max` amount of bytes per second
@@ -188,7 +189,7 @@ mod test {
for _ in 0..16 {
let _ = t_buf.write_buf(&mut Cursor::new(vec![1; 8]));
}

let cursor = t_buf.into_inner();
assert_eq!(cursor.position(), 8);
}
@@ -203,7 +204,7 @@ mod test {
for _ in 0..16 {
let _ = t_buf.read_buf(&mut dst);
}

assert_eq!(dst.position(), 8);
}
}
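
For reference, the throttling these wrappers implement is simple bookkeeping: allow at most max bytes per second, measured against last_check. A blocking, std-only sketch of the same idea (the real reader and writer hand control back to the event loop instead of sleeping):

use std::thread;
use std::time::{Duration, Instant};

struct Throttle {
    max_per_sec: u64,
    allowed: u64,
    last_check: Instant,
}

impl Throttle {
    fn new(max_per_sec: u64) -> Throttle {
        Throttle {
            max_per_sec: max_per_sec,
            allowed: max_per_sec,
            last_check: Instant::now(),
        }
    }

    // Blocks until `bytes` may pass; the allowance refills over time
    // and is capped at one second's worth.
    fn acquire(&mut self, bytes: u64) {
        loop {
            let elapsed_ms = self.last_check.elapsed().as_millis() as u64;
            self.last_check = Instant::now();
            let refill = self.max_per_sec * elapsed_ms / 1000;
            self.allowed = (self.allowed + refill).min(self.max_per_sec);
            if self.allowed >= bytes {
                self.allowed -= bytes;
                return;
            }
            thread::sleep(Duration::from_millis(10));
        }
    }
}

fn main() {
    let mut throttle = Throttle::new(1024);
    throttle.acquire(512);
    throttle.acquire(512);
    println!("passed 1 KiB through the throttle");
}
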
@@ -41,20 +41,20 @@ impl NetAdapter for DummyAdapter {
fn total_difficulty(&self) -> Difficulty {
Difficulty::one()
}
-fn transaction_received(&self, tx: core::Transaction) {}
-fn block_received(&self, b: core::Block) {}
-fn headers_received(&self, bh: Vec<core::BlockHeader>) {}
-fn locate_headers(&self, locator: Vec<Hash>) -> Vec<core::BlockHeader> {
+fn transaction_received(&self, _: core::Transaction) {}
+fn block_received(&self, _: core::Block) {}
+fn headers_received(&self, _: Vec<core::BlockHeader>) {}
+fn locate_headers(&self, _: Vec<Hash>) -> Vec<core::BlockHeader> {
vec![]
}
-fn get_block(&self, h: Hash) -> Option<core::Block> {
+fn get_block(&self, _: Hash) -> Option<core::Block> {
None
}
-fn find_peer_addrs(&self, capab: Capabilities) -> Vec<SocketAddr> {
+fn find_peer_addrs(&self, _: Capabilities) -> Vec<SocketAddr> {
vec![]
}
-fn peer_addrs_received(&self, peer_addrs: Vec<SocketAddr>) {}
-fn peer_connected(&self, pi: &PeerInfo) {}
+fn peer_addrs_received(&self, _: Vec<SocketAddr>) {}
+fn peer_connected(&self, _: &PeerInfo) {}
}

/// P2P server implementation, handling bootstrapping to find and connect to
@@ -97,7 +97,7 @@ impl Server {

// main peer acceptance future handling handshake
let hp = h.clone();
-let peers = socket.incoming().map_err(From::from).map(move |(conn, addr)| {
+let peers = socket.incoming().map_err(From::from).map(move |(conn, _)| {
let adapter = adapter.clone();
let total_diff = adapter.total_difficulty();
let peers = peers.clone();
@@ -275,7 +275,7 @@ impl Server {
for p in peers.deref() {
p.stop();
}
-self.stop.into_inner().unwrap().complete(());
+self.stop.into_inner().unwrap().send(()).unwrap();
}
}
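
complete() to send() is the futures 0.1 oneshot rename called out in the commit message; send also hands the value back as Err if the receiving side is already gone, hence the trailing unwrap(). A sketch, assuming futures = "0.1":

extern crate futures;

use futures::sync::oneshot;
use futures::Future;

fn main() {
    let (stop_tx, stop_rx) = oneshot::channel::<()>();

    // Old API: stop_tx.complete(());
    stop_tx.send(()).unwrap(); // Err(()) only if stop_rx were dropped

    stop_rx.wait().unwrap();
    println!("stop signal delivered");
}
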
@@ -33,6 +33,7 @@ pub const MAX_LOCATORS: u32 = 10;
pub const MAX_BLOCK_HEADERS: u32 = 512;

/// Maximum number of block bodies a peer should ever ask for and send
+#[allow(dead_code)]
pub const MAX_BLOCK_BODIES: u32 = 16;

/// Maximum number of peer addresses a peer should ever send
@@ -57,7 +58,7 @@ impl From<io::Error> for Error {
}
}
impl From<TimerError> for Error {
-fn from(e: TimerError) -> Error {
+fn from(_: TimerError) -> Error {
Error::Timeout
}
}
@@ -26,7 +26,6 @@ use futures::future::Future;
use tokio_core::net::TcpStream;
use tokio_core::reactor::{self, Core};

-use core::ser;
use core::core::target::Difficulty;
use p2p::Peer;
@@ -1,7 +1,7 @@
// This file is (hopefully) temporary.
//
// It contains a trait based on (but not exactly equal to) the trait defined
// for the blockchain UTXO set, discussed at
// https://github.com/ignopeverell/grin/issues/29, and a dummy implementation
// of said trait.
// Notably, UtxoDiff has been left off, and the question of how to handle
@@ -20,11 +20,12 @@ use std::sync::RwLock;

use types::BlockChain;

/// A DummyUtxoSet for mocking up the chain
pub struct DummyUtxoSet {
outputs : HashMap<Commitment, transaction::Output>
}

+#[allow(dead_code)]
impl DummyUtxoSet {
pub fn empty() -> DummyUtxoSet{
DummyUtxoSet{outputs: HashMap::new()}
@@ -50,7 +51,7 @@ impl DummyUtxoSet {
self.outputs.insert(output.commitment(), output.clone());
}
}
-pub fn rewind(&self, b: &block::Block) -> DummyUtxoSet {
+pub fn rewind(&self, _: &block::Block) -> DummyUtxoSet {
DummyUtxoSet{outputs: HashMap::new()}
}
pub fn get_output(&self, output_ref: &Commitment) -> Option<&transaction::Output> {
@@ -75,10 +76,12 @@ impl DummyUtxoSet {

/// A DummyChain is the mocked chain for playing with what methods we would
/// need
+#[allow(dead_code)]
pub struct DummyChainImpl {
utxo: RwLock<DummyUtxoSet>
}

+#[allow(dead_code)]
impl DummyChainImpl {
pub fn new() -> DummyChainImpl {
DummyChainImpl{
@@ -15,15 +15,9 @@
//! Base types for the transaction pool's Directed Acyclic Graphs

use std::vec::Vec;
-use std::sync::Arc;
-use std::sync::RwLock;
-use std::sync::Weak;
-use std::cell::RefCell;
use std::collections::HashMap;

use secp::pedersen::Commitment;
-use secp::{Secp256k1, ContextFlag};
-use secp::key;

use time;
use rand;
@@ -36,24 +30,28 @@ use core::core;
/// These are the vertices of both of the graph structures
pub struct PoolEntry {
// Core data
-// Unique identifier of this pool entry and the corresponding transaction
+/// Unique identifier of this pool entry and the corresponding transaction
pub transaction_hash: core::hash::Hash,

// Metadata
-size_estimate: u64,
+/// Size estimate
+pub size_estimate: u64,
+/// Receive timestamp
pub receive_ts: time::Tm,
}

impl PoolEntry {
+/// Create new transaction pool entry
pub fn new(tx: &core::transaction::Transaction) -> PoolEntry {
PoolEntry{
transaction_hash: transaction_identifier(tx),
size_estimate : estimate_transaction_size(tx),
receive_ts: time::now()}
}
}

-fn estimate_transaction_size(tx: &core::transaction::Transaction) -> u64 {
+/// TODO guessing this needs implementing
+fn estimate_transaction_size(_tx: &core::transaction::Transaction) -> u64 {
0
}

@@ -72,24 +70,32 @@ pub struct Edge {
}

impl Edge{
+/// Create new edge
pub fn new(source: Option<core::hash::Hash>, destination: Option<core::hash::Hash>, output: Commitment) -> Edge {
Edge{source: source, destination: destination, output: output}
}

+/// Create new edge with a source
pub fn with_source(&self, src: Option<core::hash::Hash>) -> Edge {
Edge{source: src, destination: self.destination, output: self.output}
}

+/// Create new edge with destination
pub fn with_destination(&self, dst: Option<core::hash::Hash>) -> Edge {
Edge{source: self.source, destination: dst, output: self.output}
}

+/// The output commitment of the edge
pub fn output_commitment(&self) -> Commitment {
self.output
}

+/// The destination hash of the edge
pub fn destination_hash(&self) -> Option<core::hash::Hash> {
self.destination
}

+/// The source hash of the edge
pub fn source_hash(&self) -> Option<core::hash::Hash> {
self.source
}
@@ -108,13 +114,14 @@ pub struct DirectedGraph {
edges: HashMap<Commitment, Edge>,
vertices: Vec<PoolEntry>,

// A small optimization: keeping roots (vertices with in-degree 0) in a
// separate list makes topological sort a bit faster. (This is true for
// Kahn's, not sure about other implementations)
roots: Vec<PoolEntry>,
}

impl DirectedGraph {
+/// Create an empty directed graph
pub fn empty() -> DirectedGraph {
DirectedGraph{
edges: HashMap::new(),
@@ -123,14 +130,17 @@ impl DirectedGraph {
}
}

+/// Get an edge by its commitment
pub fn get_edge_by_commitment(&self, output_commitment: &Commitment) -> Option<&Edge> {
self.edges.get(output_commitment)
}

+/// Remove an edge by its commitment
pub fn remove_edge_by_commitment(&mut self, output_commitment: &Commitment) -> Option<Edge> {
self.edges.remove(output_commitment)
}

+/// Remove a vertex by its hash
pub fn remove_vertex(&mut self, tx_hash: core::hash::Hash) -> Option<PoolEntry> {
match self.roots.iter().position(|x| x.transaction_hash == tx_hash) {
Some(i) => Some(self.roots.swap_remove(i)),
@@ -163,8 +173,8 @@ impl DirectedGraph {
}
}

-// add_vertex_only adds a vertex, meant to be complemented by add_edge_only
-// in cases where delivering a vector of edges is not feasible or efficient
+/// add_vertex_only adds a vertex, meant to be complemented by add_edge_only
+/// in cases where delivering a vector of edges is not feasible or efficient
pub fn add_vertex_only(&mut self, vertex: PoolEntry, is_root: bool) {
if is_root {
self.roots.push(vertex);
@@ -173,6 +183,7 @@ impl DirectedGraph {
}
}

+/// add_edge_only adds an edge
pub fn add_edge_only(&mut self, edge: Edge) {
self.edges.insert(edge.output_commitment(), edge);
}
@@ -181,7 +192,7 @@ impl DirectedGraph {
pub fn len_vertices(&self) -> usize {
self.vertices.len() + self.roots.len()
}

/// Number of root vertices only
pub fn len_roots(&self) -> usize {
self.roots.len()
@@ -209,6 +220,8 @@ pub fn transaction_identifier(tx: &core::transaction::Transaction) -> core::hash
#[cfg(test)]
mod tests {
use super::*;
+use secp::{Secp256k1, ContextFlag};
+use secp::key;

#[test]
fn test_add_entry() {
@@ -243,7 +256,7 @@ mod tests {
}

/// For testing/debugging: a random tx hash
-fn random_hash() -> core::hash::Hash {
+pub fn random_hash() -> core::hash::Hash {
let hash_bytes: [u8;32]= rand::random();
core::hash::Hash(hash_bytes)
}
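
On the "small optimization" note inside DirectedGraph above: Kahn's algorithm repeatedly removes vertices of in-degree 0, so keeping those roots in their own list lets the sort seed its worklist directly instead of scanning every vertex. A toy, std-only version of that algorithm over integer vertex ids:

use std::collections::HashMap;

fn topological_sort(n: usize, edges: &[(usize, usize)]) -> Vec<usize> {
    let mut in_degree = vec![0usize; n];
    let mut adj: HashMap<usize, Vec<usize>> = HashMap::new();
    for &(src, dst) in edges {
        in_degree[dst] += 1;
        adj.entry(src).or_insert_with(Vec::new).push(dst);
    }

    // The worklist seeds straight from the maintained roots list.
    let mut roots: Vec<usize> = (0..n).filter(|&v| in_degree[v] == 0).collect();
    let mut order = Vec::new();

    while let Some(v) = roots.pop() {
        order.push(v);
        if let Some(nexts) = adj.get(&v) {
            for &next in nexts {
                in_degree[next] -= 1;
                if in_degree[next] == 0 {
                    // A vertex becomes a root once its last input is gone.
                    roots.push(next);
                }
            }
        }
    }
    order
}

fn main() {
    // 0 -> 1 -> 2 plus a shortcut 0 -> 2.
    let order = topological_sort(3, &[(0, 1), (1, 2), (0, 2)]);
    assert_eq!(order, vec![0, 1, 2]);
}
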
@@ -28,7 +28,6 @@ mod pool;

extern crate time;
extern crate rand;
-#[macro_use]
extern crate log;

extern crate grin_core as core;
@@ -20,22 +20,22 @@ pub use graph;
use core::core::transaction;
use core::core::block;
use core::core::hash;
-// Temporary blockchain dummy impls
-use blockchain::{DummyChain, DummyChainImpl, DummyUtxoSet};

use secp;
use secp::pedersen::Commitment;

-use std::sync::{Arc, RwLock, Weak};
+use std::sync::Arc;
use std::collections::HashMap;

/// The pool itself.
/// The transactions HashMap holds ownership of all transactions in the pool,
/// keyed by their transaction hash.
pub struct TransactionPool<T> {
+/// All transactions in the pool
pub transactions: HashMap<hash::Hash, Box<transaction::Transaction>>,
+/// The pool itself
pub pool : Pool,
+/// Orphans in the pool
pub orphans: Orphans,

// blockchain is a DummyChain, for now, which mimics what the future
@@ -44,6 +44,7 @@ pub struct TransactionPool<T> {
}

impl<T> TransactionPool<T> where T: BlockChain {
+/// Create a new transaction pool
pub fn new(chain: Arc<T>) -> TransactionPool<T> {
TransactionPool{
transactions: HashMap::new(),
@@ -53,15 +54,15 @@ impl<T> TransactionPool<T> where T: BlockChain {
}
}

/// Searches for an output, designated by its commitment, from the current
/// best UTXO view, presented by taking the best blockchain UTXO set (as
/// determined by the blockchain component) and rectifying pool spent and
/// unspents.
/// Detects double spends and unknown references from the pool and
/// blockchain only; any conflicts with entries in the orphans set must
/// be accounted for separately, if relevant.
pub fn search_for_best_output(&self, output_commitment: &Commitment) -> Parent {
// The current best unspent set is:
// Pool unspent + (blockchain unspent - pool->blockchain spent)
// Pool unspents are unconditional so we check those first
self.pool.get_available_output(output_commitment).
@@ -76,7 +77,7 @@ impl<T> TransactionPool<T> where T: BlockChain {
// output designated by output_commitment.
fn search_blockchain_unspents(&self, output_commitment: &Commitment) -> Option<Parent> {
self.blockchain.get_unspent(output_commitment).
-map(|o| match self.pool.get_blockchain_spent(output_commitment) {
+map(|_| match self.pool.get_blockchain_spent(output_commitment) {
Some(x) => Parent::AlreadySpent{other_tx: x.destination_hash().unwrap()},
None => Parent::BlockTransaction,
})
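
The doc comment on search_for_best_output above defines the best unspent view as pool unspent plus blockchain unspent, minus chain outputs the pool already spends, checked in that order. A toy, std-only sketch of that precedence; the maps and integer tx ids are stand-ins, not the pool's real types:

use std::collections::{HashMap, HashSet};

#[derive(Debug, PartialEq)]
enum Parent {
    Unknown,
    PoolTransaction { tx: u64 },
    AlreadySpent { other_tx: u64 },
    BlockTransaction,
}

fn search_for_best_output(
    pool_unspent: &HashMap<u32, u64>,          // commitment -> producing tx
    chain_unspent: &HashSet<u32>,              // commitments unspent on-chain
    pool_spent_from_chain: &HashMap<u32, u64>, // commitment -> spending tx
    commitment: u32,
) -> Parent {
    // Pool unspents are unconditional, so check those first.
    if let Some(&tx) = pool_unspent.get(&commitment) {
        return Parent::PoolTransaction { tx };
    }
    // Then the chain view, rectified by what the pool already spends.
    if chain_unspent.contains(&commitment) {
        return match pool_spent_from_chain.get(&commitment) {
            Some(&other_tx) => Parent::AlreadySpent { other_tx },
            None => Parent::BlockTransaction,
        };
    }
    Parent::Unknown
}

fn main() {
    let mut pool_unspent = HashMap::new();
    pool_unspent.insert(1, 101);
    let chain_unspent: HashSet<u32> = [2, 3].iter().cloned().collect();
    let mut pool_spent_from_chain = HashMap::new();
    pool_spent_from_chain.insert(3, 102);

    let find = |c| search_for_best_output(&pool_unspent, &chain_unspent, &pool_spent_from_chain, c);
    assert_eq!(find(1), Parent::PoolTransaction { tx: 101 });
    assert_eq!(find(2), Parent::BlockTransaction);
    assert_eq!(find(3), Parent::AlreadySpent { other_tx: 102 });
    assert_eq!(find(9), Parent::Unknown);
}
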
@@ -96,10 +97,12 @@ impl<T> TransactionPool<T> where T: BlockChain {
         self.pool.num_transactions()
     }

+    /// Get the number of orphans in the pool
     pub fn orphans_size(&self) -> usize {
         self.orphans.num_transactions()
     }

+    /// Get the total size (transactions + orphans) of the pool
     pub fn total_size(&self) -> usize {
         self.pool.num_transactions() + self.orphans.num_transactions()
     }
@@ -110,15 +113,15 @@ impl<T> TransactionPool<T> where T: BlockChain {
     /// if necessary, and performing any connection-related validity checks.
     /// Happens under an exclusive mutable reference gated by the write portion
     /// of a RWLock.
-    pub fn add_to_memory_pool(&mut self, source: TxSource, tx: transaction::Transaction) -> Result<(), PoolError> {
+    pub fn add_to_memory_pool(&mut self, _: TxSource, tx: transaction::Transaction) -> Result<(), PoolError> {
         // Making sure the transaction is valid before anything else.
         let secp = secp::Secp256k1::with_caps(secp::ContextFlag::Commit);
         tx.validate(&secp).map_err(|_| PoolError::Invalid)?;

         // The first check involves ensuring that an identical transaction is
         // not already in the pool's transaction set.
         // A non-authoritative similar check should be performed under the
         // pool's read lock before we get to this point, which would catch the
         // majority of duplicate cases. The race condition is caught here.
         // TODO: When the transaction identifier is finalized, the assumptions
         // here may change depending on the exact coverage of the identifier.
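The comment above describes a two-phase duplicate check: a cheap, non-authoritative pass under the read lock, with the race caught again under the write lock. A small self-contained sketch of that locking discipline, assuming a toy pool keyed by u64 hashes rather than real transaction hashes:

use std::collections::HashMap;
use std::sync::RwLock;

// Hypothetical pool; the real pool keys by transaction hash.
struct MemPool {
    txs: RwLock<HashMap<u64, String>>,
}

impl MemPool {
    fn add(&self, hash: u64, tx: String) -> Result<(), &'static str> {
        // Cheap, non-authoritative pre-check under the read lock.
        if self.txs.read().unwrap().contains_key(&hash) {
            return Err("already in pool");
        }
        // Authoritative check under the write lock catches the race where
        // two threads pass the read-lock check with the same transaction.
        let mut txs = self.txs.write().unwrap();
        if txs.contains_key(&hash) {
            return Err("already in pool");
        }
        txs.insert(hash, tx);
        Ok(())
    }
}

fn main() {
    let pool = MemPool { txs: RwLock::new(HashMap::new()) };
    assert!(pool.add(1, "tx".into()).is_ok());
    assert!(pool.add(1, "tx".into()).is_err()); // duplicate caught
}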
@@ -156,7 +159,7 @@ impl<T> TransactionPool<T> where T: BlockChain {

         // Next we examine the outputs this transaction creates and ensure
         // that they do not already exist.
         // I believe it's worth preventing duplicate outputs from being
         // accepted, even though it is possible for them to be mined
         // with strict ordering. In the future, if desirable, this could
         // be node policy config or more intelligent.
@@ -182,8 +185,8 @@ impl<T> TransactionPool<T> where T: BlockChain {
         // output is unique. No further checks are necessary.
         self.pool.add_pool_transaction(pool_entry, blockchain_refs,
             pool_refs, new_unspents);

-        self.reconcile_orphans();
+        self.reconcile_orphans().unwrap();
         self.transactions.insert(tx_hash, Box::new(tx));
         Ok(())

|
@ -194,11 +197,11 @@ impl<T> TransactionPool<T> where T: BlockChain {
|
||||||
// checking above.
|
// checking above.
|
||||||
// First, any references resolved to the pool need to be compared
|
// First, any references resolved to the pool need to be compared
|
||||||
// against active orphan pool_connections.
|
// against active orphan pool_connections.
|
||||||
// Note that pool_connections here also does double duty to
|
// Note that pool_connections here also does double duty to
|
||||||
// account for blockchain connections.
|
// account for blockchain connections.
|
||||||
for pool_ref in pool_refs.iter().chain(blockchain_refs.iter()) {
|
for pool_ref in pool_refs.iter().chain(blockchain_refs.iter()) {
|
||||||
match self.orphans.get_external_spent_output(&pool_ref.output_commitment()){
|
match self.orphans.get_external_spent_output(&pool_ref.output_commitment()){
|
||||||
// Should the below err be subtyped to orphans somehow?
|
// Should the below err be subtyped to orphans somehow?
|
||||||
Some(x) => return Err(PoolError::DoubleSpend{other_tx: x.destination_hash().unwrap(), spent_output: x.output_commitment()}),
|
Some(x) => return Err(PoolError::DoubleSpend{other_tx: x.destination_hash().unwrap(), spent_output: x.output_commitment()}),
|
||||||
None => {},
|
None => {},
|
||||||
}
|
}
|
||||||
|
@@ -223,8 +226,8 @@ impl<T> TransactionPool<T> where T: BlockChain {
     /// Check the output for a conflict with an existing output.
     ///
     /// Checks the output (by commitment) against outputs in the blockchain
     /// or in the pool. If the transaction is destined for orphans, the
     /// orphans set is checked as well.
     fn check_duplicate_outputs(&self, output : &transaction::Output, is_orphan: bool) -> Result<(), PoolError> {
         // Checking against current blockchain unspent outputs
         // We want outputs even if they're spent by pool txs, so we ignore
@@ -249,7 +252,7 @@ impl<T> TransactionPool<T> where T: BlockChain {
         };


         // If the transaction might go into orphans, perform the same
         // checks as above but against the orphan set instead.
         if is_orphan {
             // Checking against orphan outputs
@@ -295,7 +298,7 @@ impl<T> TransactionPool<T> where T: BlockChain {
                 None => {
                     // The reference does not resolve to anything.
                     // Make sure this missing_output has not already
                     // been claimed, then add this entry to
                     // missing_refs
                     match self.orphans.get_unknown_output(&orphan_commitment) {
                         Some(x) => return Err(PoolError::DoubleSpend{
@@ -311,7 +314,7 @@ impl<T> TransactionPool<T> where T: BlockChain {
         Ok(missing_refs)
     }

     /// The primary goal of the reconcile_orphans method is to eliminate any
     /// orphans who conflict with the recently accepted pool transaction.
     /// TODO: How do we handle fishing orphans out that look like they could
     /// be freed? Current thought is to do so under a different lock domain
@@ -332,9 +335,9 @@ impl<T> TransactionPool<T> where T: BlockChain {
     ///
     /// Returns a list of transactions which have been evicted from the pool
     /// due to the recent block. Because transaction association information is
     /// irreversibly lost in the blockchain, we must keep track of these
     /// evicted transactions elsewhere so that we can make a best effort at
     /// returning them to the pool in the event of a reorg that invalidates
     /// this block.
     pub fn reconcile_block(&mut self, block: &block::Block) -> Result<Vec<Box<transaction::Transaction>>, PoolError> {
         // If this pool has been kept in sync correctly, serializing all
@@ -350,7 +353,7 @@ impl<T> TransactionPool<T> where T: BlockChain {
         // consumes the same blockchain output.
         // If one exists, we mark the transaction and then examine its
         // children. Recursively, we mark each child until a child is
         // fully satisfied by outputs in the updated utxo view (after
         // reconciliation of the block), or there are no more children.
         //
         // Additionally, to protect our invariant dictating no duplicate
@@ -358,11 +361,11 @@ impl<T> TransactionPool<T> where T: BlockChain {
         // against outputs generated by the pool and the corresponding
         // transactions are also marked.
         //
         // After marking concludes, sweeping begins. In order, the marked
         // transactions are removed, the vertexes corresponding to the
         // transactions are removed, all the marked transactions' outputs are
         // removed, and all remaining non-blockchain inputs are returned to the
         // unspent_outputs set.
         //
         // After the pool has been successfully processed, an orphans
         // reconciliation job is triggered.
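A compact, self-contained sketch of the mark-and-sweep pass described in the comments above; transaction ids, outputs, and the spent-by index are toy u64 stand-ins for the pool's actual graph, but the marking recursion and the sweep order follow the description:

use std::collections::{HashMap, HashSet};

// Toy pool: each tx creates outputs; spent_by maps an output to the pool tx spending it.
struct ToyPool {
    tx_outputs: HashMap<u64, Vec<u64>>, // tx id -> outputs it creates
    spent_by: HashMap<u64, u64>,        // output -> pool tx consuming it
}

impl ToyPool {
    // Mark: starting from a conflicting tx, recursively mark children whose
    // inputs are not re-supplied by the updated UTXO view.
    fn mark(&self, tx: u64, new_utxos: &HashSet<u64>, marked: &mut HashSet<u64>) {
        if !marked.insert(tx) {
            return; // already marked
        }
        for out in self.tx_outputs.get(&tx).into_iter().flatten() {
            if let Some(child) = self.spent_by.get(out) {
                // If the block re-created an identical output, the child
                // is still fully satisfied and survives.
                if !new_utxos.contains(out) {
                    self.mark(*child, new_utxos, marked);
                }
            }
        }
    }

    // Sweep: remove marked txs and their outputs, returning the evicted set.
    fn sweep(&mut self, marked: &HashSet<u64>) -> Vec<u64> {
        for tx in marked {
            if let Some(outs) = self.tx_outputs.remove(tx) {
                for out in outs {
                    self.spent_by.remove(&out);
                }
            }
        }
        marked.iter().copied().collect()
    }
}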
@@ -389,7 +392,7 @@ impl<T> TransactionPool<T> where T: BlockChain {
         }
         let freed_txs = self.sweep_transactions(marked_transactions);

-        self.reconcile_orphans();
+        self.reconcile_orphans().unwrap();

         Ok(freed_txs)
     }
@@ -397,9 +400,9 @@ impl<T> TransactionPool<T> where T: BlockChain {
     /// The mark portion of our mark-and-sweep pool cleanup.
     ///
     /// The transaction designated by conflicting_tx is immediately marked.
     /// Each output of this transaction is then examined; if a transaction in
     /// the pool spends this output and the output is not replaced by an
     /// identical output included in the updated UTXO set, the child is marked
     /// as well and the process continues recursively.
     ///
     /// Marked transactions are added to the mutable marked_txs HashMap which
|
@ -466,6 +469,8 @@ mod tests {
|
||||||
use secp::{Secp256k1, ContextFlag, constants};
|
use secp::{Secp256k1, ContextFlag, constants};
|
||||||
use secp::key;
|
use secp::key;
|
||||||
use core::core::build;
|
use core::core::build;
|
||||||
|
use blockchain::{DummyChain, DummyChainImpl, DummyUtxoSet};
|
||||||
|
use std::sync::{Arc, RwLock};
|
||||||
|
|
||||||
macro_rules! expect_output_parent {
|
macro_rules! expect_output_parent {
|
||||||
($pool:expr, $expected:pat, $( $output:expr ),+ ) => {
|
($pool:expr, $expected:pat, $( $output:expr ),+ ) => {
|
||||||
|
@@ -478,7 +483,6 @@ mod tests {
             }
         }

-
     #[test]
     /// A basic test; add a pair of transactions to the pool.
     fn test_basic_pool_add() {
@@ -542,7 +546,8 @@ mod tests {

         }
     }
+
     #[test]
     /// Testing various expected error conditions
     pub fn test_pool_add_error() {
         let mut dummy_chain = DummyChainImpl::new();
@@ -595,7 +600,7 @@ mod tests {
             Ok(_) => panic!("Expected error when adding double spend, got Ok"),
             Err(x) => {
                 match x {
-                    PoolError::DoubleSpend{other_tx, spent_output} => {
+                    PoolError::DoubleSpend{other_tx: _, spent_output} => {
                         if spent_output != test_output(6).commitment() {
                             panic!("Unexpected parameter in DoubleSpend: {:?}", x);
                         }
@@ -647,7 +652,7 @@ mod tests {
         let pool = RwLock::new(test_setup(&chain_ref));

         // Preparation: We will introduce three root pool transactions.
         // 1. A transaction that should be invalidated because it is exactly
         //    contained in the block.
         // 2. A transaction that should be invalidated because the input is
         //    consumed in the block, although it is not exactly consumed.
@@ -657,7 +662,7 @@ mod tests {
         let valid_transaction = test_transaction(vec![30], vec![14,15]);

         // We will also introduce a few children:
         // 4. A transaction that descends from transaction 1, that is in
         //    turn exactly contained in the block.
         let block_child = test_transaction(vec![8], vec![4,3]);
         // 5. A transaction that descends from transaction 4, that is not
@@ -681,9 +686,9 @@ mod tests {
         // transaction 9
         let mixed_child = test_transaction(vec![11,13], vec![2]);

         // Add transactions.
         // Note: There are some ordering constraints that must be followed here
         // until orphans is 100% implemented. Once the orphans process has
         // stabilized, we can mix these up to exercise that path a bit.
         let mut txs_to_add = vec![block_transaction, conflict_transaction,
             valid_transaction, block_child, pool_child, conflict_child,
@@ -755,7 +760,7 @@ mod tests {
             expect_output_parent!(read_pool,
                 Parent::AlreadySpent{other_tx: _}, 15);

             // We should have unspent pool references at 1, 13, 14
             expect_output_parent!(read_pool,
                 Parent::PoolTransaction{tx_ref: _}, 1, 13, 14);

@@ -765,9 +770,8 @@ mod tests {
             // Evicted transactions should have unknown outputs
             expect_output_parent!(read_pool, Parent::Unknown, 2, 11);
         }
-
     }

     #[test]
     /// Test transaction selection and block building.
     fn test_block_building() {
@@ -819,7 +823,7 @@ mod tests {
             txs = read_pool.prepare_mineable_transactions(3);
             assert_eq!(txs.len(), 3);
             // TODO: This is ugly, either make block::new take owned
             // txs instead of mut refs, or change
             // prepare_mineable_transactions to return mut refs
             let block_txs: Vec<transaction::Transaction> = txs.drain(..).map(|x| *x).collect();
             let tx_refs = block_txs.iter().collect();
@@ -840,7 +844,7 @@ mod tests {
             assert_eq!(write_pool.total_size(), 2);
         }


     }


|
@ -856,7 +860,7 @@ mod tests {
|
||||||
/// Cobble together a test transaction for testing the transaction pool.
|
/// Cobble together a test transaction for testing the transaction pool.
|
||||||
///
|
///
|
||||||
/// Connectivity here is the most important element.
|
/// Connectivity here is the most important element.
|
||||||
/// Every output is given a blinding key equal to its value, so that the
|
/// Every output is given a blinding key equal to its value, so that the
|
||||||
/// entire commitment can be derived deterministically from just the value.
|
/// entire commitment can be derived deterministically from just the value.
|
||||||
///
|
///
|
||||||
/// Fees are the remainder between input and output values, so the numbers
|
/// Fees are the remainder between input and output values, so the numbers
|
||||||
|
|
|
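Per the doc comment above, the test helper derives blinding keys from output values and the fee from the input/output remainder. A hypothetical sketch of that convention; TestTx and this test_transaction are illustrative stand-ins, not the module's real helper:

// Illustrative stand-in for the pool's test_transaction helper.
struct TestTx {
    inputs: Vec<u64>,  // each input value doubles as its blinding key
    outputs: Vec<u64>, // same convention for outputs
    fee: u64,          // remainder between input and output sums
}

fn test_transaction(inputs: Vec<u64>, outputs: Vec<u64>) -> TestTx {
    let in_sum: u64 = inputs.iter().sum();
    let out_sum: u64 = outputs.iter().sum();
    assert!(in_sum >= out_sum, "outputs may not exceed inputs");
    TestTx { fee: in_sum - out_sum, inputs, outputs }
}

fn main() {
    // Mirrors calls like test_transaction(vec![30], vec![14, 15]) in the diff.
    let tx = test_transaction(vec![30], vec![14, 15]);
    assert_eq!(tx.fee, 1);
}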
@@ -16,10 +16,6 @@
 //! and its top-level members.

 use std::vec::Vec;
-use std::sync::Arc;
-use std::sync::RwLock;
-use std::sync::Weak;
-use std::cell::RefCell;
 use std::collections::HashMap;
 use std::iter::Iterator;
 use std::fmt;
@@ -28,19 +24,16 @@ use secp::pedersen::Commitment;

 pub use graph;

-use time;
-
 use core::core::transaction;
-use core::core::block;
 use core::core::hash;

 /// Placeholder: the data representing where we heard about a tx from.
 ///
 /// Used to make decisions based on transaction acceptance priority from
 /// various sources. For example, a node may want to bypass pool size
 /// restrictions when accepting a transaction from a local wallet.
 ///
 /// Most likely this will evolve to contain some sort of network identifier,
 /// once we get a better sense of what transaction building might look like.
 pub struct TxSource {
     /// Human-readable name used for logging and errors.
@@ -71,14 +64,31 @@ impl fmt::Debug for Parent {
     }
 }

+// TODO document this enum more accurately
+/// Enum of errors
 #[derive(Debug)]
 pub enum PoolError {
+    /// An invalid pool entry
     Invalid,
+    /// An entry already in the pool
     AlreadyInPool,
-    DuplicateOutput{other_tx: Option<hash::Hash>, in_chain: bool,
-        output: Commitment},
-    DoubleSpend{other_tx: hash::Hash, spent_output: Commitment},
-    // An orphan successfully added to the orphans set
+    /// A duplicate output
+    DuplicateOutput{
+        /// The other transaction
+        other_tx: Option<hash::Hash>,
+        /// Is in chain?
+        in_chain: bool,
+        /// The output
+        output: Commitment
+    },
+    /// A double spend
+    DoubleSpend{
+        /// The other transaction
+        other_tx: hash::Hash,
+        /// The spent output
+        spent_output: Commitment
+    },
+    /// An orphan successfully added to the orphans set
     OrphanTransaction,
 }

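With the variants now carrying documented named fields, callers can destructure pool errors precisely. A hedged sketch of matching on this enum; the Hash and Commitment type aliases are stand-ins so the snippet stays self-contained (the real types are core::core::hash::Hash and secp::pedersen::Commitment):

// Stand-in aliases so the sketch compiles on its own.
type Hash = [u8; 32];
type Commitment = [u8; 33];

#[derive(Debug)]
pub enum PoolError {
    Invalid,
    AlreadyInPool,
    DuplicateOutput { other_tx: Option<Hash>, in_chain: bool, output: Commitment },
    DoubleSpend { other_tx: Hash, spent_output: Commitment },
    OrphanTransaction,
}

fn describe(err: &PoolError) -> String {
    match err {
        PoolError::Invalid => "transaction failed validation".to_string(),
        PoolError::AlreadyInPool => "duplicate transaction".to_string(),
        PoolError::DuplicateOutput { in_chain, .. } => {
            format!("output already exists (in chain: {})", in_chain)
        }
        PoolError::DoubleSpend { spent_output, .. } => {
            format!("output {:?}... already spent", &spent_output[..4])
        }
        PoolError::OrphanTransaction => "missing inputs, parked as orphan".to_string(),
    }
}

fn main() {
    println!("{}", describe(&PoolError::OrphanTransaction));
}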
@@ -95,9 +105,9 @@ pub trait BlockChain {
 /// the blockchain.
 /// Reservations of outputs by orphan transactions (not fully connected) are
 /// not respected.
 /// Spending references (input -> output) exist in two structures: internal
 /// graph references are contained in the pool edge sets, while references
 /// sourced from the blockchain's UTXO set are contained in the
 /// blockchain_connections set.
 /// Spent-by references (output -> input) exist in two structures: pool-pool
 /// connections are in the pool edge set, while unspent (dangling) references
@@ -105,12 +115,12 @@ pub trait BlockChain {
 pub struct Pool {
     graph : graph::DirectedGraph,

     // available_outputs are unspent outputs of the current pool set,
     // maintained as edges with empty destinations, keyed by the
     // output's hash.
     available_outputs: HashMap<Commitment, graph::Edge>,

     // Consumed blockchain utxo's are kept in a separate map.
     consumed_blockchain_outputs: HashMap<Commitment, graph::Edge>
 }

@@ -209,7 +219,7 @@ impl Pool {
     }
 }

 impl TransactionGraphContainer for Pool {
     fn get_graph(&self) -> &graph::DirectedGraph {
         &self.graph
     }
@@ -225,21 +235,21 @@ impl TransactionGraphContainer for Pool {
 }

 /// Orphans contains the elements of the transaction graph that have not been
 /// connected in full to the blockchain.
 pub struct Orphans {
     graph : graph::DirectedGraph,

     // available_outputs are unspent outputs of the current orphan set,
     // maintained as edges with empty destinations.
     available_outputs: HashMap<Commitment, graph::Edge>,

     // missing_outputs are spending references (inputs) with missing
     // corresponding outputs, maintained as edges with empty sources.
     missing_outputs: HashMap<Commitment, graph::Edge>,

     // pool_connections are bidirectional edges which connect to the pool
     // graph. They should map one-to-one to pool graph available_outputs.
     // pool_connections should not be viewed authoritatively, they are
     // merely informational until the transaction is officially connected to
     // the pool.
     pool_connections: HashMap<Commitment, graph::Edge>,
@@ -255,12 +265,12 @@ impl Orphans {
         }
     }

     /// Checks for a double spent output, given the hash of the output,
     /// ONLY in the data maintained by the orphans set. This includes links
     /// to the pool as well as links internal to orphan transactions.
     /// Returns the transaction hash corresponding to the conflicting
     /// transaction.
-    fn check_double_spend(&self, o: transaction::Output) -> Option<hash::Hash> {
+    pub fn check_double_spend(&self, o: transaction::Output) -> Option<hash::Hash> {
         self.graph.get_edge_by_commitment(&o.commitment()).or(self.pool_connections.get(&o.commitment())).map(|x| x.destination_hash().unwrap())
     }

@@ -340,14 +350,14 @@ impl TransactionGraphContainer for Orphans {
 /// consumed by another transaction in this graph,
 /// 3) [External] Unspent: An output produced by a transaction in this graph
 /// that is not yet spent.
 ///
 /// There is no concept of an external "spent by" reference (output produced by
 /// a transaction in the graph spent by a transaction in another source), as
 /// these references are expected to be maintained by the descendant graph. Outputs
 /// follow a hierarchy (Blockchain -> Pool -> Orphans) where each descendant
 /// exists at a lower priority than its parent. An output consumed by a
 /// child graph is marked as unspent in the parent graph and an external spent
 /// in the child. This ensures that no descendant set must modify state in a
 /// set of higher priority.
 pub trait TransactionGraphContainer {
     /// Accessor for graph object
@@ -365,7 +375,7 @@ pub trait TransactionGraphContainer {
         self.get_available_output(c).is_some()
     }

     /// Checks if the pool has anything by this output already, between
     /// available outputs and internal ones.
     fn find_output(&self, c: &Commitment) -> Option<hash::Hash> {
         self.get_available_output(c).

@@ -143,6 +143,7 @@ impl AsRef<[u8]> for RangeProof {
 }

 impl RangeProof {
+    /// Create the zero range proof
     pub fn zero() -> RangeProof {
         RangeProof {
             proof: [0; constants::MAX_PROOF_SIZE],

@@ -187,6 +187,8 @@ impl<'a> Batch<'a> {
         }
     }

+    /// Delete a single key from the batch. The write function
+    /// must be called to "commit" the batch to storage.
     pub fn delete(mut self, key: &[u8]) -> Result<Batch<'a>, Error> {
         self.batch.delete(key)?;
         Ok(self)

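delete consumes the batch and returns it, so deletions chain until a final write commits them. A toy sketch of that consuming-builder pattern; the Store and Batch types here are illustrative stand-ins, not grin's store API:

use std::collections::BTreeMap;

// Toy in-memory store; the real store wraps a persistent key-value DB.
struct Store {
    data: BTreeMap<Vec<u8>, Vec<u8>>,
}

struct Batch<'a> {
    store: &'a mut Store,
    deletes: Vec<Vec<u8>>,
}

impl<'a> Batch<'a> {
    // Consumes self and hands it back, allowing chained calls.
    fn delete(mut self, key: &[u8]) -> Result<Batch<'a>, String> {
        self.deletes.push(key.to_vec());
        Ok(self)
    }

    // Nothing touches the store until the batch is written.
    fn write(self) -> Result<(), String> {
        for key in self.deletes {
            self.store.data.remove(&key);
        }
        Ok(())
    }
}

fn main() -> Result<(), String> {
    let mut store = Store { data: BTreeMap::new() };
    store.data.insert(b"k".to_vec(), b"v".to_vec());
    Batch { store: &mut store, deletes: vec![] }
        .delete(b"k")?
        .write()?;
    assert!(store.data.is_empty());
    Ok(())
}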
@@ -29,7 +29,7 @@ pub fn refresh_outputs(config: &WalletConfig, ext_key: &ExtendedKey) {
     let secp = secp::Secp256k1::with_caps(secp::ContextFlag::Commit);

     // operate within a lock on wallet data
-    WalletData::with_wallet(&config.data_file_dir, |wallet_data| {
+    let _ = WalletData::with_wallet(&config.data_file_dir, |wallet_data| {

         // check each output that's not spent
         for out in &mut wallet_data.outputs {

@@ -41,7 +41,7 @@ pub fn refresh_outputs(config: &WalletConfig, ext_key: &ExtendedKey) {
             // TODO check the pool for unconfirmed

             let out_res = get_output_by_commitment(config, commitment);

             if out_res.is_ok() {
                 // output is known, it's a new utxo
                 out.status = OutputStatus::Unspent;

@@ -50,13 +50,13 @@
 //! So we may as well have it in place already.

 use std::convert::From;
-use secp::{self, Secp256k1};
+use secp::{self};
 use secp::key::SecretKey;

 use core::core::{Block, Transaction, TxKernel, Output, build};
 use core::ser;
 use api::{self, ApiEndpoint, Operation, ApiResult};
-use extkey::{self, ExtendedKey};
+use extkey::ExtendedKey;
 use types::*;
 use util;

@@ -70,13 +70,13 @@ struct TxWrapper {
 /// transaction, adding our receiving output, to broadcast to the rest of the
 /// network.
 pub fn receive_json_tx(config: &WalletConfig, ext_key: &ExtendedKey, partial_tx_str: &str) -> Result<(), Error> {

     let (amount, blinding, partial_tx) = partial_tx_from_json(partial_tx_str)?;
     let final_tx = receive_transaction(&config, ext_key, amount, blinding, partial_tx)?;
     let tx_hex = util::to_hex(ser::ser_vec(&final_tx).unwrap());

     let url = format!("{}/v1/pool/push", config.check_node_api_http_addr.as_str());
-    api::client::post(url.as_str(), &TxWrapper { tx_hex: tx_hex })?;
+    let _: TxWrapper = api::client::post(url.as_str(), &TxWrapper { tx_hex: tx_hex })?;
     Ok(())
 }

@@ -135,9 +135,9 @@ impl ApiEndpoint for WalletReceiver {
             WalletReceiveRequest::PartialTransaction(partial_tx_str) => {
                 debug!("Operation {} with transaction {}", op, &partial_tx_str);
                 receive_json_tx(&self.config, &self.key, &partial_tx_str).map_err(|e| {
                     api::Error::Internal(format!("Error processing partial transaction: {:?}", e))
-                });
+                }).unwrap();

                 //TODO: Return emptiness for now, should be a proper enum return type
                 Ok(CbData {
                     output: String::from(""),

@@ -13,7 +13,7 @@
 // limitations under the License.

 use std::convert::From;
-use secp::{self, Secp256k1};
+use secp::{self};
 use secp::key::SecretKey;

 use checker;
@@ -29,21 +29,18 @@ use api;
 /// recipients wallet receiver (to be implemented).
 pub fn issue_send_tx(config: &WalletConfig, ext_key: &ExtendedKey, amount: u64, dest: String) -> Result<(), Error> {
     checker::refresh_outputs(&config, ext_key);

     let (tx, blind_sum) = build_send_tx(config, ext_key, amount)?;
     let json_tx = partial_tx_to_json(amount, blind_sum, tx);

     if dest == "stdout" {
         println!("{}", json_tx);
     } else if &dest[..4] == "http" {
-        let url = format!("{}/v1/receive/receive_json_tx",
-                          &dest);
+        let url = format!("{}/v1/receive/receive_json_tx", &dest);
         debug!("Posting partial transaction to {}", url);
         let request = WalletReceiveRequest::PartialTransaction(json_tx);
-        let res: CbData = api::client::post(url.as_str(),
-                                            &request)
-            .expect(&format!("Wallet receiver at {} unreachable, could not send transaction. Is it running?", url));
+        let _: CbData = api::client::post(url.as_str(), &request)
+            .expect(&format!("Wallet receiver at {} unreachable, could not send transaction. Is it running?", url));

     }
     Ok(())
 }

@@ -65,7 +65,7 @@ impl From<serde_json::Error> for Error {
 }

 impl From<num::ParseIntError> for Error {
-    fn from(e: num::ParseIntError) -> Error {
+    fn from(_: num::ParseIntError) -> Error {
         Error::Format("Invalid hex".to_string())
     }
 }
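From impls like this one are what let the wallet code use the ? operator on parse results: the ParseIntError converts into the crate's own Error automatically. A self-contained sketch of the pattern, with a stand-in Error enum:

use std::num;

#[derive(Debug)]
enum Error {
    Format(String),
}

impl From<num::ParseIntError> for Error {
    fn from(_: num::ParseIntError) -> Error {
        Error::Format("Invalid hex".to_string())
    }
}

// `?` converts the ParseIntError into Error via the From impl above.
fn parse_hex_byte(s: &str) -> Result<u8, Error> {
    Ok(u8::from_str_radix(s, 16)?)
}

fn main() {
    assert!(parse_hex_byte("ff").is_ok());
    assert!(matches!(parse_hex_byte("zz"), Err(Error::Format(_))));
}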
@@ -91,7 +91,7 @@ pub struct WalletConfig {

 impl Default for WalletConfig {
     fn default() -> WalletConfig {
         WalletConfig {
             enable_wallet: false,
             api_http_addr: "http://127.0.0.1:13415".to_string(),
             check_node_api_http_addr: "http://127.0.0.1:13415".to_string(),
@@ -161,12 +161,12 @@ impl WalletData {
         fs::create_dir_all(data_file_dir).unwrap_or_else(|why| {
             info!("! {:?}", why.kind());
         });

         let data_file_path = &format!("{}{}{}", data_file_dir, MAIN_SEPARATOR, DAT_FILE);
         let lock_file_path = &format!("{}{}{}", data_file_dir, MAIN_SEPARATOR, LOCK_FILE);

         // create the lock file; if it already exists, this will produce an error
-        OpenOptions::new().write(true).create_new(true).open(lock_file_path).map_err(|e| {
+        OpenOptions::new().write(true).create_new(true).open(lock_file_path).map_err(|_| {
             Error::WalletData(format!("Could not create wallet lock file. Either \
             some other process is using the wallet or there's a write access \
             issue."))
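create_new(true) is what makes the lock acquisition atomic: the open fails if the file already exists, so a second process cannot take the lock. A std-only sketch of the same acquire/work/release cycle, with illustrative paths and error strings:

use std::fs::{self, OpenOptions};

fn with_lock<T>(lock_path: &str, f: impl FnOnce() -> T) -> Result<T, String> {
    // create_new fails if the lock file already exists: a second
    // process attempting to acquire the lock gets an error instead.
    OpenOptions::new()
        .write(true)
        .create_new(true)
        .open(lock_path)
        .map_err(|_| "could not create lock file".to_string())?;

    let result = f();

    // Release the lock by deleting the file.
    fs::remove_file(lock_path).map_err(|_| "could not remove lock file".to_string())?;
    Ok(result)
}

fn main() {
    let out = with_lock("/tmp/wallet.lock", || 40 + 2).expect("lock acquisition failed");
    assert_eq!(out, 42);
}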
@@ -178,7 +178,7 @@ impl WalletData {
         wdat.write(data_file_path)?;

         // delete the lock file
-        fs::remove_file(lock_file_path).map_err(|e| {
+        fs::remove_file(lock_file_path).map_err(|_| {
             Error::WalletData(format!("Could not remove wallet lock file. Maybe insufficient \
             rights?"))
         })?;