Cargo fmt all the things

Ignotus Peverell 2017-09-29 18:44:25 +00:00
parent 3b51180359
commit 8504efb796
57 changed files with 3678 additions and 2600 deletions


@@ -26,12 +26,14 @@ use rest::Error;
 /// returns a JSON object. Handles request building, JSON deserialization and
 /// response code checking.
 pub fn get<'a, T>(url: &'a str) -> Result<T, Error>
-where for<'de> T: Deserialize<'de>
+where
+	for<'de> T: Deserialize<'de>,
 {
 	let client = hyper::Client::new();
 	let res = check_error(client.get(url).send())?;
-	serde_json::from_reader(res)
-		.map_err(|e| Error::Internal(format!("Server returned invalid JSON: {}", e)))
+	serde_json::from_reader(res).map_err(|e| {
+		Error::Internal(format!("Server returned invalid JSON: {}", e))
+	})
 }

 /// Helper function to easily issue a HTTP POST request with the provided JSON
@@ -39,15 +41,18 @@ pub fn get<'a, T>(url: &'a str) -> Result<T, Error>
 /// building, JSON serialization and deserialization, and response code
 /// checking.
 pub fn post<'a, IN, OUT>(url: &'a str, input: &IN) -> Result<OUT, Error>
-where IN: Serialize,
-	for<'de> OUT: Deserialize<'de>
+where
+	IN: Serialize,
+	for<'de> OUT: Deserialize<'de>,
 {
-	let in_json = serde_json::to_string(input)
-		.map_err(|e| Error::Internal(format!("Could not serialize data to JSON: {}", e)))?;
+	let in_json = serde_json::to_string(input).map_err(|e| {
+		Error::Internal(format!("Could not serialize data to JSON: {}", e))
+	})?;
 	let client = hyper::Client::new();
 	let res = check_error(client.post(url).body(&mut in_json.as_bytes()).send())?;
-	serde_json::from_reader(res)
-		.map_err(|e| Error::Internal(format!("Server returned invalid JSON: {}", e)))
+	serde_json::from_reader(res).map_err(|e| {
+		Error::Internal(format!("Server returned invalid JSON: {}", e))
+	})
 }

 // convert hyper error and check for non success response codes
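Aside: a minimal usage sketch of the two helpers above. The endpoint path and the Tip type here are illustrative assumptions, not part of this diff:

// Hypothetical caller; assumes a node serving JSON at this address.
#[derive(Deserialize)]
struct Tip {
	height: u64,
}

fn fetch_tip() -> Result<Tip, Error> {
	// get() builds the request, checks the response code, then deserializes,
	// mapping malformed JSON to Error::Internal
	get::<Tip>("http://127.0.0.1:13413/v1/chain/tip")
}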


@@ -203,25 +203,35 @@ struct OpWrapper<E> {
 }

 impl<E> Handler for OpWrapper<E>
-where E: ApiEndpoint
+where
+	E: ApiEndpoint,
 {
 	fn handle(&self, req: &mut Request) -> IronResult<Response> {
-		let t: E::OP_IN = serde_json::from_reader(req.body.by_ref())
-			.map_err(|e| IronError::new(e, status::BadRequest))?;
+		let t: E::OP_IN = serde_json::from_reader(req.body.by_ref()).map_err(|e| {
+			IronError::new(e, status::BadRequest)
+		})?;
 		let res = self.endpoint.operation(self.operation.clone(), t)?;
-		let res_json = serde_json::to_string(&res)
-			.map_err(|e| IronError::new(e, status::InternalServerError))?;
+		let res_json = serde_json::to_string(&res).map_err(|e| {
+			IronError::new(e, status::InternalServerError)
+		})?;
 		Ok(Response::with((status::Ok, res_json)))
 	}
 }

 fn extract_param<ID>(req: &mut Request, param: &'static str) -> IronResult<ID>
-where ID: ToString + FromStr,
-	<ID as FromStr>::Err: Debug + Send + error::Error + 'static
+where
+	ID: ToString + FromStr,
+	<ID as FromStr>::Err: Debug + Send + error::Error + 'static,
 {
-	let id = req.extensions.get::<Router>().unwrap().find(param).unwrap_or("");
-	id.parse::<ID>().map_err(|e| IronError::new(e, status::BadRequest))
+	let id = req.extensions
+		.get::<Router>()
+		.unwrap()
+		.find(param)
+		.unwrap_or("");
+	id.parse::<ID>().map_err(
+		|e| IronError::new(e, status::BadRequest),
+	)
 }

 /// HTTP server allowing the registration of ApiEndpoint implementations.
@@ -229,7 +239,6 @@ pub struct ApiServer {
 	root: String,
 	router: Router,
 	server_listener: Option<Listening>,
-
 }

 impl ApiServer {
@@ -245,7 +254,7 @@ impl ApiServer {
 	/// Starts the ApiServer at the provided address.
 	pub fn start<A: ToSocketAddrs>(&mut self, addr: A) -> Result<(), String> {
-		//replace this value to satisfy borrow checker
+		// replace this value to satisfy borrow checker
 		let r = mem::replace(&mut self.router, Router::new());
 		let result = Iron::new(r).http(addr);
 		let return_value = result.as_ref().map(|_| ()).map_err(|e| e.to_string());
@@ -254,7 +263,7 @@ impl ApiServer {
 	}

 	/// Stops the API server
-	pub fn stop(&mut self){
+	pub fn stop(&mut self) {
 		let r = mem::replace(&mut self.server_listener, None);
 		r.unwrap().close().unwrap();
 	}
@@ -262,8 +271,9 @@ impl ApiServer {
 	/// Register a new API endpoint, providing a relative URL for the new
 	/// endpoint.
 	pub fn register_endpoint<E>(&mut self, subpath: String, endpoint: E)
-	where E: ApiEndpoint,
-		<<E as ApiEndpoint>::ID as FromStr>::Err: Debug + Send + error::Error
+	where
+		E: ApiEndpoint,
+		<<E as ApiEndpoint>::ID as FromStr>::Err: Debug + Send + error::Error,
 	{
 		assert_eq!(subpath.chars().nth(0).unwrap(), '/');
@@ -281,7 +291,12 @@ impl ApiServer {
 					endpoint: endpoint.clone(),
 				};
 				let full_path = format!("{}/{}", root.clone(), op_s.clone());
-				self.router.route(op.to_method(), full_path.clone(), wrapper, route_name);
+				self.router.route(
+					op.to_method(),
+					full_path.clone(),
+					wrapper,
+					route_name,
+				);
 				info!("route: POST {}", full_path);
 			} else {
@@ -294,15 +309,21 @@ impl ApiServer {
 					_ => panic!("unreachable"),
 				};
 				let wrapper = ApiWrapper(endpoint.clone());
-				self.router.route(op.to_method(), full_path.clone(), wrapper, route_name);
+				self.router.route(
+					op.to_method(),
+					full_path.clone(),
+					wrapper,
+					route_name,
+				);
 				info!("route: {} {}", op.to_method(), full_path);
 			}
 		}

 		// support for the HTTP Options method by differentiating what's on the
 		// root resource vs the id resource
-		let (root_opts, sub_opts) =
-			endpoint.operations().iter().fold((vec![], vec![]), |mut acc, op| {
+		let (root_opts, sub_opts) = endpoint.operations().iter().fold(
+			(vec![], vec![]),
+			|mut acc, op| {
 				let m = op.to_method();
 				if m == Method::Post {
 					acc.0.push(m);
@@ -310,19 +331,26 @@ impl ApiServer {
 					acc.1.push(m);
 				}
 				acc
-			});
-		self.router.options(root.clone(),
-			move |_: &mut Request| {
-				Ok(Response::with((status::Ok,
-					Header(headers::Allow(root_opts.clone())))))
-			},
-			"option_".to_string() + route_postfix);
-		self.router.options(root.clone() + "/:id",
-			move |_: &mut Request| {
-				Ok(Response::with((status::Ok,
-					Header(headers::Allow(sub_opts.clone())))))
-			},
-			"option_id_".to_string() + route_postfix);
+			},
+		);
+		self.router.options(
+			root.clone(),
+			move |_: &mut Request| {
+				Ok(Response::with(
+					(status::Ok, Header(headers::Allow(root_opts.clone()))),
+				))
+			},
+			"option_".to_string() + route_postfix,
+		);
+		self.router.options(
+			root.clone() + "/:id",
+			move |_: &mut Request| {
+				Ok(Response::with(
+					(status::Ok, Header(headers::Allow(sub_opts.clone()))),
+				))
+			},
+			"option_id_".to_string() + route_postfix,
+		);
 	}
 }
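Aside: the fold feeding the two options routes above simply partitions the endpoint's methods between the root resource and the /:id sub-resource. A self-contained sketch of that partitioning, using plain strings in place of hyper's Method (an assumption for illustration):

// POST-like operations belong on the root resource, everything else
// on the /:id sub-resource.
fn partition(methods: Vec<&'static str>) -> (Vec<&'static str>, Vec<&'static str>) {
	methods.into_iter().fold((vec![], vec![]), |mut acc, m| {
		if m == "POST" { acc.0.push(m) } else { acc.1.push(m) }
		acc
	})
}

fn main() {
	let (root, sub) = partition(vec!["POST", "GET", "DELETE"]);
	assert_eq!(root, vec!["POST"]);
	assert_eq!(sub, vec!["GET", "DELETE"]);
}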


@@ -30,9 +30,7 @@ pub struct Tip {
 impl Tip {
 	pub fn from_tip(tip: chain::Tip) -> Tip {
-		Tip {
-			height: tip.height,
-		}
+		Tip { height: tip.height }
 	}
 }

@@ -60,8 +58,11 @@ impl Output {
 	pub fn from_output(output: &core::Output, block_header: &core::BlockHeader) -> Output {
 		let (output_type, lock_height) = match output.features {
 			x if x.contains(core::transaction::COINBASE_OUTPUT) => {
-				(OutputType::Coinbase, block_header.height + consensus::COINBASE_MATURITY)
-			},
+				(
+					OutputType::Coinbase,
+					block_header.height + consensus::COINBASE_MATURITY,
+				)
+			}
 			_ => (OutputType::Transaction, 0),
 		};
 		Output {


@@ -29,7 +29,7 @@ use store;
 use sumtree;
 use types::*;
-use core::global::{MiningParameterMode,MINING_PARAMETER_MODE};
+use core::global::{MiningParameterMode, MINING_PARAMETER_MODE};

 const MAX_ORPHANS: usize = 20;

@@ -44,7 +44,7 @@ pub struct Chain {
 	orphans: Arc<Mutex<VecDeque<(Options, Block)>>>,
 	sumtrees: Arc<RwLock<sumtree::SumTrees>>,
-	//POW verification function
+	// POW verification function
 	pow_verifier: fn(&BlockHeader, u32) -> bool,
 }

@@ -52,14 +52,13 @@ unsafe impl Sync for Chain {}
 unsafe impl Send for Chain {}

 impl Chain {
 	/// Check whether the chain exists. If not, the call to 'init' will
 	/// expect an already mined genesis block. This keeps the chain free
 	/// from needing to know about the mining implementation
-	pub fn chain_exists(db_root: String)->bool {
+	pub fn chain_exists(db_root: String) -> bool {
 		let chain_store = store::ChainKVStore::new(db_root).unwrap();
 		match chain_store.head() {
-			Ok(_) => {true},
+			Ok(_) => true,
 			Err(NotFoundErr) => false,
 			Err(_) => false,
 		}
@@ -138,7 +137,12 @@ impl Chain {
 				orphans.truncate(MAX_ORPHANS);
 			}
 			Err(ref e) => {
-				info!("Rejected block {} at {} : {:?}", b.hash(), b.header.height, e);
+				info!(
+					"Rejected block {} at {} : {:?}",
+					b.hash(),
+					b.header.height,
+					e
+				);
 			}
 		}

@@ -161,7 +165,7 @@ impl Chain {
 	fn ctx_from_head(&self, head: Tip, opts: Options) -> pipe::BlockContext {
 		let opts_in = opts;
-		let param_ref=MINING_PARAMETER_MODE.read().unwrap();
+		let param_ref = MINING_PARAMETER_MODE.read().unwrap();
 		let opts_in = match *param_ref {
 			MiningParameterMode::AutomatedTesting => opts_in | EASY_POW,
 			MiningParameterMode::UserTesting => opts_in | EASY_POW,
@@ -209,7 +213,9 @@ impl Chain {
 		let sumtrees = self.sumtrees.read().unwrap();
 		let is_unspent = sumtrees.is_unspent(output_ref)?;
 		if is_unspent {
-			self.store.get_output_by_commit(output_ref).map_err(&Error::StoreErr)
+			self.store.get_output_by_commit(output_ref).map_err(
+				&Error::StoreErr,
+			)
 		} else {
 			Err(Error::OutputNotFound)
 		}
@@ -266,10 +272,13 @@ impl Chain {
 	}

 	/// Gets the block header by the provided output commitment
-	pub fn get_block_header_by_output_commit(&self, commit: &Commitment) -> Result<BlockHeader, Error> {
-		self.store.get_block_header_by_output_commit(commit).map_err(
-			&Error::StoreErr,
-		)
+	pub fn get_block_header_by_output_commit(
+		&self,
+		commit: &Commitment,
+	) -> Result<BlockHeader, Error> {
+		self.store
+			.get_block_header_by_output_commit(commit)
+			.map_err(&Error::StoreErr)
 	}

 	/// Get the tip of the header chain


@@ -139,7 +139,7 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), E
 	if header.height != prev.height + 1 {
 		return Err(Error::InvalidBlockHeight);
 	}
-	if header.timestamp <= prev.timestamp && !global::is_automated_testing_mode(){
+	if header.timestamp <= prev.timestamp && !global::is_automated_testing_mode() {
 		// prevent time warp attacks and some timestamp manipulations by forcing strict
 		// time progression (but not in CI mode)
 		return Err(Error::InvalidBlockTime);
@@ -182,7 +182,11 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), E
 }

 /// Fully validate the block content.
-fn validate_block(b: &Block, ctx: &mut BlockContext, ext: &mut sumtree::Extension) -> Result<(), Error> {
+fn validate_block(
+	b: &Block,
+	ctx: &mut BlockContext,
+	ext: &mut sumtree::Extension,
+) -> Result<(), Error> {
 	if b.header.height > ctx.head.height + 1 {
 		return Err(Error::Orphan);
 	}
@@ -194,10 +198,13 @@ fn validate_block(
 	// check that all the outputs of the block are "new" -
 	// that they do not clobber any existing unspent outputs (by their commitment)
 	//
-	// TODO - do we need to do this here (and can we do this here if we need access to the chain)
-	// see check_duplicate_outputs in pool for the analogous operation on transaction outputs
+	// TODO - do we need to do this here (and can we do this here if we need access
+	// to the chain)
+	// see check_duplicate_outputs in pool for the analogous operation on
+	// transaction outputs
 	// for output in &block.outputs {
-	// here we would check that the output is not a duplicate output based on the current chain
+	// here we would check that the output is not a duplicate output based on the
+	// current chain
 	// };

@@ -228,7 +235,11 @@ fn validate_block(
 	if forked_block.header.height > 0 {
 		let last_output = &forked_block.outputs[forked_block.outputs.len() - 1];
 		let last_kernel = &forked_block.kernels[forked_block.kernels.len() - 1];
-		ext.rewind(forked_block.header.height, last_output, last_kernel)?;
+		ext.rewind(
+			forked_block.header.height,
+			last_output,
+			last_kernel,
+		)?;
 	}

 	// apply all forked blocks, including this new one
@@ -240,27 +251,33 @@ fn validate_block(
 	}

 	let (utxo_root, rproof_root, kernel_root) = ext.roots();
-	if utxo_root.hash != b.header.utxo_root ||
-		rproof_root.hash != b.header.range_proof_root ||
-		kernel_root.hash != b.header.kernel_root {
+	if utxo_root.hash != b.header.utxo_root || rproof_root.hash != b.header.range_proof_root ||
+		kernel_root.hash != b.header.kernel_root
+	{
 		return Err(Error::InvalidRoot);
 	}

-	// check that any coinbase outputs are spendable (that they have matured sufficiently)
+	// check that any coinbase outputs are spendable (that they have matured
+	// sufficiently)
 	for input in &b.inputs {
 		if let Ok(output) = ctx.store.get_output_by_commit(&input.commitment()) {
 			if output.features.contains(transaction::COINBASE_OUTPUT) {
-				if let Ok(output_header) = ctx.store.get_block_header_by_output_commit(&input.commitment()) {
-					// TODO - make sure we are not off-by-1 here vs. the equivalent transaction validation rule
+				if let Ok(output_header) =
+					ctx.store.get_block_header_by_output_commit(
+						&input.commitment(),
+					)
+				{
+					// TODO - make sure we are not off-by-1 here vs. the equivalent transaction
+					// validation rule
					if b.header.height <= output_header.height + consensus::COINBASE_MATURITY {
 						return Err(Error::ImmatureCoinbase);
 					}
 				};
 			};
 		};
-	};
+	}

 	Ok(())
 }
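Aside: the maturity rule enforced (and flagged by the TODO) above reduces to a height comparison. A minimal sketch, using COINBASE_MATURITY = 3 as set elsewhere in this diff:

const COINBASE_MATURITY: u64 = 3;

// A coinbase minted at `coinbase_height` is spendable only in a block
// strictly higher than coinbase_height + COINBASE_MATURITY.
fn coinbase_spendable(block_height: u64, coinbase_height: u64) -> bool {
	block_height > coinbase_height + COINBASE_MATURITY
}

fn main() {
	assert!(!coinbase_spendable(4, 1)); // 4 <= 1 + 3 would be ImmatureCoinbase
	assert!(coinbase_spendable(5, 1));
}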


@@ -85,7 +85,9 @@ impl ChainStore for ChainKVStore {
 	}

 	fn get_block_header(&self, h: &Hash) -> Result<BlockHeader, Error> {
-		option_to_not_found(self.db.get_ser(&to_key(BLOCK_HEADER_PREFIX, &mut h.to_vec())))
+		option_to_not_found(self.db.get_ser(
+			&to_key(BLOCK_HEADER_PREFIX, &mut h.to_vec()),
+		))
 	}

 	fn check_block_exists(&self, h: &Hash) -> Result<bool, Error> {
@@ -97,13 +99,30 @@ impl ChainStore for ChainKVStore {
 		let mut batch = self.db
 			.batch()
 			.put_ser(&to_key(BLOCK_PREFIX, &mut b.hash().to_vec())[..], b)?
-			.put_ser(&to_key(BLOCK_HEADER_PREFIX, &mut b.hash().to_vec())[..], &b.header)?;
+			.put_ser(
+				&to_key(BLOCK_HEADER_PREFIX, &mut b.hash().to_vec())[..],
+				&b.header,
+			)?;

 		// saving the full output under its hash, as well as a commitment to hash index
 		for out in &b.outputs {
 			batch = batch
-				.put_ser(&to_key(OUTPUT_COMMIT_PREFIX, &mut out.commitment().as_ref().to_vec())[..], out)?
-				.put_ser(&to_key(HEADER_BY_OUTPUT_PREFIX, &mut out.commitment().as_ref().to_vec())[..], &b.hash())?;
+				.put_ser(
+					&to_key(
+						OUTPUT_COMMIT_PREFIX,
+						&mut out.commitment().as_ref().to_vec(),
+					)
+						[..],
+					out,
+				)?
+				.put_ser(
+					&to_key(
+						HEADER_BY_OUTPUT_PREFIX,
+						&mut out.commitment().as_ref().to_vec(),
+					)
+						[..],
+					&b.hash(),
+				)?;
 		}

 		batch.write()
 	}
@@ -111,11 +130,14 @@ impl ChainStore for ChainKVStore {
 	// lookup the block header hash by output commitment
 	// lookup the block header based on this hash
 	// to check the chain is correct compare this block header to
-	// the block header currently indexed at the relevant block height (tbd if actually necessary)
+	// the block header currently indexed at the relevant block height (tbd if
+	// actually necessary)
 	//
 	// NOTE: This index is not exhaustive.
-	// This node may not have seen this full block, so may not have populated the index.
-	// Block headers older than some threshold (2 months?) will not necessarily be included
+	// This node may not have seen this full block, so may not have populated the
+	// index.
+	// Block headers older than some threshold (2 months?) will not necessarily be
+	// included
 	// in this index.
 	//
 	fn get_block_header_by_output_commit(&self, commit: &Commitment) -> Result<BlockHeader, Error> {
@@ -133,13 +155,16 @@ impl ChainStore for ChainKVStore {
 			} else {
 				Err(Error::NotFoundErr)
 			}
-			},
-			None => Err(Error::NotFoundErr)
+			}
+			None => Err(Error::NotFoundErr),
 		}
 	}

 	fn save_block_header(&self, bh: &BlockHeader) -> Result<(), Error> {
-		self.db.put_ser(&to_key(BLOCK_HEADER_PREFIX, &mut bh.hash().to_vec())[..], bh)
+		self.db.put_ser(
+			&to_key(BLOCK_HEADER_PREFIX, &mut bh.hash().to_vec())[..],
+			bh,
+		)
 	}

 	fn get_header_by_height(&self, height: u64) -> Result<BlockHeader, Error> {
@@ -154,26 +179,44 @@ impl ChainStore for ChainKVStore {
 	}

 	fn save_output_pos(&self, commit: &Commitment, pos: u64) -> Result<(), Error> {
-		self.db.put_ser(&to_key(COMMIT_POS_PREFIX, &mut commit.as_ref().to_vec())[..], &pos)
+		self.db.put_ser(
+			&to_key(COMMIT_POS_PREFIX, &mut commit.as_ref().to_vec())[..],
+			&pos,
+		)
 	}

 	fn get_output_pos(&self, commit: &Commitment) -> Result<u64, Error> {
-		option_to_not_found(self.db.get_ser(&to_key(COMMIT_POS_PREFIX, &mut commit.as_ref().to_vec())))
+		option_to_not_found(self.db.get_ser(&to_key(
+			COMMIT_POS_PREFIX,
+			&mut commit.as_ref().to_vec(),
+		)))
 	}

 	fn save_kernel_pos(&self, excess: &Commitment, pos: u64) -> Result<(), Error> {
-		self.db.put_ser(&to_key(KERNEL_POS_PREFIX, &mut excess.as_ref().to_vec())[..], &pos)
+		self.db.put_ser(
+			&to_key(KERNEL_POS_PREFIX, &mut excess.as_ref().to_vec())[..],
+			&pos,
+		)
 	}

 	fn get_kernel_pos(&self, excess: &Commitment) -> Result<u64, Error> {
-		option_to_not_found(self.db.get_ser(&to_key(KERNEL_POS_PREFIX, &mut excess.as_ref().to_vec())))
+		option_to_not_found(self.db.get_ser(&to_key(
+			KERNEL_POS_PREFIX,
+			&mut excess.as_ref().to_vec(),
+		)))
 	}

-	/// Maintain consistency of the "header_by_height" index by traversing back through the
-	/// current chain and updating "header_by_height" until we reach a block_header
-	/// that is consistent with its height (everything prior to this will be consistent)
+	/// Maintain consistency of the "header_by_height" index by traversing back
+	/// through the
+	/// current chain and updating "header_by_height" until we reach a
+	/// block_header
+	/// that is consistent with its height (everything prior to this will be
+	/// consistent)
 	fn setup_height(&self, bh: &BlockHeader) -> Result<(), Error> {
-		self.db.put_ser(&u64_to_key(HEADER_HEIGHT_PREFIX, bh.height), bh)?;
+		self.db.put_ser(
+			&u64_to_key(HEADER_HEIGHT_PREFIX, bh.height),
+			bh,
+		)?;
 		if bh.height == 0 {
 			return Ok(());
 		}
@@ -184,10 +227,12 @@ impl ChainStore for ChainKVStore {
 			let prev = self.get_header_by_height(prev_height)?;
 			if prev.hash() != prev_h {
 				let real_prev = self.get_block_header(&prev_h)?;
-				self.db.put_ser(
-					&u64_to_key(HEADER_HEIGHT_PREFIX, real_prev.height),
-					&real_prev,
-				).unwrap();
+				self.db
+					.put_ser(
+						&u64_to_key(HEADER_HEIGHT_PREFIX, real_prev.height),
+						&real_prev,
+					)
+					.unwrap();
 				prev_h = real_prev.previous;
 				prev_height = real_prev.height - 1;
 			} else {
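Aside: the walk-back in setup_height above repairs the height index after a reorg: it rewrites the height-to-header mapping along the new chain until it meets an entry that already agrees, at which point everything below is consistent. A simplified sketch over in-memory maps (the types here are hypothetical stand-ins for the store):

use std::collections::HashMap;

#[derive(Clone)]
struct Header { height: u64, hash: u64, previous: u64 }

fn setup_height(by_height: &mut HashMap<u64, Header>, by_hash: &HashMap<u64, Header>, tip: &Header) {
	by_height.insert(tip.height, tip.clone());
	let mut prev_h = tip.previous;
	let mut prev_height = tip.height.saturating_sub(1);
	while prev_height > 0 {
		// stop as soon as the indexed header already matches the chain
		if by_height.get(&prev_height).map(|h| h.hash == prev_h) == Some(true) {
			break;
		}
		let real_prev = by_hash[&prev_h].clone();
		by_height.insert(real_prev.height, real_prev.clone());
		prev_h = real_prev.previous;
		prev_height = real_prev.height.saturating_sub(1);
	}
}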


@@ -35,12 +35,18 @@ const UTXO_SUBDIR: &'static str = "utxo";
 const RANGE_PROOF_SUBDIR: &'static str = "rangeproof";
 const KERNEL_SUBDIR: &'static str = "kernel";

-struct PMMRHandle<T> where T: Summable + Clone {
+struct PMMRHandle<T>
+where
+	T: Summable + Clone,
+{
 	backend: PMMRBackend<T>,
 	last_pos: u64,
 }

-impl<T> PMMRHandle<T> where T: Summable + Clone {
+impl<T> PMMRHandle<T>
+where
+	T: Summable + Clone,
+{
 	fn new(root_dir: String, file_name: &str) -> Result<PMMRHandle<T>, Error> {
 		let path = Path::new(&root_dir).join(SUMTREES_SUBDIR).join(file_name);
 		fs::create_dir_all(path.clone())?;
@@ -88,7 +94,7 @@ impl SumTrees {
 		match rpos {
 			Ok(pos) => Ok(self.output_pmmr_h.backend.get(pos).is_some()),
 			Err(grin_store::Error::NotFoundErr) => Ok(false),
-			Err(e) => Err(Error::StoreErr(e))
+			Err(e) => Err(Error::StoreErr(e)),
 		}
 	}
 }
@@ -101,7 +107,9 @@ impl SumTrees {
 /// If the closure returns an error, modifications are canceled and the unit
 /// of work is abandoned. Otherwise, the unit of work is permanently applied.
 pub fn extending<'a, F, T>(trees: &'a mut SumTrees, inner: F) -> Result<T, Error>
-where F: FnOnce(&mut Extension) -> Result<T, Error> {
+where
+	F: FnOnce(&mut Extension) -> Result<T, Error>,
+{
 	let sizes: (u64, u64, u64);
 	let res: Result<T, Error>;
@@ -153,17 +161,25 @@ pub struct Extension<'a> {
 	commit_index: Arc<ChainStore>,
 	new_output_commits: HashMap<Commitment, u64>,
 	new_kernel_excesses: HashMap<Commitment, u64>,
-	rollback: bool
+	rollback: bool,
 }

 impl<'a> Extension<'a> {
 	// constructor
 	fn new(trees: &'a mut SumTrees, commit_index: Arc<ChainStore>) -> Extension<'a> {
 		Extension {
-			output_pmmr: PMMR::at(&mut trees.output_pmmr_h.backend, trees.output_pmmr_h.last_pos),
-			rproof_pmmr: PMMR::at(&mut trees.rproof_pmmr_h.backend, trees.rproof_pmmr_h.last_pos),
-			kernel_pmmr: PMMR::at(&mut trees.kernel_pmmr_h.backend, trees.kernel_pmmr_h.last_pos),
+			output_pmmr: PMMR::at(
+				&mut trees.output_pmmr_h.backend,
+				trees.output_pmmr_h.last_pos,
+			),
+			rproof_pmmr: PMMR::at(
+				&mut trees.rproof_pmmr_h.backend,
+				trees.rproof_pmmr_h.last_pos,
+			),
+			kernel_pmmr: PMMR::at(
+				&mut trees.kernel_pmmr_h.backend,
+				trees.kernel_pmmr_h.last_pos,
+			),
 			commit_index: commit_index,
 			new_output_commits: HashMap::new(),
 			new_kernel_excesses: HashMap::new(),
@@ -184,14 +200,17 @@ impl<'a> Extension<'a> {
 			if let Ok(pos) = pos_res {
 				match self.output_pmmr.prune(pos, b.header.height as u32) {
 					Ok(true) => {
-						self.rproof_pmmr.prune(pos, b.header.height as u32)
+						self.rproof_pmmr
+							.prune(pos, b.header.height as u32)
 							.map_err(|s| Error::SumTreeErr(s))?;
-					},
+					}
 					Ok(false) => return Err(Error::AlreadySpent),
 					Err(s) => return Err(Error::SumTreeErr(s)),
 				}
 			} else {
-				return Err(Error::SumTreeErr(format!("Missing index for {:?}", input.commitment())));
+				return Err(Error::SumTreeErr(
+					format!("Missing index for {:?}", input.commitment()),
+				));
 			}
 		}
@@ -200,15 +219,19 @@ impl<'a> Extension<'a> {
 				return Err(Error::DuplicateCommitment(out.commitment()));
 			}
 			// push new outputs commitments in their MMR and save them in the index
-			let pos = self.output_pmmr.push(SumCommit {
-				commit: out.commitment(),
-				secp: secp.clone(),
-			}).map_err(&Error::SumTreeErr)?;
+			let pos = self.output_pmmr
+				.push(SumCommit {
+					commit: out.commitment(),
+					secp: secp.clone(),
+				})
+				.map_err(&Error::SumTreeErr)?;
 			self.new_output_commits.insert(out.commitment(), pos);

 			// push range proofs in their MMR
-			self.rproof_pmmr.push(NoSum(out.proof)).map_err(&Error::SumTreeErr)?;
+			self.rproof_pmmr.push(NoSum(out.proof)).map_err(
+				&Error::SumTreeErr,
+			)?;
 		}

 		for kernel in &b.kernels {
@@ -216,7 +239,9 @@ impl<'a> Extension<'a> {
 				return Err(Error::DuplicateKernel(kernel.excess.clone()));
 			}
 			// push kernels in their MMR
-			let pos = self.kernel_pmmr.push(NoSum(kernel.clone())).map_err(&Error::SumTreeErr)?;
+			let pos = self.kernel_pmmr.push(NoSum(kernel.clone())).map_err(
+				&Error::SumTreeErr,
+			)?;
 			self.new_kernel_excesses.insert(kernel.excess, pos);
 		}
 		Ok(())
@@ -238,16 +263,28 @@ impl<'a> Extension<'a> {
 		let out_pos_rew = self.commit_index.get_output_pos(&output.commitment())?;
 		let kern_pos_rew = self.commit_index.get_kernel_pos(&kernel.excess)?;

-		self.output_pmmr.rewind(out_pos_rew, height as u32).map_err(&Error::SumTreeErr)?;
-		self.rproof_pmmr.rewind(out_pos_rew, height as u32).map_err(&Error::SumTreeErr)?;
-		self.kernel_pmmr.rewind(kern_pos_rew, height as u32).map_err(&Error::SumTreeErr)?;
+		self.output_pmmr
+			.rewind(out_pos_rew, height as u32)
+			.map_err(&Error::SumTreeErr)?;
+		self.rproof_pmmr
+			.rewind(out_pos_rew, height as u32)
+			.map_err(&Error::SumTreeErr)?;
+		self.kernel_pmmr
+			.rewind(kern_pos_rew, height as u32)
+			.map_err(&Error::SumTreeErr)?;
 		Ok(())
 	}

 	/// Current root hashes and sums (if applicable) for the UTXO, range proof
 	/// and kernel sum trees.
-	pub fn roots(&self) -> (HashSum<SumCommit>, HashSum<NoSum<RangeProof>>, HashSum<NoSum<TxKernel>>) {
-		(self.output_pmmr.root(), self.rproof_pmmr.root(), self.kernel_pmmr.root())
+	pub fn roots(
+		&self,
+	) -> (HashSum<SumCommit>, HashSum<NoSum<RangeProof>>, HashSum<NoSum<TxKernel>>) {
+		(
+			self.output_pmmr.root(),
+			self.rproof_pmmr.root(),
+			self.kernel_pmmr.root(),
+		)
 	}

 	/// Force the rollback of this extension, no matter the result
@@ -257,7 +294,10 @@ impl<'a> Extension<'a> {

 	// Sizes of the sum trees, used by `extending` on rollback.
 	fn sizes(&self) -> (u64, u64, u64) {
-		(self.output_pmmr.unpruned_size(), self.rproof_pmmr.unpruned_size(),
-		 self.kernel_pmmr.unpruned_size())
+		(
+			self.output_pmmr.unpruned_size(),
+			self.rproof_pmmr.unpruned_size(),
+			self.kernel_pmmr.unpruned_size(),
+		)
 	}
 }
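Aside: extending above is a unit-of-work pattern: run a closure against an extension of the trees, then commit or roll back depending on the outcome. Its control flow, stripped to a skeleton (State and the String error type are hypothetical stand-ins):

#[derive(Clone)]
struct State { size: u64 }

// Apply `inner`; keep the mutation on success, restore the saved
// sizes on failure, mirroring the commit/rollback in extending().
fn extending<F, T>(state: &mut State, inner: F) -> Result<T, String>
where
	F: FnOnce(&mut State) -> Result<T, String>,
{
	let saved = state.clone(); // sizes recorded before the extension
	match inner(state) {
		Ok(t) => Ok(t), // commit
		Err(e) => {
			*state = saved; // rollback
			Err(e)
		}
	}
}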


@@ -208,7 +208,10 @@ pub trait ChainStore: Send + Sync {
 	fn get_output_by_commit(&self, commit: &Commitment) -> Result<Output, store::Error>;

 	/// Gets a block_header for the given input commit
-	fn get_block_header_by_output_commit(&self, commit: &Commitment) -> Result<BlockHeader, store::Error>;
+	fn get_block_header_by_output_commit(
+		&self,
+		commit: &Commitment,
+	) -> Result<BlockHeader, store::Error>;

 	/// Saves the position of an output, represented by its commitment, in the
 	/// UTXO MMR. Used as an index for spending and pruning.


@@ -35,7 +35,7 @@ use core::global::MiningParameterMode;
 use pow::{types, cuckoo, MiningWorker};

-fn clean_output_dir(dir_name:&str){
+fn clean_output_dir(dir_name: &str) {
 	let _ = fs::remove_dir_all(dir_name);
 }

@@ -44,11 +44,15 @@ fn setup(dir_name: &str) -> Chain {
 	clean_output_dir(dir_name);
 	global::set_mining_mode(MiningParameterMode::AutomatedTesting);
 	let mut genesis_block = None;
-	if !chain::Chain::chain_exists(dir_name.to_string()){
-		genesis_block=pow::mine_genesis_block(None);
+	if !chain::Chain::chain_exists(dir_name.to_string()) {
+		genesis_block = pow::mine_genesis_block(None);
 	}
-	chain::Chain::init(dir_name.to_string(), Arc::new(NoopAdapter {}),
-		genesis_block, pow::verify_size).unwrap()
+	chain::Chain::init(
+		dir_name.to_string(),
+		Arc::new(NoopAdapter {}),
+		genesis_block,
+		pow::verify_size,
+	).unwrap()
 }

 #[test]
@@ -67,7 +71,10 @@ fn mine_empty_chain() {
 	miner_config.cuckoo_miner_plugin_dir = Some(String::from("../target/debug/deps"));

 	let mut cuckoo_miner = cuckoo::Miner::new(
-		consensus::EASINESS, global::sizeshift() as u32, global::proofsize());
+		consensus::EASINESS,
+		global::sizeshift() as u32,
+		global::proofsize(),
+	);
 	for n in 1..4 {
 		let prev = chain.head_header().unwrap();
 		let reward_key = secp::key::SecretKey::new(&secp, &mut rng);
@@ -110,8 +117,9 @@ fn mine_empty_chain() {

 		// now check the header output index
 		let output = block.outputs[0];
-		let header_by_output_commit = chain.
-			get_block_header_by_output_commit(&output.commitment()).unwrap();
+		let header_by_output_commit = chain
+			.get_block_header_by_output_commit(&output.commitment())
+			.unwrap();
 		assert_eq!(header_by_output_commit.hash(), bhash);
 	}
 }
@@ -141,7 +149,7 @@ fn mine_forks() {

 		// checking our new head
 		let head = chain.head().unwrap();
-		assert_eq!(head.height, (n+1) as u64);
+		assert_eq!(head.height, (n + 1) as u64);
 		assert_eq!(head.last_block_h, bhash);
 		assert_eq!(head.prev_block_h, prev.hash());

@@ -151,7 +159,7 @@ fn mine_forks() {
 		// checking head switch
 		let head = chain.head().unwrap();
-		assert_eq!(head.height, (n+1) as u64);
+		assert_eq!(head.height, (n + 1) as u64);
 		assert_eq!(head.last_block_h, bhash);
 		assert_eq!(head.prev_block_h, prev.hash());
 	}


@@ -33,7 +33,7 @@ use core::global::MiningParameterMode;
 use pow::{types, cuckoo, MiningWorker};

-fn clean_output_dir(dir_name:&str){
+fn clean_output_dir(dir_name: &str) {
 	let _ = fs::remove_dir_all(dir_name);
 }

@@ -45,11 +45,15 @@ fn test_coinbase_maturity() {
 	let mut rng = OsRng::new().unwrap();

 	let mut genesis_block = None;
-	if !chain::Chain::chain_exists(".grin".to_string()){
-		genesis_block=pow::mine_genesis_block(None);
+	if !chain::Chain::chain_exists(".grin".to_string()) {
+		genesis_block = pow::mine_genesis_block(None);
 	}
-	let chain = chain::Chain::init(".grin".to_string(), Arc::new(NoopAdapter {}),
-		genesis_block, pow::verify_size).unwrap();
+	let chain = chain::Chain::init(
+		".grin".to_string(),
+		Arc::new(NoopAdapter {}),
+		genesis_block,
+		pow::verify_size,
+	).unwrap();

 	let secp = secp::Secp256k1::with_caps(secp::ContextFlag::Commit);

@@ -60,7 +64,11 @@ fn test_coinbase_maturity() {
 	};
 	miner_config.cuckoo_miner_plugin_dir = Some(String::from("../target/debug/deps"));

-	let mut cuckoo_miner = cuckoo::Miner::new(consensus::EASINESS, global::sizeshift() as u32, global::proofsize());
+	let mut cuckoo_miner = cuckoo::Miner::new(
+		consensus::EASINESS,
+		global::sizeshift() as u32,
+		global::proofsize(),
+	);

 	let prev = chain.head_header().unwrap();
 	let reward_key = secp::key::SecretKey::new(&secp, &mut rng);
@@ -79,7 +87,9 @@ fn test_coinbase_maturity() {
 	).unwrap();

 	assert_eq!(block.outputs.len(), 1);
-	assert!(block.outputs[0].features.contains(transaction::COINBASE_OUTPUT));
+	assert!(block.outputs[0].features.contains(
+		transaction::COINBASE_OUTPUT,
+	));

 	chain.process_block(block, chain::EASY_POW).unwrap();

@@ -88,9 +98,9 @@ fn test_coinbase_maturity() {
 	let amount = consensus::REWARD;
 	let (coinbase_txn, _) = build::transaction(vec![
 		build::input(amount, reward_key),
-		build::output_rand(amount-1),
-		build::with_fee(1)]
-	).unwrap();
+		build::output_rand(amount - 1),
+		build::with_fee(1),
+	]).unwrap();

 	let reward_key = secp::key::SecretKey::new(&secp, &mut rng);
 	let mut block = core::core::Block::new(&prev, vec![&coinbase_txn], reward_key).unwrap();
@@ -135,7 +145,7 @@ fn test_coinbase_maturity() {
 	).unwrap();

 	chain.process_block(block, chain::EASY_POW).unwrap();
-	};
+	}

 	let prev = chain.head_header().unwrap();


@@ -27,7 +27,8 @@ use core::target::Difficulty;
 pub const REWARD: u64 = 1_000_000_000;

 /// Number of blocks before a coinbase matures and can be spent
-/// TODO - reduced this for testing - need to investigate if we can lower this in test env
+/// TODO - reduced this for testing - need to investigate if we can lower this
+/// in test env
 // pub const COINBASE_MATURITY: u64 = 1_000;
 pub const COINBASE_MATURITY: u64 = 3;

@@ -99,7 +100,8 @@ impl fmt::Display for TargetError {
 /// difference between the median timestamps at the beginning and the end
 /// of the window.
 pub fn next_difficulty<T>(cursor: T) -> Result<Difficulty, TargetError>
-where T: IntoIterator<Item = Result<(u64, Difficulty), TargetError>>
+where
+	T: IntoIterator<Item = Result<(u64, Difficulty), TargetError>>,
 {

 	// Block times at the beginning and end of the adjustment window, used to
@@ -155,8 +157,9 @@ pub fn next_difficulty<T>(cursor: T) -> Result<Difficulty, TargetError>
 		ts_damp
 	};

-	Ok(diff_avg * Difficulty::from_num(BLOCK_TIME_WINDOW) /
-		Difficulty::from_num(adj_ts))
+	Ok(
+		diff_avg * Difficulty::from_num(BLOCK_TIME_WINDOW) / Difficulty::from_num(adj_ts),
+	)
 }

 #[cfg(test)]
@@ -171,24 +174,25 @@ mod test {
 	// Builds an iterator for next difficulty calculation with the provided
 	// constant time interval, difficulty and total length.
 	fn repeat(interval: u64, diff: u64, len: u64) -> Vec<Result<(u64, Difficulty), TargetError>> {
-		//watch overflow here, length shouldn't be ridiculous anyhow
+		// watch overflow here, length shouldn't be ridiculous anyhow
 		assert!(len < std::usize::MAX as u64);
 		let diffs = vec![Difficulty::from_num(diff); len as usize];
 		let times = (0..(len as usize)).map(|n| n * interval as usize).rev();
 		let pairs = times.zip(diffs.iter());
-		pairs.map(|(t, d)| Ok((t as u64, d.clone()))).collect::<Vec<_>>()
+		pairs
+			.map(|(t, d)| Ok((t as u64, d.clone())))
+			.collect::<Vec<_>>()
 	}

-	fn repeat_offs(from: u64,
-		interval: u64,
-		diff: u64,
-		len: u64)
-		-> Vec<Result<(u64, Difficulty), TargetError>> {
-		map_vec!(repeat(interval, diff, len), |e| {
-			match e.clone() {
-				Err(e) => Err(e),
-				Ok((t, d)) => Ok((t + from, d)),
-			}
+	fn repeat_offs(
+		from: u64,
+		interval: u64,
+		diff: u64,
+		len: u64,
+	) -> Vec<Result<(u64, Difficulty), TargetError>> {
+		map_vec!(repeat(interval, diff, len), |e| match e.clone() {
+			Err(e) => Err(e),
+			Ok((t, d)) => Ok((t + from, d)),
 		})
 	}

@@ -196,19 +200,28 @@ mod test {
 	#[test]
 	fn next_target_adjustment() {
 		// not enough data
-		assert_eq!(next_difficulty(vec![]).unwrap(), Difficulty::from_num(MINIMUM_DIFFICULTY));
+		assert_eq!(
+			next_difficulty(vec![]).unwrap(),
+			Difficulty::from_num(MINIMUM_DIFFICULTY)
+		);

-		assert_eq!(next_difficulty(vec![Ok((60, Difficulty::one()))]).unwrap(),
-			Difficulty::from_num(MINIMUM_DIFFICULTY));
+		assert_eq!(
+			next_difficulty(vec![Ok((60, Difficulty::one()))]).unwrap(),
+			Difficulty::from_num(MINIMUM_DIFFICULTY)
+		);

-		assert_eq!(next_difficulty(repeat(60, 10, DIFFICULTY_ADJUST_WINDOW)).unwrap(),
-			Difficulty::from_num(MINIMUM_DIFFICULTY));
+		assert_eq!(
+			next_difficulty(repeat(60, 10, DIFFICULTY_ADJUST_WINDOW)).unwrap(),
+			Difficulty::from_num(MINIMUM_DIFFICULTY)
+		);

 		// just enough data, right interval, should stay constant
 		let just_enough = DIFFICULTY_ADJUST_WINDOW + MEDIAN_TIME_WINDOW;
-		assert_eq!(next_difficulty(repeat(60, 1000, just_enough)).unwrap(),
-			Difficulty::from_num(1000));
+		assert_eq!(
+			next_difficulty(repeat(60, 1000, just_enough)).unwrap(),
+			Difficulty::from_num(1000)
+		);

 		// checking averaging works, window length is odd so need to compensate a little
 		let sec = DIFFICULTY_ADJUST_WINDOW / 2 + 1 + MEDIAN_TIME_WINDOW;
@@ -218,28 +231,44 @@ mod test {
 		assert_eq!(next_difficulty(s2).unwrap(), Difficulty::from_num(999));

 		// too slow, diff goes down
-		assert_eq!(next_difficulty(repeat(90, 1000, just_enough)).unwrap(),
-			Difficulty::from_num(889));
-		assert_eq!(next_difficulty(repeat(120, 1000, just_enough)).unwrap(),
-			Difficulty::from_num(800));
+		assert_eq!(
+			next_difficulty(repeat(90, 1000, just_enough)).unwrap(),
+			Difficulty::from_num(889)
+		);
+		assert_eq!(
+			next_difficulty(repeat(120, 1000, just_enough)).unwrap(),
+			Difficulty::from_num(800)
+		);

 		// too fast, diff goes up
-		assert_eq!(next_difficulty(repeat(55, 1000, just_enough)).unwrap(),
-			Difficulty::from_num(1021));
-		assert_eq!(next_difficulty(repeat(45, 1000, just_enough)).unwrap(),
-			Difficulty::from_num(1067));
+		assert_eq!(
+			next_difficulty(repeat(55, 1000, just_enough)).unwrap(),
+			Difficulty::from_num(1021)
+		);
+		assert_eq!(
+			next_difficulty(repeat(45, 1000, just_enough)).unwrap(),
+			Difficulty::from_num(1067)
+		);

 		// hitting lower time bound, should always get the same result below
-		assert_eq!(next_difficulty(repeat(20, 1000, just_enough)).unwrap(),
-			Difficulty::from_num(1200));
-		assert_eq!(next_difficulty(repeat(10, 1000, just_enough)).unwrap(),
-			Difficulty::from_num(1200));
+		assert_eq!(
+			next_difficulty(repeat(20, 1000, just_enough)).unwrap(),
+			Difficulty::from_num(1200)
+		);
+		assert_eq!(
+			next_difficulty(repeat(10, 1000, just_enough)).unwrap(),
+			Difficulty::from_num(1200)
+		);

 		// hitting higher time bound, should always get the same result above
-		assert_eq!(next_difficulty(repeat(160, 1000, just_enough)).unwrap(),
-			Difficulty::from_num(750));
-		assert_eq!(next_difficulty(repeat(200, 1000, just_enough)).unwrap(),
-			Difficulty::from_num(750));
+		assert_eq!(
+			next_difficulty(repeat(160, 1000, just_enough)).unwrap(),
+			Difficulty::from_num(750)
+		);
+		assert_eq!(
+			next_difficulty(repeat(200, 1000, just_enough)).unwrap(),
+			Difficulty::from_num(750)
+		);
 	}
 }
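Aside: stripped of the iterator plumbing, damping and clamping, the value returned by next_difficulty above scales the window's average difficulty by target time over observed time. A bare numeric sketch (damping is omitted, so these numbers will not match the damped test vectors above):

// diff_avg: average difficulty over the window
// target: the window's target duration; observed: its measured duration
fn next_diff(diff_avg: u64, target: u64, observed: u64) -> u64 {
	diff_avg * target / observed
}

fn main() {
	// blocks 50% too slow => difficulty drops by a third (before damping)
	assert_eq!(next_diff(1000, 60, 90), 666);
}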


@@ -85,14 +85,16 @@ impl Default for BlockHeader {
 /// Serialization of a block header
 impl Writeable for BlockHeader {
 	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
-		ser_multiwrite!(writer,
+		ser_multiwrite!(
+			writer,
 			[write_u64, self.height],
 			[write_fixed_bytes, &self.previous],
 			[write_i64, self.timestamp.to_timespec().sec],
 			[write_fixed_bytes, &self.utxo_root],
 			[write_fixed_bytes, &self.range_proof_root],
 			[write_fixed_bytes, &self.kernel_root],
-			[write_u8, self.features.bits()]);
+			[write_u8, self.features.bits()]
+		);

 		try!(writer.write_u64(self.nonce));
 		try!(self.difficulty.write(writer));
@@ -129,7 +131,9 @@ impl Readable for BlockHeader {
 			utxo_root: utxo_root,
 			range_proof_root: rproof_root,
 			kernel_root: kernel_root,
-			features: BlockFeatures::from_bits(features).ok_or(ser::Error::CorruptedData)?,
+			features: BlockFeatures::from_bits(features).ok_or(
+				ser::Error::CorruptedData,
+			)?,
 			pow: pow,
 			nonce: nonce,
 			difficulty: difficulty,
@@ -162,10 +166,12 @@ impl Writeable for Block {
 		try!(self.header.write(writer));

 		if writer.serialization_mode() != ser::SerializationMode::Hash {
-			ser_multiwrite!(writer,
+			ser_multiwrite!(
+				writer,
 				[write_u64, self.inputs.len() as u64],
 				[write_u64, self.outputs.len() as u64],
-				[write_u64, self.kernels.len() as u64]);
+				[write_u64, self.kernels.len() as u64]
+			);

 			for inp in &self.inputs {
 				try!(inp.write(writer));
@@ -234,10 +240,11 @@ impl Block {
 	/// Builds a new block from the header of the previous block, a vector of
 	/// transactions and the private key that will receive the reward. Checks
 	/// that all transactions are valid and calculates the Merkle tree.
-	pub fn new(prev: &BlockHeader,
-		txs: Vec<&Transaction>,
-		reward_key: SecretKey)
-		-> Result<Block, secp::Error> {
+	pub fn new(
+		prev: &BlockHeader,
+		txs: Vec<&Transaction>,
+		reward_key: SecretKey,
+	) -> Result<Block, secp::Error> {
 		let secp = Secp256k1::with_caps(secp::ContextFlag::Commit);
 		let (reward_out, reward_proof) = try!(Block::reward_output(reward_key, &secp));
@@ -248,11 +255,12 @@ impl Block {
 	/// Builds a new block ready to mine from the header of the previous block,
 	/// a vector of transactions and the reward information. Checks
 	/// that all transactions are valid and calculates the Merkle tree.
-	pub fn with_reward(prev: &BlockHeader,
-		txs: Vec<&Transaction>,
-		reward_out: Output,
-		reward_kern: TxKernel)
-		-> Result<Block, secp::Error> {
+	pub fn with_reward(
+		prev: &BlockHeader,
+		txs: Vec<&Transaction>,
+		reward_out: Output,
+		reward_kern: TxKernel,
+	) -> Result<Block, secp::Error> {
 		// note: the following reads easily but may not be the most efficient due to
 		// repeated iterations, revisit if a problem
 		let secp = Secp256k1::with_caps(secp::ContextFlag::Commit);
@@ -264,14 +272,12 @@ impl Block {
 		// build vectors with all inputs and all outputs, ordering them by hash
 		// needs to be a fold so we don't end up with a vector of vectors and we
 		// want to fully own the refs (not just a pointer like flat_map).
-		let mut inputs = txs.iter()
-			.fold(vec![], |mut acc, ref tx| {
+		let mut inputs = txs.iter().fold(vec![], |mut acc, ref tx| {
 			let mut inputs = tx.inputs.clone();
 			acc.append(&mut inputs);
 			acc
 		});
-		let mut outputs = txs.iter()
-			.fold(vec![], |mut acc, ref tx| {
+		let mut outputs = txs.iter().fold(vec![], |mut acc, ref tx| {
 			let mut outputs = tx.outputs.clone();
 			acc.append(&mut outputs);
 			acc
@@ -283,19 +289,24 @@ impl Block {

 		// calculate the overall Merkle tree and fees

-		Ok(Block {
-			header: BlockHeader {
-				height: prev.height + 1,
-				timestamp: time::Tm { tm_nsec: 0, ..time::now_utc() },
-				previous: prev.hash(),
-				total_difficulty: prev.pow.clone().to_difficulty() + prev.total_difficulty.clone(),
-				..Default::default()
-			},
-			inputs: inputs,
-			outputs: outputs,
-			kernels: kernels,
-		}
-		.compact())
+		Ok(
+			Block {
+				header: BlockHeader {
+					height: prev.height + 1,
+					timestamp: time::Tm {
+						tm_nsec: 0,
+						..time::now_utc()
+					},
+					previous: prev.hash(),
+					total_difficulty: prev.pow.clone().to_difficulty() +
+						prev.total_difficulty.clone(),
+					..Default::default()
+				},
+				inputs: inputs,
+				outputs: outputs,
+				kernels: kernels,
+			}.compact(),
+		)
 	}
@@ -384,8 +395,7 @@ impl Block {
 			inputs: all_inputs,
 			outputs: all_outputs,
 			kernels: all_kernels,
-		}
-		.compact()
+		}.compact()
 	}

 	/// Validates all the elements in a block that can be checked without
@@ -445,19 +455,21 @@ impl Block {
 			inputs: vec![],
 			outputs: cb_outs,
 			kernels: cb_kerns,
-		}
-		.verify_kernels(secp)
+		}.verify_kernels(secp)
 	}

 	/// Builds the blinded output and related signature proof for the block
 	/// reward.
-	pub fn reward_output(skey: secp::key::SecretKey,
-		secp: &Secp256k1)
-		-> Result<(Output, TxKernel), secp::Error> {
-		let msg = try!(secp::Message::from_slice(&[0; secp::constants::MESSAGE_SIZE]));
+	pub fn reward_output(
+		skey: secp::key::SecretKey,
+		secp: &Secp256k1,
+	) -> Result<(Output, TxKernel), secp::Error> {
+		let msg = try!(secp::Message::from_slice(
+			&[0; secp::constants::MESSAGE_SIZE],
+		));
 		let sig = try!(secp.sign(&msg, &skey));
 		let commit = secp.commit(REWARD, skey).unwrap();
-		//let switch_commit = secp.switch_commit(skey).unwrap();
+		// let switch_commit = secp.switch_commit(skey).unwrap();
 		let nonce = secp.nonce();
 		let rproof = secp.range_proof(0, REWARD, skey, commit, nonce);
@@ -583,7 +595,8 @@ mod test {
 			.collect::<Vec<_>>();
 		assert_eq!(coinbase_kernels.len(), 1);

-		// the block should be valid here (single coinbase output with corresponding txn kernel)
+		// the block should be valid here (single coinbase output with corresponding
+		// txn kernel)
 		assert_eq!(b.validate(&secp), Ok(()));
 	}
@@ -598,7 +611,10 @@ mod test {
 		assert!(b.outputs[0].features.contains(COINBASE_OUTPUT));
 		b.outputs[0].features.remove(COINBASE_OUTPUT);

-		assert_eq!(b.verify_coinbase(&secp), Err(secp::Error::IncorrectCommitSum));
+		assert_eq!(
+			b.verify_coinbase(&secp),
+			Err(secp::Error::IncorrectCommitSum)
+		);
 		assert_eq!(b.verify_kernels(&secp), Ok(()));

 		assert_eq!(b.validate(&secp), Err(secp::Error::IncorrectCommitSum));
@@ -614,7 +630,10 @@ mod test {
 		assert!(b.kernels[0].features.contains(COINBASE_KERNEL));
 		b.kernels[0].features.remove(COINBASE_KERNEL);

-		assert_eq!(b.verify_coinbase(&secp), Err(secp::Error::IncorrectCommitSum));
+		assert_eq!(
+			b.verify_coinbase(&secp),
+			Err(secp::Error::IncorrectCommitSum)
+		);
 		assert_eq!(b.verify_kernels(&secp), Ok(()));

 		assert_eq!(b.validate(&secp), Err(secp::Error::IncorrectCommitSum));
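Aside: the fold used in with_reward above to flatten per-transaction inputs and outputs (kept as a fold so the result is one owned Vec rather than a Vec of Vecs) looks like this in isolation, with u32 standing in for the real input type:

fn collect(txs: &[Vec<u32>]) -> Vec<u32> {
	txs.iter().fold(vec![], |mut acc, tx_inputs| {
		let mut inputs = tx_inputs.clone();
		acc.append(&mut inputs);
		acc
	})
}

fn main() {
	assert_eq!(collect(&[vec![1, 2], vec![3]]), vec![1, 2, 3]);
}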


@ -112,12 +112,14 @@ pub fn output(value: u64, blinding: SecretKey) -> Box<Append> {
let commit = build.secp.commit(value, blinding).unwrap(); let commit = build.secp.commit(value, blinding).unwrap();
let nonce = build.secp.nonce(); let nonce = build.secp.nonce();
let rproof = build.secp.range_proof(0, value, blinding, commit, nonce); let rproof = build.secp.range_proof(0, value, blinding, commit, nonce);
(tx.with_output(Output { (
tx.with_output(Output {
features: DEFAULT_OUTPUT, features: DEFAULT_OUTPUT,
commit: commit, commit: commit,
proof: rproof, proof: rproof,
}), }),
sum.add(blinding)) sum.add(blinding),
)
}) })
} }
@ -130,30 +132,38 @@ pub fn output_rand(value: u64) -> Box<Append> {
let commit = build.secp.commit(value, blinding).unwrap(); let commit = build.secp.commit(value, blinding).unwrap();
let nonce = build.secp.nonce(); let nonce = build.secp.nonce();
let rproof = build.secp.range_proof(0, value, blinding, commit, nonce); let rproof = build.secp.range_proof(0, value, blinding, commit, nonce);
(tx.with_output(Output { (
tx.with_output(Output {
features: DEFAULT_OUTPUT, features: DEFAULT_OUTPUT,
commit: commit, commit: commit,
proof: rproof, proof: rproof,
}), }),
sum.add(blinding)) sum.add(blinding),
)
}) })
} }
/// Sets the fee on the transaction being built. /// Sets the fee on the transaction being built.
pub fn with_fee(fee: u64) -> Box<Append> { pub fn with_fee(fee: u64) -> Box<Append> {
Box::new(move |_build, (tx, sum)| -> (Transaction, BlindSum) { (tx.with_fee(fee), sum) }) Box::new(move |_build, (tx, sum)| -> (Transaction, BlindSum) {
(tx.with_fee(fee), sum)
})
} }
/// Sets a known excess value on the transaction being built. Usually used in /// Sets a known excess value on the transaction being built. Usually used in
/// combination with the initial_tx function when a new transaction is built /// combination with the initial_tx function when a new transaction is built
/// by adding to a pre-existing one. /// by adding to a pre-existing one.
pub fn with_excess(excess: SecretKey) -> Box<Append> { pub fn with_excess(excess: SecretKey) -> Box<Append> {
Box::new(move |_build, (tx, sum)| -> (Transaction, BlindSum) { (tx, sum.add(excess)) }) Box::new(move |_build, (tx, sum)| -> (Transaction, BlindSum) {
(tx, sum.add(excess))
})
} }
/// Sets an initial transaction to add to when building a new transaction. /// Sets an initial transaction to add to when building a new transaction.
pub fn initial_tx(tx: Transaction) -> Box<Append> { pub fn initial_tx(tx: Transaction) -> Box<Append> {
Box::new(move |_build, (_, sum)| -> (Transaction, BlindSum) { (tx.clone(), sum) }) Box::new(move |_build, (_, sum)| -> (Transaction, BlindSum) {
(tx.clone(), sum)
})
} }
/// Builds a new transaction by combining all the combinators provided in a /// Builds a new transaction by combining all the combinators provided in a
@@ -171,8 +181,10 @@ pub fn transaction(elems: Vec<Box<Append>>) -> Result<(Transaction, SecretKey),
secp: Secp256k1::with_caps(secp::ContextFlag::Commit), secp: Secp256k1::with_caps(secp::ContextFlag::Commit),
rng: OsRng::new().unwrap(), rng: OsRng::new().unwrap(),
}; };
let (mut tx, sum) = elems.iter().fold((Transaction::empty(), BlindSum::new()), let (mut tx, sum) = elems.iter().fold(
|acc, elem| elem(&mut ctx, acc)); (Transaction::empty(), BlindSum::new()),
|acc, elem| elem(&mut ctx, acc),
);
let blind_sum = sum.sum(&ctx.secp)?; let blind_sum = sum.sum(&ctx.secp)?;
let msg = secp::Message::from_slice(&u64_to_32bytes(tx.fee))?; let msg = secp::Message::from_slice(&u64_to_32bytes(tx.fee))?;
@@ -199,9 +211,12 @@ mod test {
#[test] #[test]
fn blind_simple_tx() { fn blind_simple_tx() {
let secp = Secp256k1::with_caps(secp::ContextFlag::Commit); let secp = Secp256k1::with_caps(secp::ContextFlag::Commit);
let (tx, _) = let (tx, _) = transaction(vec![
transaction(vec![input_rand(10), input_rand(11), output_rand(20), with_fee(1)]) input_rand(10),
.unwrap(); input_rand(11),
output_rand(20),
with_fee(1),
]).unwrap();
tx.verify_sig(&secp).unwrap(); tx.verify_sig(&secp).unwrap();
} }
#[test] #[test]

@@ -20,7 +20,7 @@ pub mod hash;
pub mod pmmr; pub mod pmmr;
pub mod target; pub mod target;
pub mod transaction; pub mod transaction;
//pub mod txoset; // pub mod txoset;
#[allow(dead_code)] #[allow(dead_code)]
use std::fmt; use std::fmt;
@@ -82,7 +82,7 @@ pub trait Committed {
/// Proof of work /// Proof of work
pub struct Proof { pub struct Proof {
/// The nonces /// The nonces
pub nonces:Vec<u32>, pub nonces: Vec<u32>,
/// The proof size /// The proof size
pub proof_size: usize, pub proof_size: usize,
@@ -125,9 +125,8 @@ impl Clone for Proof {
} }
impl Proof { impl Proof {
/// Builds a proof with all bytes zeroed out /// Builds a proof with all bytes zeroed out
pub fn new(in_nonces:Vec<u32>) -> Proof { pub fn new(in_nonces: Vec<u32>) -> Proof {
Proof { Proof {
proof_size: in_nonces.len(), proof_size: in_nonces.len(),
nonces: in_nonces, nonces: in_nonces,
@@ -135,10 +134,10 @@ impl Proof {
} }
/// Builds a proof with all bytes zeroed out /// Builds a proof with all bytes zeroed out
pub fn zero(proof_size:usize) -> Proof { pub fn zero(proof_size: usize) -> Proof {
Proof { Proof {
proof_size: proof_size, proof_size: proof_size,
nonces: vec![0;proof_size], nonces: vec![0; proof_size],
} }
} }
@@ -251,9 +250,12 @@ mod test {
#[test] #[test]
fn hash_output() { fn hash_output() {
let (tx, _) = let (tx, _) = build::transaction(vec![
build::transaction(vec![input_rand(75), output_rand(42), output_rand(32), with_fee(1)]) input_rand(75),
.unwrap(); output_rand(42),
output_rand(32),
with_fee(1),
]).unwrap();
let h = tx.outputs[0].hash(); let h = tx.outputs[0].hash();
assert!(h != ZERO_HASH); assert!(h != ZERO_HASH);
let h2 = tx.outputs[1].hash(); let h2 = tx.outputs[1].hash();
@@ -309,9 +311,11 @@ mod test {
// From now on, Bob only has the obscured transaction and the sum of // From now on, Bob only has the obscured transaction and the sum of
// blinding factors. He adds his output, finalizes the transaction so it's // blinding factors. He adds his output, finalizes the transaction so it's
// ready for broadcast. // ready for broadcast.
let (tx_final, _) = let (tx_final, _) = build::transaction(vec![
build::transaction(vec![initial_tx(tx_alice), with_excess(blind_sum), output_rand(5)]) initial_tx(tx_alice),
.unwrap(); with_excess(blind_sum),
output_rand(5),
]).unwrap();
tx_final.validate(&secp).unwrap(); tx_final.validate(&secp).unwrap();
} }
@@ -357,8 +361,12 @@ mod test {
// utility producing a transaction with 2 inputs and a single output // utility producing a transaction with 2 inputs and a single output
pub fn tx2i1o() -> Transaction { pub fn tx2i1o() -> Transaction {
build::transaction(vec![input_rand(10), input_rand(11), output_rand(20), with_fee(1)]) build::transaction(vec![
.map(|(tx, _)| tx) input_rand(10),
input_rand(11),
output_rand(20),
with_fee(1),
]).map(|(tx, _)| tx)
.unwrap() .unwrap()
} }

@@ -15,7 +15,11 @@
//! Persistent and prunable Merkle Mountain Range implementation. For a high //! Persistent and prunable Merkle Mountain Range implementation. For a high
//! level description of MMRs, see: //! level description of MMRs, see:
//! //!
//! https://github.com/opentimestamps/opentimestamps-server/blob/master/doc/merkle-mountain-range.md //! https://github.
//! com/opentimestamps/opentimestamps-server/blob/master/doc/merkle-mountain-range.
//!
//!
//! md
//! //!
//! This implementation is built in two major parts: //! This implementation is built in two major parts:
//! //!
@@ -91,7 +95,10 @@ impl<T> Summable for NoSum<T> {
return 0; return 0;
} }
} }
impl<T> Writeable for NoSum<T> where T: Writeable { impl<T> Writeable for NoSum<T>
where
T: Writeable,
{
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> { fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
self.0.write(writer) self.0.write(writer)
} }
@@ -100,14 +107,20 @@ impl<T> Writeable for NoSum<T> where T: Writeable {
/// A utility type to handle (Hash, Sum) pairs more conveniently. The addition /// A utility type to handle (Hash, Sum) pairs more conveniently. The addition
/// of two HashSums is the (Hash(h1|h2), h1 + h2) HashSum. /// of two HashSums is the (Hash(h1|h2), h1 + h2) HashSum.
#[derive(Debug, Clone, PartialEq, Eq)] #[derive(Debug, Clone, PartialEq, Eq)]
pub struct HashSum<T> where T: Summable { pub struct HashSum<T>
where
T: Summable,
{
/// The hash /// The hash
pub hash: Hash, pub hash: Hash,
/// The sum /// The sum
pub sum: T::Sum, pub sum: T::Sum,
} }
impl<T> HashSum<T> where T: Summable + Hashed { impl<T> HashSum<T>
where
T: Summable + Hashed,
{
/// Create a hash sum from a summable /// Create a hash sum from a summable
pub fn from_summable(idx: u64, elmt: &T) -> HashSum<T> { pub fn from_summable(idx: u64, elmt: &T) -> HashSum<T> {
let hash = elmt.hash(); let hash = elmt.hash();
@@ -120,7 +133,10 @@ impl<T> HashSum<T> where T: Summable + Hashed {
} }
} }
impl<T> Readable for HashSum<T> where T: Summable { impl<T> Readable for HashSum<T>
where
T: Summable,
{
fn read(r: &mut Reader) -> Result<HashSum<T>, ser::Error> { fn read(r: &mut Reader) -> Result<HashSum<T>, ser::Error> {
Ok(HashSum { Ok(HashSum {
hash: Hash::read(r)?, hash: Hash::read(r)?,
@@ -129,14 +145,20 @@ impl<T> Readable for HashSum<T> where T: Summable {
} }
} }
impl<T> Writeable for HashSum<T> where T: Summable { impl<T> Writeable for HashSum<T>
where
T: Summable,
{
fn write<W: Writer>(&self, w: &mut W) -> Result<(), ser::Error> { fn write<W: Writer>(&self, w: &mut W) -> Result<(), ser::Error> {
self.hash.write(w)?; self.hash.write(w)?;
self.sum.write(w) self.sum.write(w)
} }
} }
impl<T> ops::Add for HashSum<T> where T: Summable { impl<T> ops::Add for HashSum<T>
where
T: Summable,
{
type Output = HashSum<T>; type Output = HashSum<T>;
fn add(self, other: HashSum<T>) -> HashSum<T> { fn add(self, other: HashSum<T>) -> HashSum<T> {
HashSum { HashSum {
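Spelling out the rule from the comment above: hashes combine by hashing the (left, right) pair while sums simply add, so a parent node can be built directly from two child HashSums. A small sketch, assuming elems holds values of a Summable + Hashed type as in the tests further down:

    // children at MMR positions 1 and 2; their parent sits at position 3
    let left = HashSum::from_summable(1, &elems[0]);
    let right = HashSum::from_summable(2, &elems[1]);
    let parent = left + right;
    // parent.hash commits to both child hashes; parent.sum = left.sum + right.sum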
@@ -150,8 +172,10 @@ impl<T> ops::Add for HashSum<T> where T: Summable {
/// The PMMR itself does not need the Backend to be accurate on the existence /// The PMMR itself does not need the Backend to be accurate on the existence
/// of an element (i.e. remove could be a no-op) but layers above can /// of an element (i.e. remove could be a no-op) but layers above can
/// depend on an accurate Backend to check existence. /// depend on an accurate Backend to check existence.
pub trait Backend<T> where T: Summable { pub trait Backend<T>
where
T: Summable,
{
/// Append the provided HashSums to the backend storage. The position of the /// Append the provided HashSums to the backend storage. The position of the
/// first element of the Vec in the MMR is provided to help the /// first element of the Vec in the MMR is provided to help the
/// implementation. /// implementation.
@@ -176,15 +200,22 @@ pub trait Backend<T> where T: Summable {
/// Heavily relies on navigation operations within a binary tree. In particular, /// Heavily relies on navigation operations within a binary tree. In particular,
/// all the implementation needs to keep track of the MMR structure is how far /// all the implementation needs to keep track of the MMR structure is how far
/// we are in the sequence of nodes making up the MMR. /// we are in the sequence of nodes making up the MMR.
pub struct PMMR<'a, T, B> where T: Summable, B: 'a + Backend<T> { pub struct PMMR<'a, T, B>
where
T: Summable,
B: 'a + Backend<T>,
{
last_pos: u64, last_pos: u64,
backend: &'a mut B, backend: &'a mut B,
// only needed for parameterizing Backend // only needed for parameterizing Backend
summable: PhantomData<T>, summable: PhantomData<T>,
} }
impl<'a, T, B> PMMR<'a, T, B> where T: Summable + Hashed + Clone, B: 'a + Backend<T> { impl<'a, T, B> PMMR<'a, T, B>
where
T: Summable + Hashed + Clone,
B: 'a + Backend<T>,
{
/// Build a new prunable Merkle Mountain Range using the provided backend. /// Build a new prunable Merkle Mountain Range using the provided backend.
pub fn new(backend: &'a mut B) -> PMMR<T, B> { pub fn new(backend: &'a mut B) -> PMMR<T, B> {
PMMR { PMMR {
@@ -194,7 +225,8 @@ impl<'a, T, B> PMMR<'a, T, B> where T: Summable + Hashed + Clone, B: 'a + Backen
} }
} }
/// Build a new prunable Merkle Mountain Range pre-initialized until last_pos /// Build a new prunable Merkle Mountain Range pre-initialized until
/// last_pos
/// with the provided backend. /// with the provided backend.
pub fn at(backend: &'a mut B, last_pos: u64) -> PMMR<T, B> { pub fn at(backend: &'a mut B, last_pos: u64) -> PMMR<T, B> {
PMMR { PMMR {
@@ -215,7 +247,7 @@ impl<'a, T, B> PMMR<'a, T, B> where T: Summable + Hashed + Clone, B: 'a + Backen
ret = match (ret, peak) { ret = match (ret, peak) {
(None, x) => x, (None, x) => x,
(Some(hsum), None) => Some(hsum), (Some(hsum), None) => Some(hsum),
(Some(lhsum), Some(rhsum)) => Some(lhsum + rhsum) (Some(lhsum), Some(rhsum)) => Some(lhsum + rhsum),
} }
} }
ret.expect("no root, invalid tree") ret.expect("no root, invalid tree")
@@ -234,10 +266,11 @@ impl<'a, T, B> PMMR<'a, T, B> where T: Summable + Hashed + Clone, B: 'a + Backen
// height it means we have to build a higher peak by summing with a previous // height it means we have to build a higher peak by summing with a previous
// sibling. we do it iteratively in case the new peak itself allows the // sibling. we do it iteratively in case the new peak itself allows the
// creation of another parent. // creation of another parent.
while bintree_postorder_height(pos+1) > height { while bintree_postorder_height(pos + 1) > height {
let left_sibling = bintree_jump_left_sibling(pos); let left_sibling = bintree_jump_left_sibling(pos);
let left_hashsum = self.backend.get(left_sibling) let left_hashsum = self.backend.get(left_sibling).expect(
.expect("missing left sibling in tree, should not have been pruned"); "missing left sibling in tree, should not have been pruned",
);
current_hashsum = left_hashsum + current_hashsum; current_hashsum = left_hashsum + current_hashsum;
to_append.push(current_hashsum.clone()); to_append.push(current_hashsum.clone());
@@ -259,7 +292,7 @@ impl<'a, T, B> PMMR<'a, T, B> where T: Summable + Hashed + Clone, B: 'a + Backen
// position is a leaf, which may have had some parent that needs to exist // position is a leaf, which may have had some parent that needs to exist
// afterward for the MMR to be valid // afterward for the MMR to be valid
let mut pos = position; let mut pos = position;
while bintree_postorder_height(pos+1) > 0 { while bintree_postorder_height(pos + 1) > 0 {
pos += 1; pos += 1;
} }
@@ -268,13 +301,14 @@ impl<'a, T, B> PMMR<'a, T, B> where T: Summable + Hashed + Clone, B: 'a + Backen
Ok(()) Ok(())
} }
/// Prune an element from the tree given its position. Note that to be able to /// Prune an element from the tree given its position. Note that to be able
/// to
/// provide that position and prune, consumers of this API are expected to /// provide that position and prune, consumers of this API are expected to
/// keep an index of elements to positions in the tree. Prunes parent /// keep an index of elements to positions in the tree. Prunes parent
/// nodes as well when they become childless. /// nodes as well when they become childless.
pub fn prune(&mut self, position: u64, index: u32) -> Result<bool, String> { pub fn prune(&mut self, position: u64, index: u32) -> Result<bool, String> {
if let None = self.backend.get(position) { if let None = self.backend.get(position) {
return Ok(false) return Ok(false);
} }
let prunable_height = bintree_postorder_height(position); let prunable_height = bintree_postorder_height(position);
if prunable_height > 0 { if prunable_height > 0 {
@@ -286,7 +320,7 @@ impl<'a, T, B> PMMR<'a, T, B> where T: Summable + Hashed + Clone, B: 'a + Backen
// the tree. // the tree.
let mut to_prune = vec![]; let mut to_prune = vec![];
let mut current = position; let mut current = position;
while current+1 < self.last_pos { while current + 1 < self.last_pos {
let (parent, sibling) = family(current); let (parent, sibling) = family(current);
if parent > self.last_pos { if parent > self.last_pos {
// can't prune when our parent isn't here yet // can't prune when our parent isn't here yet
@@ -330,7 +364,7 @@ impl<'a, T, B> PMMR<'a, T, B> where T: Summable + Hashed + Clone, B: 'a + Backen
print!("{:>8} ", n + 1); print!("{:>8} ", n + 1);
} }
println!(""); println!("");
for n in 1..(sz+1) { for n in 1..(sz + 1) {
let ohs = self.get(n); let ohs = self.get(n);
match ohs { match ohs {
Some(hs) => print!("{} ", hs.hash), Some(hs) => print!("{} ", hs.hash),
@@ -345,36 +379,45 @@ impl<'a, T, B> PMMR<'a, T, B> where T: Summable + Hashed + Clone, B: 'a + Backen
/// compact the Vector itself but still frees the reference to the /// compact the Vector itself but still frees the reference to the
/// underlying HashSum. /// underlying HashSum.
#[derive(Clone)] #[derive(Clone)]
pub struct VecBackend<T> where T: Summable + Clone { pub struct VecBackend<T>
where
T: Summable + Clone,
{
pub elems: Vec<Option<HashSum<T>>>, pub elems: Vec<Option<HashSum<T>>>,
} }
impl<T> Backend<T> for VecBackend<T> where T: Summable + Clone { impl<T> Backend<T> for VecBackend<T>
where
T: Summable + Clone,
{
#[allow(unused_variables)] #[allow(unused_variables)]
fn append(&mut self, position: u64, data: Vec<HashSum<T>>) -> Result<(), String> { fn append(&mut self, position: u64, data: Vec<HashSum<T>>) -> Result<(), String> {
self.elems.append(&mut map_vec!(data, |d| Some(d.clone()))); self.elems.append(&mut map_vec!(data, |d| Some(d.clone())));
Ok(()) Ok(())
} }
fn get(&self, position: u64) -> Option<HashSum<T>> { fn get(&self, position: u64) -> Option<HashSum<T>> {
self.elems[(position-1) as usize].clone() self.elems[(position - 1) as usize].clone()
} }
fn remove(&mut self, positions: Vec<u64>, index: u32) -> Result<(), String> { fn remove(&mut self, positions: Vec<u64>, index: u32) -> Result<(), String> {
for n in positions { for n in positions {
self.elems[(n-1) as usize] = None self.elems[(n - 1) as usize] = None
} }
Ok(()) Ok(())
} }
#[allow(unused_variables)] #[allow(unused_variables)]
fn rewind(&mut self, position: u64, index: u32) -> Result<(), String> { fn rewind(&mut self, position: u64, index: u32) -> Result<(), String> {
self.elems = self.elems[0..(position as usize)+1].to_vec(); self.elems = self.elems[0..(position as usize) + 1].to_vec();
Ok(()) Ok(())
} }
} }
impl<T> VecBackend<T> where T: Summable + Clone { impl<T> VecBackend<T>
where
T: Summable + Clone,
{
/// Instantiates a new VecBackend<T> /// Instantiates a new VecBackend<T>
pub fn new() -> VecBackend<T> { pub fn new() -> VecBackend<T> {
VecBackend{elems: vec![]} VecBackend { elems: vec![] }
} }
/// Current number of HashSum elements in the underlying Vec. /// Current number of HashSum elements in the underlying Vec.
@@ -418,7 +461,7 @@ pub struct PruneList {
impl PruneList { impl PruneList {
/// Instantiate a new empty prune list /// Instantiate a new empty prune list
pub fn new() -> PruneList { pub fn new() -> PruneList {
PruneList{pruned_nodes: vec![]} PruneList { pruned_nodes: vec![] }
} }
/// Computes by how many positions a node at pos should be shifted given the /// Computes by how many positions a node at pos should be shifted given the
@@ -501,7 +544,7 @@ fn peaks(num: u64) -> Vec<u64> {
// detecting an invalid mountain range, when siblings exist but no parent // detecting an invalid mountain range, when siblings exist but no parent
// exists // exists
if bintree_postorder_height(num+1) > bintree_postorder_height(num) { if bintree_postorder_height(num + 1) > bintree_postorder_height(num) {
return vec![]; return vec![];
} }
@@ -616,7 +659,7 @@ pub fn family(pos: u64) -> (u64, u64) {
let parent: u64; let parent: u64;
let pos_height = bintree_postorder_height(pos); let pos_height = bintree_postorder_height(pos);
let next_height = bintree_postorder_height(pos+1); let next_height = bintree_postorder_height(pos + 1);
if next_height > pos_height { if next_height > pos_height {
sibling = bintree_jump_left_sibling(pos); sibling = bintree_jump_left_sibling(pos);
parent = pos + 1; parent = pos + 1;
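A worked example may help; the values below follow my reading of these helpers against the postorder numbering (the height-per-position vector quoted in the test below), so treat them as illustrative rather than authoritative:

    // An 11-node MMR: leaves at 1,2,4,5,8,9,11; parents at 3,6,10; summit at 7.
    assert_eq!(family(1), (3, 2)); // parent 3, sibling 2
    assert_eq!(family(2), (3, 1)); // next height is higher, so left sibling 1
    // 11 nodes hold 7 leaves, 7 = 0b111, hence three peaks
    assert_eq!(peaks(11), vec![7, 10, 11]);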
@@ -710,15 +753,19 @@ mod test {
#[test] #[test]
#[allow(unused_variables)] #[allow(unused_variables)]
fn first_50_mmr_heights() { fn first_50_mmr_heights() {
let first_100_str = let first_100_str = "0 0 1 0 0 1 2 0 0 1 0 0 1 2 3 0 0 1 0 0 1 2 0 0 1 0 0 1 2 3 4 \
"0 0 1 0 0 1 2 0 0 1 0 0 1 2 3 0 0 1 0 0 1 2 0 0 1 0 0 1 2 3 4 \
0 0 1 0 0 1 2 0 0 1 0 0 1 2 3 0 0 1 0 0 1 2 0 0 1 0 0 1 2 3 4 5 \ 0 0 1 0 0 1 2 0 0 1 0 0 1 2 3 0 0 1 0 0 1 2 0 0 1 0 0 1 2 3 4 5 \
0 0 1 0 0 1 2 0 0 1 0 0 1 2 3 0 0 1 0 0 1 2 0 0 1 0 0 1 2 3 4 0 0 1 0 0"; 0 0 1 0 0 1 2 0 0 1 0 0 1 2 3 0 0 1 0 0 1 2 0 0 1 0 0 1 2 3 4 0 0 1 0 0";
let first_100 = first_100_str.split(' ').map(|n| n.parse::<u64>().unwrap()); let first_100 = first_100_str.split(' ').map(|n| n.parse::<u64>().unwrap());
let mut count = 1; let mut count = 1;
for n in first_100 { for n in first_100 {
assert_eq!(n, bintree_postorder_height(count), "expected {}, got {}", assert_eq!(
n, bintree_postorder_height(count)); n,
bintree_postorder_height(count),
"expected {}, got {}",
n,
bintree_postorder_height(count)
);
count += 1; count += 1;
} }
} }
@@ -785,7 +832,13 @@ mod test {
let hash = Hashed::hash(&elems[0]); let hash = Hashed::hash(&elems[0]);
let sum = elems[0].sum(); let sum = elems[0].sum();
let node_hash = (1 as u64, &sum, hash).hash(); let node_hash = (1 as u64, &sum, hash).hash();
assert_eq!(pmmr.root(), HashSum{hash: node_hash, sum: sum}); assert_eq!(
pmmr.root(),
HashSum {
hash: node_hash,
sum: sum,
}
);
assert_eq!(pmmr.unpruned_size(), 1); assert_eq!(pmmr.unpruned_size(), 1);
// two elements // two elements
@@ -802,7 +855,8 @@ mod test {
// four elements // four elements
pmmr.push(elems[3]).unwrap(); pmmr.push(elems[3]).unwrap();
let sum4 = sum2 + (HashSum::from_summable(4, &elems[2]) + HashSum::from_summable(5, &elems[3])); let sum4 = sum2 +
(HashSum::from_summable(4, &elems[2]) + HashSum::from_summable(5, &elems[3]));
assert_eq!(pmmr.root(), sum4); assert_eq!(pmmr.root(), sum4);
assert_eq!(pmmr.unpruned_size(), 7); assert_eq!(pmmr.unpruned_size(), 7);
@@ -814,7 +868,8 @@ mod test {
// six elements // six elements
pmmr.push(elems[5]).unwrap(); pmmr.push(elems[5]).unwrap();
let sum6 = sum4.clone() + (HashSum::from_summable(8, &elems[4]) + HashSum::from_summable(9, &elems[5])); let sum6 = sum4.clone() +
(HashSum::from_summable(8, &elems[4]) + HashSum::from_summable(9, &elems[5]));
assert_eq!(pmmr.root(), sum6.clone()); assert_eq!(pmmr.root(), sum6.clone());
assert_eq!(pmmr.unpruned_size(), 10); assert_eq!(pmmr.unpruned_size(), 10);
@@ -826,7 +881,9 @@ mod test {
// eight elements // eight elements
pmmr.push(elems[7]).unwrap(); pmmr.push(elems[7]).unwrap();
let sum8 = sum4 + ((HashSum::from_summable(8, &elems[4]) + HashSum::from_summable(9, &elems[5])) + (HashSum::from_summable(11, &elems[6]) + HashSum::from_summable(12, &elems[7]))); let sum8 = sum4 +
((HashSum::from_summable(8, &elems[4]) + HashSum::from_summable(9, &elems[5])) +
(HashSum::from_summable(11, &elems[6]) + HashSum::from_summable(12, &elems[7])));
assert_eq!(pmmr.root(), sum8); assert_eq!(pmmr.root(), sum8);
assert_eq!(pmmr.unpruned_size(), 15); assert_eq!(pmmr.unpruned_size(), 15);
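End to end, the API above composes along the lines of this in-memory sketch (element construction elided; elems is assumed to hold a Summable + Hashed + Clone type as in these tests):

    let mut backend = VecBackend::new();
    let mut pmmr = PMMR::new(&mut backend);

    // append three leaves: positions 1, 2 and 4, plus the parent at 3
    pmmr.push(elems[0]).unwrap();
    pmmr.push(elems[1]).unwrap();
    pmmr.push(elems[2]).unwrap();

    // a single root folds all peaks together
    let root = pmmr.root();
    assert_eq!(pmmr.unpruned_size(), 4);

    // prune by position; callers keep their own element-to-position index
    pmmr.prune(1, 0).unwrap();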

@@ -59,8 +59,8 @@ impl Difficulty {
/// provided hash. /// provided hash.
pub fn from_hash(h: &Hash) -> Difficulty { pub fn from_hash(h: &Hash) -> Difficulty {
let max_target = BigEndian::read_u64(&MAX_TARGET); let max_target = BigEndian::read_u64(&MAX_TARGET);
//Use the first 64 bits of the given hash // Use the first 64 bits of the given hash
let mut in_vec=h.to_vec(); let mut in_vec = h.to_vec();
in_vec.truncate(8); in_vec.truncate(8);
let num = BigEndian::read_u64(&in_vec); let num = BigEndian::read_u64(&in_vec);
Difficulty { num: max_target / num } Difficulty { num: max_target / num }
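A worked example of that ratio, with an illustrative max_target rather than the real MAX_TARGET constant from this file: halving the 64-bit prefix of a hash doubles its difficulty.

    use byteorder::{BigEndian, ByteOrder};

    // pretend the maximum target decodes to u64::MAX
    let max_target = BigEndian::read_u64(&[0xff; 8]);
    // a hash starting with 0x7f 0xff 0xff ... decodes to about half of it
    let num = BigEndian::read_u64(&[0x7f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff]);
    assert_eq!(max_target / num, 2);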
@@ -121,7 +121,8 @@ impl Readable for Difficulty {
impl Serialize for Difficulty { impl Serialize for Difficulty {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where S: Serializer where
S: Serializer,
{ {
serializer.serialize_u64(self.num) serializer.serialize_u64(self.num)
} }
@@ -129,7 +130,8 @@ impl Serialize for Difficulty {
impl<'de> Deserialize<'de> for Difficulty { impl<'de> Deserialize<'de> for Difficulty {
fn deserialize<D>(deserializer: D) -> Result<Difficulty, D::Error> fn deserialize<D>(deserializer: D) -> Result<Difficulty, D::Error>
where D: Deserializer<'de> where
D: Deserializer<'de>,
{ {
deserializer.deserialize_u64(DiffVisitor) deserializer.deserialize_u64(DiffVisitor)
} }
@@ -145,11 +147,15 @@ impl<'de> de::Visitor<'de> for DiffVisitor {
} }
fn visit_str<E>(self, s: &str) -> Result<Self::Value, E> fn visit_str<E>(self, s: &str) -> Result<Self::Value, E>
where E: de::Error where
E: de::Error,
{ {
let num_in = s.parse::<u64>(); let num_in = s.parse::<u64>();
if let Err(_)=num_in { if let Err(_) = num_in {
return Err(de::Error::invalid_value(de::Unexpected::Str(s), &"a value number")); return Err(de::Error::invalid_value(
de::Unexpected::Str(s),
&"a value number",
));
}; };
Ok(Difficulty { num: num_in.unwrap() }) Ok(Difficulty { num: num_in.unwrap() })
} }

@@ -54,11 +54,13 @@ pub struct TxKernel {
impl Writeable for TxKernel { impl Writeable for TxKernel {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> { fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
ser_multiwrite!(writer, ser_multiwrite!(
writer,
[write_u8, self.features.bits()], [write_u8, self.features.bits()],
[write_fixed_bytes, &self.excess], [write_fixed_bytes, &self.excess],
[write_bytes, &self.excess_sig], [write_bytes, &self.excess_sig],
[write_u64, self.fee]); [write_u64, self.fee]
);
Ok(()) Ok(())
} }
} }
@@ -66,8 +68,9 @@ impl Writeable for TxKernel {
impl Readable for TxKernel { impl Readable for TxKernel {
fn read(reader: &mut Reader) -> Result<TxKernel, ser::Error> { fn read(reader: &mut Reader) -> Result<TxKernel, ser::Error> {
Ok(TxKernel { Ok(TxKernel {
features: features: KernelFeatures::from_bits(reader.read_u8()?).ok_or(
KernelFeatures::from_bits(reader.read_u8()?).ok_or(ser::Error::CorruptedData)?, ser::Error::CorruptedData,
)?,
excess: Commitment::read(reader)?, excess: Commitment::read(reader)?,
excess_sig: reader.read_vec()?, excess_sig: reader.read_vec()?,
fee: reader.read_u64()?, fee: reader.read_u64()?,
@@ -104,11 +107,13 @@ pub struct Transaction {
/// write the transaction as binary. /// write the transaction as binary.
impl Writeable for Transaction { impl Writeable for Transaction {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> { fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
ser_multiwrite!(writer, ser_multiwrite!(
writer,
[write_u64, self.fee], [write_u64, self.fee],
[write_bytes, &self.excess_sig], [write_bytes, &self.excess_sig],
[write_u64, self.inputs.len() as u64], [write_u64, self.inputs.len() as u64],
[write_u64, self.outputs.len() as u64]); [write_u64, self.outputs.len() as u64]
);
for inp in &self.inputs { for inp in &self.inputs {
try!(inp.write(writer)); try!(inp.write(writer));
} }
@@ -185,7 +190,10 @@ impl Transaction {
pub fn with_input(self, input: Input) -> Transaction { pub fn with_input(self, input: Input) -> Transaction {
let mut new_ins = self.inputs; let mut new_ins = self.inputs;
new_ins.push(input); new_ins.push(input);
Transaction { inputs: new_ins, ..self } Transaction {
inputs: new_ins,
..self
}
} }
/// Builds a new transaction with the provided output added. Existing /// Builds a new transaction with the provided output added. Existing
@@ -193,7 +201,10 @@ impl Transaction {
pub fn with_output(self, output: Output) -> Transaction { pub fn with_output(self, output: Output) -> Transaction {
let mut new_outs = self.outputs; let mut new_outs = self.outputs;
new_outs.push(output); new_outs.push(output);
Transaction { outputs: new_outs, ..self } Transaction {
outputs: new_outs,
..self
}
} }
/// Builds a new transaction with the provided fee. /// Builds a new transaction with the provided fee.
@@ -304,9 +315,11 @@ pub struct Output {
/// an Output as binary. /// an Output as binary.
impl Writeable for Output { impl Writeable for Output {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> { fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
ser_multiwrite!(writer, ser_multiwrite!(
writer,
[write_u8, self.features.bits()], [write_u8, self.features.bits()],
[write_fixed_bytes, &self.commit]); [write_fixed_bytes, &self.commit]
);
// The hash of an output doesn't include the range proof // The hash of an output doesn't include the range proof
if writer.serialization_mode() == ser::SerializationMode::Full { if writer.serialization_mode() == ser::SerializationMode::Full {
writer.write_bytes(&self.proof)? writer.write_bytes(&self.proof)?
@@ -320,8 +333,9 @@ impl Writeable for Output {
impl Readable for Output { impl Readable for Output {
fn read(reader: &mut Reader) -> Result<Output, ser::Error> { fn read(reader: &mut Reader) -> Result<Output, ser::Error> {
Ok(Output { Ok(Output {
features: features: OutputFeatures::from_bits(reader.read_u8()?).ok_or(
OutputFeatures::from_bits(reader.read_u8()?).ok_or(ser::Error::CorruptedData)?, ser::Error::CorruptedData,
)?,
commit: Commitment::read(reader)?, commit: Commitment::read(reader)?,
proof: RangeProof::read(reader)?, proof: RangeProof::read(reader)?,
}) })
@@ -341,8 +355,6 @@ impl Output {
/// Validates the range proof using the commitment /// Validates the range proof using the commitment
pub fn verify_proof(&self, secp: &Secp256k1) -> Result<(), secp::Error> { pub fn verify_proof(&self, secp: &Secp256k1) -> Result<(), secp::Error> {
/// secp.verify_range_proof returns range if and only if both min_value and max_value less than 2^64
/// since group order is much larger (~2^256) we can be sure overflow is not the case
secp.verify_range_proof(self.commit, self.proof).map(|_| ()) secp.verify_range_proof(self.commit, self.proof).map(|_| ())
} }
} }
@@ -392,7 +404,10 @@ impl ops::Add for SumCommit {
type Output = SumCommit; type Output = SumCommit;
fn add(self, other: SumCommit) -> SumCommit { fn add(self, other: SumCommit) -> SumCommit {
let sum = match self.secp.commit_sum(vec![self.commit.clone(), other.commit.clone()], vec![]) { let sum = match self.secp.commit_sum(
vec![self.commit.clone(), other.commit.clone()],
vec![],
) {
Ok(s) => s, Ok(s) => s,
Err(_) => Commitment::from_vec(vec![1; 33]), Err(_) => Commitment::from_vec(vec![1; 33]),
}; };
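The fallback aside, this sum is well defined because Pedersen commitments are additively homomorphic. A sketch with illustrative values and keys:

    // commit(3, k1) + commit(4, k2) commits to (7, k1 + k2)
    let c1 = secp.commit(3, key1).unwrap();
    let c2 = secp.commit(4, key2).unwrap();
    let total = secp.commit_sum(vec![c1, c2], vec![]).unwrap();
    // a party knowing both keys can reproduce total as commit(7, k1 + k2)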

@@ -21,7 +21,7 @@
/// different sets of parameters for different purposes, /// different sets of parameters for different purposes,
/// e.g. CI, User testing, production values /// e.g. CI, User testing, production values
use std::sync::{RwLock}; use std::sync::RwLock;
use consensus::PROOFSIZE; use consensus::PROOFSIZE;
use consensus::DEFAULT_SIZESHIFT; use consensus::DEFAULT_SIZESHIFT;
@@ -29,16 +29,16 @@ use consensus::DEFAULT_SIZESHIFT;
/// by users /// by users
/// Automated testing sizeshift /// Automated testing sizeshift
pub const AUTOMATED_TESTING_SIZESHIFT:u8 = 10; pub const AUTOMATED_TESTING_SIZESHIFT: u8 = 10;
/// Automated testing proof size /// Automated testing proof size
pub const AUTOMATED_TESTING_PROOF_SIZE:usize = 4; pub const AUTOMATED_TESTING_PROOF_SIZE: usize = 4;
/// User testing sizeshift /// User testing sizeshift
pub const USER_TESTING_SIZESHIFT:u8 = 16; pub const USER_TESTING_SIZESHIFT: u8 = 16;
/// User testing proof size /// User testing proof size
pub const USER_TESTING_PROOF_SIZE:usize = 42; pub const USER_TESTING_PROOF_SIZE: usize = 42;
/// Mining parameter modes /// Mining parameter modes
#[derive(Debug, Clone, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]
@@ -55,18 +55,19 @@ pub enum MiningParameterMode {
lazy_static!{ lazy_static!{
/// The mining parameter mode /// The mining parameter mode
pub static ref MINING_PARAMETER_MODE: RwLock<MiningParameterMode> = RwLock::new(MiningParameterMode::Production); pub static ref MINING_PARAMETER_MODE: RwLock<MiningParameterMode> =
RwLock::new(MiningParameterMode::Production);
} }
/// Set the mining mode /// Set the mining mode
pub fn set_mining_mode(mode:MiningParameterMode){ pub fn set_mining_mode(mode: MiningParameterMode) {
let mut param_ref=MINING_PARAMETER_MODE.write().unwrap(); let mut param_ref = MINING_PARAMETER_MODE.write().unwrap();
*param_ref=mode; *param_ref = mode;
} }
/// The sizeshift /// The sizeshift
pub fn sizeshift() -> u8 { pub fn sizeshift() -> u8 {
let param_ref=MINING_PARAMETER_MODE.read().unwrap(); let param_ref = MINING_PARAMETER_MODE.read().unwrap();
match *param_ref { match *param_ref {
MiningParameterMode::AutomatedTesting => AUTOMATED_TESTING_SIZESHIFT, MiningParameterMode::AutomatedTesting => AUTOMATED_TESTING_SIZESHIFT,
MiningParameterMode::UserTesting => USER_TESTING_SIZESHIFT, MiningParameterMode::UserTesting => USER_TESTING_SIZESHIFT,
@@ -76,7 +77,7 @@ pub fn sizeshift() -> u8 {
/// The proofsize /// The proofsize
pub fn proofsize() -> usize { pub fn proofsize() -> usize {
let param_ref=MINING_PARAMETER_MODE.read().unwrap(); let param_ref = MINING_PARAMETER_MODE.read().unwrap();
match *param_ref { match *param_ref {
MiningParameterMode::AutomatedTesting => AUTOMATED_TESTING_PROOF_SIZE, MiningParameterMode::AutomatedTesting => AUTOMATED_TESTING_PROOF_SIZE,
MiningParameterMode::UserTesting => USER_TESTING_PROOF_SIZE, MiningParameterMode::UserTesting => USER_TESTING_PROOF_SIZE,
@@ -86,8 +87,8 @@ pub fn proofsize() -> usize {
/// Are we in automated testing mode? /// Are we in automated testing mode?
pub fn is_automated_testing_mode() -> bool { pub fn is_automated_testing_mode() -> bool {
let param_ref=MINING_PARAMETER_MODE.read().unwrap(); let param_ref = MINING_PARAMETER_MODE.read().unwrap();
if let MiningParameterMode::AutomatedTesting=*param_ref { if let MiningParameterMode::AutomatedTesting = *param_ref {
return true; return true;
} else { } else {
return false; return false;
@@ -96,8 +97,8 @@ pub fn is_automated_testing_mode() -> bool {
/// Are we in production mode? /// Are we in production mode?
pub fn is_production_mode() -> bool { pub fn is_production_mode() -> bool {
let param_ref=MINING_PARAMETER_MODE.read().unwrap(); let param_ref = MINING_PARAMETER_MODE.read().unwrap();
if let MiningParameterMode::Production=*param_ref { if let MiningParameterMode::Production = *param_ref {
return true; return true;
} else { } else {
return false; return false;
@@ -107,28 +108,70 @@ pub fn is_production_mode() -> bool {
/// Helper function to get a nonce known to create a valid POW on /// Helper function to get a nonce known to create a valid POW on
/// the genesis block, to prevent it taking ages. Should be fine for now /// the genesis block, to prevent it taking ages. Should be fine for now
/// as the genesis block POW solution turns out to be the same for every new block chain /// as the genesis block POW solution turns out to be the same for every new
/// block chain
/// at the moment /// at the moment
pub fn get_genesis_nonce() -> u64 { pub fn get_genesis_nonce() -> u64 {
let param_ref=MINING_PARAMETER_MODE.read().unwrap(); let param_ref = MINING_PARAMETER_MODE.read().unwrap();
match *param_ref { match *param_ref {
MiningParameterMode::AutomatedTesting => 0, //won't make a difference // won't make a difference
MiningParameterMode::UserTesting => 22141, //Magic nonce for current genesis block at cuckoo16 MiningParameterMode::AutomatedTesting => 0,
MiningParameterMode::Production => 1429942738856787200, //Magic nonce for current genesis at cuckoo30 // Magic nonce for current genesis block at cuckoo16
MiningParameterMode::UserTesting => 22141,
// Magic nonce for current genesis at cuckoo30
MiningParameterMode::Production => 1429942738856787200,
} }
} }
/// Returns the genesis POW for production, because it takes far too long to mine at production values /// Returns the genesis POW for production, because it takes far too long to
/// mine at production values
/// using the internal miner /// using the internal miner
pub fn get_genesis_pow() -> [u32;42]{ pub fn get_genesis_pow() -> [u32; 42] {
//TODO: This is diff 26, probably just want a 10: mine one // TODO: This is diff 26, probably just want a 10: mine one
[7444824, 11926557, 28520390, 30594072, 50854023, 52797085, 57882033, [
59816511, 61404804, 84947619, 87779345, 115270337, 162618676, 7444824,
166860710, 178656003, 178971372, 200454733, 209197630, 221231015, 11926557,
228598741, 241012783, 245401183, 279080304, 295848517, 327300943, 28520390,
329741709, 366394532, 382493153, 389329248, 404353381, 406012911, 30594072,
418813499, 426573907, 452566575, 456930760, 463021458, 474340589, 50854023,
476248039, 478197093, 487576917, 495653489, 501862896] 52797085,
57882033,
59816511,
61404804,
84947619,
87779345,
115270337,
162618676,
166860710,
178656003,
178971372,
200454733,
209197630,
221231015,
228598741,
241012783,
245401183,
279080304,
295848517,
327300943,
329741709,
366394532,
382493153,
389329248,
404353381,
406012911,
418813499,
426573907,
452566575,
456930760,
463021458,
474340589,
476248039,
478197093,
487576917,
495653489,
501862896,
]
} }
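A sketch of how a test harness flips the whole process onto the cheap parameters defined at the top of this file:

    use core::global::{self, MiningParameterMode};

    global::set_mining_mode(MiningParameterMode::AutomatedTesting);
    assert_eq!(global::sizeshift(), 10); // AUTOMATED_TESTING_SIZESHIFT
    assert_eq!(global::proofsize(), 4); // AUTOMATED_TESTING_PROOF_SIZE
    assert!(global::is_automated_testing_mode());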

@@ -55,9 +55,10 @@ impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self { match *self {
Error::IOErr(ref e) => write!(f, "{}", e), Error::IOErr(ref e) => write!(f, "{}", e),
Error::UnexpectedData { expected: ref e, received: ref r } => { Error::UnexpectedData {
write!(f, "expected {:?}, got {:?}", e, r) expected: ref e,
} received: ref r,
} => write!(f, "expected {:?}, got {:?}", e, r),
Error::CorruptedData => f.write_str("corrupted data"), Error::CorruptedData => f.write_str("corrupted data"),
Error::TooLargeReadErr => f.write_str("too large read"), Error::TooLargeReadErr => f.write_str("too large read"),
} }
@@ -75,7 +76,10 @@ impl error::Error for Error {
fn description(&self) -> &str { fn description(&self) -> &str {
match *self { match *self {
Error::IOErr(ref e) => error::Error::description(e), Error::IOErr(ref e) => error::Error::description(e),
Error::UnexpectedData { expected: _, received: _ } => "unexpected data", Error::UnexpectedData {
expected: _,
received: _,
} => "unexpected data",
Error::CorruptedData => "corrupted data", Error::CorruptedData => "corrupted data",
Error::TooLargeReadErr => "too large read", Error::TooLargeReadErr => "too large read",
} }
@@ -180,7 +184,8 @@ pub trait Writeable {
/// Reads directly to a Reader, a utility type thinly wrapping an /// Reads directly to a Reader, a utility type thinly wrapping an
/// underlying Read implementation. /// underlying Read implementation.
pub trait Readable pub trait Readable
where Self: Sized where
Self: Sized,
{ {
/// Reads the data necessary to this Readable from the provided reader /// Reads the data necessary to this Readable from the provided reader
fn read(reader: &mut Reader) -> Result<Self, Error>; fn read(reader: &mut Reader) -> Result<Self, Error>;
@@ -245,7 +250,9 @@ impl<'a> Reader for BinReader<'a> {
return Err(Error::TooLargeReadErr); return Err(Error::TooLargeReadErr);
} }
let mut buf = vec![0; length]; let mut buf = vec![0; length];
self.source.read_exact(&mut buf).map(move |_| buf).map_err(Error::IOErr) self.source.read_exact(&mut buf).map(move |_| buf).map_err(
Error::IOErr,
)
} }
fn expect_u8(&mut self, val: u8) -> Result<u8, Error> { fn expect_u8(&mut self, val: u8) -> Result<u8, Error> {
@@ -338,14 +345,19 @@ impl_int!(u32, write_u32, read_u32);
impl_int!(u64, write_u64, read_u64); impl_int!(u64, write_u64, read_u64);
impl_int!(i64, write_i64, read_i64); impl_int!(i64, write_i64, read_i64);
impl<T> Readable for Vec<T> where T: Readable { impl<T> Readable for Vec<T>
where
T: Readable,
{
fn read(reader: &mut Reader) -> Result<Vec<T>, Error> { fn read(reader: &mut Reader) -> Result<Vec<T>, Error> {
let mut buf = Vec::new(); let mut buf = Vec::new();
loop { loop {
let elem = T::read(reader); let elem = T::read(reader);
match elem { match elem {
Ok(e) => buf.push(e), Ok(e) => buf.push(e),
Err(Error::IOErr(ref ioerr)) if ioerr.kind() == io::ErrorKind::UnexpectedEof => break, Err(Error::IOErr(ref ioerr)) if ioerr.kind() == io::ErrorKind::UnexpectedEof => {
break
}
Err(e) => return Err(e), Err(e) => return Err(e),
} }
} }
@@ -353,7 +365,10 @@ impl<T> Readable for Vec<T> where T: Readable {
} }
} }
impl<T> Writeable for Vec<T> where T: Writeable { impl<T> Writeable for Vec<T>
where
T: Writeable,
{
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), Error> { fn write<W: Writer>(&self, writer: &mut W) -> Result<(), Error> {
for elmt in self { for elmt in self {
elmt.write(writer)?; elmt.write(writer)?;
@@ -400,18 +415,22 @@ impl<A: Writeable, B: Writeable, C: Writeable, D: Writeable> Writeable for (A, B
impl<A: Readable, B: Readable, C: Readable> Readable for (A, B, C) { impl<A: Readable, B: Readable, C: Readable> Readable for (A, B, C) {
fn read(reader: &mut Reader) -> Result<(A, B, C), Error> { fn read(reader: &mut Reader) -> Result<(A, B, C), Error> {
Ok((try!(Readable::read(reader)), Ok((
try!(Readable::read(reader)), try!(Readable::read(reader)),
try!(Readable::read(reader)))) try!(Readable::read(reader)),
try!(Readable::read(reader)),
))
} }
} }
impl<A: Readable, B: Readable, C: Readable, D: Readable> Readable for (A, B, C, D) { impl<A: Readable, B: Readable, C: Readable, D: Readable> Readable for (A, B, C, D) {
fn read(reader: &mut Reader) -> Result<(A, B, C, D), Error> { fn read(reader: &mut Reader) -> Result<(A, B, C, D), Error> {
Ok((try!(Readable::read(reader)), Ok((
try!(Readable::read(reader)), try!(Readable::read(reader)),
try!(Readable::read(reader)), try!(Readable::read(reader)),
try!(Readable::read(reader)))) try!(Readable::read(reader)),
try!(Readable::read(reader)),
))
} }
} }
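For a type the blanket impls above don't cover, joining this framework means implementing both traits by hand. A minimal sketch for a hypothetical two-field struct, using only primitives this file defines:

    struct Sample {
        id: u64,
        payload: Vec<u8>,
    }

    impl Writeable for Sample {
        fn write<W: Writer>(&self, writer: &mut W) -> Result<(), Error> {
            writer.write_u64(self.id)?;
            writer.write_bytes(&self.payload)
        }
    }

    impl Readable for Sample {
        fn read(reader: &mut Reader) -> Result<Sample, Error> {
            Ok(Sample {
                id: reader.read_u64()?,
                payload: reader.read_vec()?,
            })
        }
    }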

@@ -27,7 +27,7 @@ use secp::pedersen::Commitment;
use util::OneTime; use util::OneTime;
use store; use store;
use sync; use sync;
use core::global::{MiningParameterMode,MINING_PARAMETER_MODE}; use core::global::{MiningParameterMode, MINING_PARAMETER_MODE};
/// Implementation of the NetAdapter for the blockchain. Gets notified when new /// Implementation of the NetAdapter for the blockchain. Gets notified when new
/// blocks and transactions are received and forwards to the chain and pool /// blocks and transactions are received and forwards to the chain and pool
@@ -81,10 +81,12 @@ impl NetAdapter for NetToChainAdapter {
added_hs.push(bh.hash()); added_hs.push(bh.hash());
} }
Err(chain::Error::Unfit(s)) => { Err(chain::Error::Unfit(s)) => {
info!("Received unfit block header {} at {}: {}.", info!(
"Received unfit block header {} at {}: {}.",
bh.hash(), bh.hash(),
bh.height, bh.height,
s); s
);
} }
Err(chain::Error::StoreErr(e)) => { Err(chain::Error::StoreErr(e)) => {
error!("Store error processing block header {}: {:?}", bh.hash(), e); error!("Store error processing block header {}: {:?}", bh.hash(), e);
@@ -150,7 +152,11 @@ impl NetAdapter for NetToChainAdapter {
/// Find good peers we know with the provided capability and return their /// Find good peers we know with the provided capability and return their
/// addresses. /// addresses.
fn find_peer_addrs(&self, capab: p2p::Capabilities) -> Vec<SocketAddr> { fn find_peer_addrs(&self, capab: p2p::Capabilities) -> Vec<SocketAddr> {
let peers = self.peer_store.find_peers(State::Healthy, capab, p2p::MAX_PEER_ADDRS as usize); let peers = self.peer_store.find_peers(
State::Healthy,
capab,
p2p::MAX_PEER_ADDRS as usize,
);
debug!("Got {} peer addrs to send.", peers.len()); debug!("Got {} peer addrs to send.", peers.len());
map_vec!(peers, |p| p.addr) map_vec!(peers, |p| p.addr)
} }
@@ -192,10 +198,11 @@ impl NetAdapter for NetToChainAdapter {
} }
impl NetToChainAdapter { impl NetToChainAdapter {
pub fn new(chain_ref: Arc<chain::Chain>, pub fn new(
chain_ref: Arc<chain::Chain>,
tx_pool: Arc<RwLock<pool::TransactionPool<PoolToChainAdapter>>>, tx_pool: Arc<RwLock<pool::TransactionPool<PoolToChainAdapter>>>,
peer_store: Arc<PeerStore>) peer_store: Arc<PeerStore>,
-> NetToChainAdapter { ) -> NetToChainAdapter {
NetToChainAdapter { NetToChainAdapter {
chain: chain_ref, chain: chain_ref,
peer_store: peer_store, peer_store: peer_store,
@@ -209,13 +216,15 @@ impl NetToChainAdapter {
pub fn start_sync(&self, sync: sync::Syncer) { pub fn start_sync(&self, sync: sync::Syncer) {
let arc_sync = Arc::new(sync); let arc_sync = Arc::new(sync);
self.syncer.init(arc_sync.clone()); self.syncer.init(arc_sync.clone());
let spawn_result = thread::Builder::new().name("syncer".to_string()).spawn(move || { let spawn_result = thread::Builder::new().name("syncer".to_string()).spawn(
move || {
let sync_run_result = arc_sync.run(); let sync_run_result = arc_sync.run();
match sync_run_result { match sync_run_result {
Ok(_) => {} Ok(_) => {}
Err(_) => {} Err(_) => {}
} }
}); },
);
match spawn_result { match spawn_result {
Ok(_) => {} Ok(_) => {}
Err(_) => {} Err(_) => {}
@@ -229,7 +238,7 @@ impl NetToChainAdapter {
} else { } else {
chain::NONE chain::NONE
}; };
let param_ref=MINING_PARAMETER_MODE.read().unwrap(); let param_ref = MINING_PARAMETER_MODE.read().unwrap();
let opts = match *param_ref { let opts = match *param_ref {
MiningParameterMode::AutomatedTesting => opts | chain::EASY_POW, MiningParameterMode::AutomatedTesting => opts | chain::EASY_POW,
MiningParameterMode::UserTesting => opts | chain::EASY_POW, MiningParameterMode::UserTesting => opts | chain::EASY_POW,
@@ -251,9 +260,11 @@ impl ChainAdapter for ChainToPoolAndNetAdapter {
fn block_accepted(&self, b: &core::Block) { fn block_accepted(&self, b: &core::Block) {
{ {
if let Err(e) = self.tx_pool.write().unwrap().reconcile_block(b) { if let Err(e) = self.tx_pool.write().unwrap().reconcile_block(b) {
error!("Pool could not update itself at block {}: {:?}", error!(
"Pool could not update itself at block {}: {:?}",
b.hash(), b.hash(),
e); e
);
} }
} }
self.p2p.borrow().broadcast_block(b); self.p2p.borrow().broadcast_block(b);
@@ -261,8 +272,9 @@ } }
} }
impl ChainToPoolAndNetAdapter { impl ChainToPoolAndNetAdapter {
pub fn new(tx_pool: Arc<RwLock<pool::TransactionPool<PoolToChainAdapter>>>) pub fn new(
-> ChainToPoolAndNetAdapter { tx_pool: Arc<RwLock<pool::TransactionPool<PoolToChainAdapter>>>,
) -> ChainToPoolAndNetAdapter {
ChainToPoolAndNetAdapter { ChainToPoolAndNetAdapter {
tx_pool: tx_pool, tx_pool: tx_pool,
p2p: OneTime::new(), p2p: OneTime::new(),
@@ -294,21 +306,28 @@ impl PoolToChainAdapter {
impl pool::BlockChain for PoolToChainAdapter { impl pool::BlockChain for PoolToChainAdapter {
fn get_unspent(&self, output_ref: &Commitment) -> Result<Output, pool::PoolError> { fn get_unspent(&self, output_ref: &Commitment) -> Result<Output, pool::PoolError> {
self.chain.borrow().get_unspent(output_ref) self.chain.borrow().get_unspent(output_ref).map_err(
.map_err(|e| match e { |e| match e {
chain::types::Error::OutputNotFound => pool::PoolError::OutputNotFound, chain::types::Error::OutputNotFound => pool::PoolError::OutputNotFound,
chain::types::Error::OutputSpent => pool::PoolError::OutputSpent, chain::types::Error::OutputSpent => pool::PoolError::OutputSpent,
_ => pool::PoolError::GenericPoolError, _ => pool::PoolError::GenericPoolError,
}) },
)
} }
fn get_block_header_by_output_commit(&self, commit: &Commitment) -> Result<BlockHeader, pool::PoolError> { fn get_block_header_by_output_commit(
self.chain.borrow().get_block_header_by_output_commit(commit) &self,
commit: &Commitment,
) -> Result<BlockHeader, pool::PoolError> {
self.chain
.borrow()
.get_block_header_by_output_commit(commit)
.map_err(|_| pool::PoolError::GenericPoolError) .map_err(|_| pool::PoolError::GenericPoolError)
} }
fn head_header(&self) -> Result<BlockHeader, pool::PoolError> { fn head_header(&self) -> Result<BlockHeader, pool::PoolError> {
self.chain.borrow().head_header() self.chain.borrow().head_header().map_err(|_| {
.map_err(|_| pool::PoolError::GenericPoolError) pool::PoolError::GenericPoolError
})
} }
} }

@@ -55,5 +55,5 @@ mod sync;
mod types; mod types;
mod miner; mod miner;
pub use server::{Server}; pub use server::Server;
pub use types::{ServerConfig, Seeding, ServerStats}; pub use types::{ServerConfig, Seeding, ServerStats};

@@ -231,7 +231,7 @@ impl Miner {
next_stat_output = time::get_time().sec + stat_output_interval; next_stat_output = time::get_time().sec + stat_output_interval;
} }
} }
//avoid busy wait // avoid busy wait
let sleep_dur = std::time::Duration::from_millis(100); let sleep_dur = std::time::Duration::from_millis(100);
thread::sleep(sleep_dur); thread::sleep(sleep_dur);
} }
@@ -540,7 +540,9 @@ impl Miner {
b.header.nonce = rng.gen(); b.header.nonce = rng.gen();
b.header.difficulty = difficulty; b.header.difficulty = difficulty;
b.header.timestamp = time::at_utc(time::Timespec::new(now_sec, 0)); b.header.timestamp = time::at_utc(time::Timespec::new(now_sec, 0));
self.chain.set_sumtree_roots(&mut b).expect("Error setting sum tree roots"); self.chain.set_sumtree_roots(&mut b).expect(
"Error setting sum tree roots",
);
b b
} }

@@ -44,10 +44,11 @@ pub struct Seeder {
} }
impl Seeder { impl Seeder {
pub fn new(capabilities: p2p::Capabilities, pub fn new(
capabilities: p2p::Capabilities,
peer_store: Arc<p2p::PeerStore>, peer_store: Arc<p2p::PeerStore>,
p2p: Arc<p2p::Server>) p2p: Arc<p2p::Server>,
-> Seeder { ) -> Seeder {
Seeder { Seeder {
peer_store: peer_store, peer_store: peer_store,
p2p: p2p, p2p: p2p,
@@ -55,17 +56,20 @@ impl Seeder {
} }
} }
pub fn connect_and_monitor(&self, pub fn connect_and_monitor(
&self,
h: reactor::Handle, h: reactor::Handle,
seed_list: Box<Future<Item = Vec<SocketAddr>, Error = String>>) { seed_list: Box<Future<Item = Vec<SocketAddr>, Error = String>>,
) {
// open a channel with a listener that connects every peer address sent below // open a channel with a listener that connects every peer address sent below
// max peer count // max peer count
let (tx, rx) = futures::sync::mpsc::unbounded(); let (tx, rx) = futures::sync::mpsc::unbounded();
h.spawn(self.listen_for_addrs(h.clone(), rx)); h.spawn(self.listen_for_addrs(h.clone(), rx));
// check seeds and start monitoring connections // check seeds and start monitoring connections
let seeder = self.connect_to_seeds(tx.clone(), seed_list) let seeder = self.connect_to_seeds(tx.clone(), seed_list).join(
.join(self.monitor_peers(tx.clone())); self.monitor_peers(tx.clone()),
);
h.spawn(seeder.map(|_| ()).map_err(|e| { h.spawn(seeder.map(|_| ()).map_err(|e| {
error!("Seeding or peer monitoring error: {}", e); error!("Seeding or peer monitoring error: {}", e);
@@ -73,9 +77,10 @@ impl Seeder {
})); }));
} }
fn monitor_peers(&self, fn monitor_peers(
tx: mpsc::UnboundedSender<SocketAddr>) &self,
-> Box<Future<Item = (), Error = String>> { tx: mpsc::UnboundedSender<SocketAddr>,
) -> Box<Future<Item = (), Error = String>> {
let peer_store = self.peer_store.clone(); let peer_store = self.peer_store.clone();
let p2p_server = self.p2p.clone(); let p2p_server = self.p2p.clone();
@@ -91,8 +96,8 @@ impl Seeder {
for p in disconnected { for p in disconnected {
if p.is_banned() { if p.is_banned() {
debug!("Marking peer {} as banned.", p.info.addr); debug!("Marking peer {} as banned.", p.info.addr);
let update_result = peer_store.update_state( let update_result =
p.info.addr, p2p::State::Banned); peer_store.update_state(p.info.addr, p2p::State::Banned);
match update_result { match update_result {
Ok(()) => {} Ok(()) => {}
Err(_) => {} Err(_) => {}
@@ -102,9 +107,11 @@ impl Seeder {
// we don't have enough peers, getting more from db // we don't have enough peers, getting more from db
if p2p_server.peer_count() < PEER_PREFERRED_COUNT { if p2p_server.peer_count() < PEER_PREFERRED_COUNT {
let mut peers = peer_store.find_peers(p2p::State::Healthy, let mut peers = peer_store.find_peers(
p2p::State::Healthy,
p2p::UNKNOWN, p2p::UNKNOWN,
(2 * PEER_MAX_COUNT) as usize); (2 * PEER_MAX_COUNT) as usize,
);
peers.retain(|p| !p2p_server.is_known(p.addr)); peers.retain(|p| !p2p_server.is_known(p.addr));
if peers.len() > 0 { if peers.len() > 0 {
debug!("Got {} more peers from db, trying to connect.", peers.len()); debug!("Got {} more peers from db, trying to connect.", peers.len());
@@ -124,20 +131,24 @@ impl Seeder {
// Check if we have any pre-existing peer in db. If so, start with those, // Check if we have any pre-existing peer in db. If so, start with those,
// otherwise use the seeds provided. // otherwise use the seeds provided.
fn connect_to_seeds(&self, fn connect_to_seeds(
&self,
tx: mpsc::UnboundedSender<SocketAddr>, tx: mpsc::UnboundedSender<SocketAddr>,
seed_list: Box<Future<Item = Vec<SocketAddr>, Error = String>>) seed_list: Box<Future<Item = Vec<SocketAddr>, Error = String>>,
-> Box<Future<Item = (), Error = String>> { ) -> Box<Future<Item = (), Error = String>> {
let peer_store = self.peer_store.clone(); let peer_store = self.peer_store.clone();
// a thread pool is required so we don't block the event loop with a // a thread pool is required so we don't block the event loop with a
// db query // db query
let thread_pool = cpupool::CpuPool::new(1); let thread_pool = cpupool::CpuPool::new(1);
let seeder = thread_pool.spawn_fn(move || { let seeder = thread_pool
.spawn_fn(move || {
// check if we have some peers in db // check if we have some peers in db
let peers = peer_store.find_peers(p2p::State::Healthy, let peers = peer_store.find_peers(
p2p::State::Healthy,
p2p::FULL_HIST, p2p::FULL_HIST,
(2 * PEER_MAX_COUNT) as usize); (2 * PEER_MAX_COUNT) as usize,
);
Ok(peers) Ok(peers)
}) })
.and_then(|mut peers| { .and_then(|mut peers| {
@@ -168,10 +179,11 @@ impl Seeder {
/// addresses to and initiate a connection if the max peer count isn't /// addresses to and initiate a connection if the max peer count isn't
/// exceeded. A request for more peers is also automatically sent after /// exceeded. A request for more peers is also automatically sent after
/// connection. /// connection.
fn listen_for_addrs(&self, fn listen_for_addrs(
&self,
h: reactor::Handle, h: reactor::Handle,
rx: mpsc::UnboundedReceiver<SocketAddr>) rx: mpsc::UnboundedReceiver<SocketAddr>,
-> Box<Future<Item = (), Error = ()>> { ) -> Box<Future<Item = (), Error = ()>> {
let capab = self.capabilities; let capab = self.capabilities;
let p2p_store = self.peer_store.clone(); let p2p_store = self.peer_store.clone();
let p2p_server = self.p2p.clone(); let p2p_server = self.p2p.clone();
@@ -180,11 +192,13 @@ impl Seeder {
debug!("New peer address to connect to: {}.", peer_addr); debug!("New peer address to connect to: {}.", peer_addr);
let inner_h = h.clone(); let inner_h = h.clone();
if p2p_server.peer_count() < PEER_MAX_COUNT { if p2p_server.peer_count() < PEER_MAX_COUNT {
connect_and_req(capab, connect_and_req(
capab,
p2p_store.clone(), p2p_store.clone(),
p2p_server.clone(), p2p_server.clone(),
inner_h, inner_h,
peer_addr) peer_addr,
)
} else { } else {
Box::new(future::ok(())) Box::new(future::ok(()))
} }
@@ -201,7 +215,8 @@ pub fn web_seeds(h: reactor::Handle) -> Box<Future<Item = Vec<SocketAddr>, Error
let client = hyper::Client::new(&h); let client = hyper::Client::new(&h);
// http get, filtering out non 200 results // http get, filtering out non 200 results
client.get(url) client
.get(url)
.map_err(|e| e.to_string()) .map_err(|e| e.to_string())
.and_then(|res| { .and_then(|res| {
if res.status() != hyper::Ok { if res.status() != hyper::Ok {
@@ -211,14 +226,17 @@ pub fn web_seeds(h: reactor::Handle) -> Box<Future<Item = Vec<SocketAddr>, Error
}) })
.and_then(|res| { .and_then(|res| {
// collect all chunks and split around whitespace to get a list of SocketAddr // collect all chunks and split around whitespace to get a list of SocketAddr
res.body().collect().map_err(|e| e.to_string()).and_then(|chunks| { res.body().collect().map_err(|e| e.to_string()).and_then(
|chunks| {
let res = chunks.iter().fold("".to_string(), |acc, ref chunk| { let res = chunks.iter().fold("".to_string(), |acc, ref chunk| {
acc + str::from_utf8(&chunk[..]).unwrap() acc + str::from_utf8(&chunk[..]).unwrap()
}); });
let addrs = let addrs = res.split_whitespace()
res.split_whitespace().map(|s| s.parse().unwrap()).collect::<Vec<_>>(); .map(|s| s.parse().unwrap())
.collect::<Vec<_>>();
Ok(addrs) Ok(addrs)
}) },
)
}) })
}); });
Box::new(seeds) Box::new(seeds)
@@ -226,21 +244,28 @@ pub fn web_seeds(h: reactor::Handle) -> Box<Future<Item = Vec<SocketAddr>, Error
/// Convenience function when the seed list is immediately known. Mostly used /// Convenience function when the seed list is immediately known. Mostly used
/// for tests. /// for tests.
pub fn predefined_seeds(addrs_str: Vec<String>) pub fn predefined_seeds(
-> Box<Future<Item = Vec<SocketAddr>, Error = String>> { addrs_str: Vec<String>,
let seeds = future::ok(()) ) -> Box<Future<Item = Vec<SocketAddr>, Error = String>> {
.and_then(move |_| Ok(addrs_str.iter().map(|s| s.parse().unwrap()).collect::<Vec<_>>())); let seeds = future::ok(()).and_then(move |_| {
Ok(
addrs_str
.iter()
.map(|s| s.parse().unwrap())
.collect::<Vec<_>>(),
)
});
Box::new(seeds) Box::new(seeds)
} }
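In a test, wiring this into the seeder from earlier in the file looks roughly as follows (the address, seeder and handle bindings are illustrative):

    let seed_list = predefined_seeds(vec!["127.0.0.1:13414".to_string()]);
    seeder.connect_and_monitor(evt_handle.clone(), seed_list);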
fn connect_and_req(capab: p2p::Capabilities, fn connect_and_req(
capab: p2p::Capabilities,
peer_store: Arc<p2p::PeerStore>, peer_store: Arc<p2p::PeerStore>,
p2p: Arc<p2p::Server>, p2p: Arc<p2p::Server>,
h: reactor::Handle, h: reactor::Handle,
addr: SocketAddr) addr: SocketAddr,
-> Box<Future<Item = (), Error = ()>> { ) -> Box<Future<Item = (), Error = ()>> {
let fut = p2p.connect_peer(addr, h) let fut = p2p.connect_peer(addr, h).then(move |p| {
.then(move |p| {
match p { match p {
Ok(Some(p)) => { Ok(Some(p)) => {
let peer_result = p.send_peer_request(capab); let peer_result = p.send_peer_request(capab);

@@ -79,35 +79,47 @@ impl Server {
pub fn future(mut config: ServerConfig, evt_handle: &reactor::Handle) -> Result<Server, Error> { pub fn future(mut config: ServerConfig, evt_handle: &reactor::Handle) -> Result<Server, Error> {
let pool_adapter = Arc::new(PoolToChainAdapter::new()); let pool_adapter = Arc::new(PoolToChainAdapter::new());
let tx_pool = Arc::new(RwLock::new(pool::TransactionPool::new(pool_adapter.clone()))); let tx_pool = Arc::new(RwLock::new(
pool::TransactionPool::new(pool_adapter.clone()),
));
let chain_adapter = Arc::new(ChainToPoolAndNetAdapter::new(tx_pool.clone())); let chain_adapter = Arc::new(ChainToPoolAndNetAdapter::new(tx_pool.clone()));
let mut genesis_block = None; let mut genesis_block = None;
if !chain::Chain::chain_exists(config.db_root.clone()){ if !chain::Chain::chain_exists(config.db_root.clone()) {
genesis_block=pow::mine_genesis_block(config.mining_config.clone()); genesis_block = pow::mine_genesis_block(config.mining_config.clone());
} }
let shared_chain = Arc::new(chain::Chain::init(config.db_root.clone(), let shared_chain = Arc::new(chain::Chain::init(
config.db_root.clone(),
chain_adapter.clone(), chain_adapter.clone(),
genesis_block, genesis_block,
pow::verify_size)?); pow::verify_size,
)?);
pool_adapter.set_chain(shared_chain.clone()); pool_adapter.set_chain(shared_chain.clone());
let peer_store = Arc::new(p2p::PeerStore::new(config.db_root.clone())?); let peer_store = Arc::new(p2p::PeerStore::new(config.db_root.clone())?);
let net_adapter = Arc::new(NetToChainAdapter::new(shared_chain.clone(), let net_adapter = Arc::new(NetToChainAdapter::new(
shared_chain.clone(),
tx_pool.clone(), tx_pool.clone(),
peer_store.clone())); peer_store.clone(),
let p2p_server = ));
Arc::new(p2p::Server::new(config.capabilities, config.p2p_config.unwrap(), net_adapter.clone())); let p2p_server = Arc::new(p2p::Server::new(
config.capabilities,
config.p2p_config.unwrap(),
net_adapter.clone(),
));
chain_adapter.init(p2p_server.clone()); chain_adapter.init(p2p_server.clone());
let seed = seed::Seeder::new(config.capabilities, peer_store.clone(), p2p_server.clone()); let seed = seed::Seeder::new(config.capabilities, peer_store.clone(), p2p_server.clone());
match config.seeding_type.clone() { match config.seeding_type.clone() {
Seeding::None => {} Seeding::None => {}
Seeding::List => { Seeding::List => {
seed.connect_and_monitor(evt_handle.clone(), seed::predefined_seeds(config.seeds.as_mut().unwrap().clone())); seed.connect_and_monitor(
evt_handle.clone(),
seed::predefined_seeds(config.seeds.as_mut().unwrap().clone()),
);
} }
Seeding::WebStatic => { Seeding::WebStatic => {
seed.connect_and_monitor(evt_handle.clone(), seed::web_seeds(evt_handle.clone())); seed.connect_and_monitor(evt_handle.clone(), seed::web_seeds(evt_handle.clone()));
@ -121,9 +133,11 @@ impl Server {
info!("Starting rest apis at: {}", &config.api_http_addr); info!("Starting rest apis at: {}", &config.api_http_addr);
api::start_rest_apis(config.api_http_addr.clone(), api::start_rest_apis(
config.api_http_addr.clone(),
shared_chain.clone(), shared_chain.clone(),
tx_pool.clone()); tx_pool.clone(),
);
warn!("Grin server started."); warn!("Grin server started.");
Ok(Server { Ok(Server {
@ -138,7 +152,12 @@ impl Server {
/// Asks the server to connect to a peer at the provided network address. /// Asks the server to connect to a peer at the provided network address.
pub fn connect_peer(&self, addr: SocketAddr) -> Result<(), Error> { pub fn connect_peer(&self, addr: SocketAddr) -> Result<(), Error> {
let handle = self.evt_handle.clone(); let handle = self.evt_handle.clone();
handle.spawn(self.p2p.connect_peer(addr, handle.clone()).map(|_| ()).map_err(|_| ())); handle.spawn(
self.p2p
.connect_peer(addr, handle.clone())
.map(|_| ())
.map_err(|_| ()),
);
Ok(()) Ok(())
} }
@ -154,7 +173,7 @@ impl Server {
let proof_size = global::proofsize(); let proof_size = global::proofsize();
let mut miner = miner::Miner::new(config.clone(), self.chain.clone(), self.tx_pool.clone()); let mut miner = miner::Miner::new(config.clone(), self.chain.clone(), self.tx_pool.clone());
miner.set_debug_output_id(format!("Port {}",self.config.p2p_config.unwrap().port)); miner.set_debug_output_id(format!("Port {}", self.config.p2p_config.unwrap().port));
thread::spawn(move || { thread::spawn(move || {
miner.run_loop(config.clone(), cuckoo_size as u32, proof_size); miner.run_loop(config.clone(), cuckoo_size as u32, proof_size);
}); });
@ -165,12 +184,14 @@ impl Server {
self.chain.head().unwrap() self.chain.head().unwrap()
} }
/// Returns a set of stats about this server. This and the ServerStats structure /// Returns a set of stats about this server. This and the ServerStats
/// can be updated over time to include any information needed by tests or other /// structure
/// can be updated over time to include any information needed by tests or
/// other
/// consumers /// consumers
pub fn get_server_stats(&self) -> Result<ServerStats, Error>{ pub fn get_server_stats(&self) -> Result<ServerStats, Error> {
Ok(ServerStats{ Ok(ServerStats {
peer_count: self.peer_count(), peer_count: self.peer_count(),
head: self.head(), head: self.head(),
}) })
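Aside: a sketch of how a test might consume get_server_stats(), assuming a running instance named server (not shown here); peer_count and head are the two fields ServerStats defines below:

let stats = server.get_server_stats().unwrap();
assert_eq!(stats.peer_count, server.peer_count());
println!("head at height {}", stats.head.height);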

View file

@ -129,8 +129,10 @@ impl Syncer {
prev_h = header.previous; prev_h = header.previous;
} }
debug!("Added {} full block hashes to download.", debug!(
blocks_to_download.len()); "Added {} full block hashes to download.",
blocks_to_download.len()
);
Ok(()) Ok(())
} }
@ -141,7 +143,8 @@ impl Syncer {
if blocks_downloading.len() > MAX_BODY_DOWNLOADS { if blocks_downloading.len() > MAX_BODY_DOWNLOADS {
// clean up potentially dead downloads // clean up potentially dead downloads
let twenty_sec_ago = Instant::now() - Duration::from_secs(20); let twenty_sec_ago = Instant::now() - Duration::from_secs(20);
blocks_downloading.iter() blocks_downloading
.iter()
.position(|&h| h.1 < twenty_sec_ago) .position(|&h| h.1 < twenty_sec_ago)
.map(|n| blocks_downloading.remove(n)); .map(|n| blocks_downloading.remove(n));
} else { } else {
@ -158,8 +161,10 @@ impl Syncer {
} }
blocks_downloading.push((h, Instant::now())); blocks_downloading.push((h, Instant::now()));
} }
debug!("Requesting more full block hashes to download, total: {}.", debug!(
blocks_to_download.len()); "Requesting more full block hashes to download, total: {}.",
blocks_to_download.len()
);
} }
} }
@ -181,10 +186,12 @@ impl Syncer {
let peer = self.p2p.most_work_peer(); let peer = self.p2p.most_work_peer();
let locator = self.get_locator(&tip)?; let locator = self.get_locator(&tip)?;
if let Some(p) = peer { if let Some(p) = peer {
debug!("Asking peer {} for more block headers starting from {} at {}.", debug!(
"Asking peer {} for more block headers starting from {} at {}.",
p.info.addr, p.info.addr,
tip.last_block_h, tip.last_block_h,
tip.height); tip.height
);
p.send_header_request(locator)?; p.send_header_request(locator)?;
} else { } else {
warn!("Could not get most worked peer to request headers."); warn!("Could not get most worked peer to request headers.");

View file

@ -119,12 +119,10 @@ impl Default for ServerConfig {
/// ///
/// ///
/// ///
#[derive(Clone)] #[derive(Clone)]
pub struct ServerStats { pub struct ServerStats {
/// Number of peers /// Number of peers
pub peer_count:u32, pub peer_count: u32,
/// Chain head /// Chain head
pub head: chain::Tip, pub head: chain::Tip,
} }

View file

@ -42,22 +42,26 @@ pub trait Handler: Sync + Send {
/// Handle function to implement to process incoming messages. A sender to /// Handle function to implement to process incoming messages. A sender to
/// reply immediately as well as the message header and its unparsed body /// reply immediately as well as the message header and its unparsed body
/// are provided. /// are provided.
fn handle(&self, fn handle(
&self,
sender: UnboundedSender<Vec<u8>>, sender: UnboundedSender<Vec<u8>>,
header: MsgHeader, header: MsgHeader,
body: Vec<u8>) body: Vec<u8>,
-> Result<Option<Hash>, ser::Error>; ) -> Result<Option<Hash>, ser::Error>;
} }
impl<F> Handler for F impl<F> Handler for F
where F: Fn(UnboundedSender<Vec<u8>>, MsgHeader, Vec<u8>) -> Result<Option<Hash>, ser::Error>, where
F: Sync + Send F: Fn(UnboundedSender<Vec<u8>>, MsgHeader, Vec<u8>)
-> Result<Option<Hash>, ser::Error>,
F: Sync + Send,
{ {
fn handle(&self, fn handle(
&self,
sender: UnboundedSender<Vec<u8>>, sender: UnboundedSender<Vec<u8>>,
header: MsgHeader, header: MsgHeader,
body: Vec<u8>) body: Vec<u8>,
-> Result<Option<Hash>, ser::Error> { ) -> Result<Option<Hash>, ser::Error> {
self(sender, header, body) self(sender, header, body)
} }
} }
@ -87,10 +91,12 @@ impl Connection {
/// Start listening on the provided connection and wraps it. Does not hang /// Start listening on the provided connection and wraps it. Does not hang
/// the current thread, instead just returns a future and the Connection /// the current thread, instead just returns a future and the Connection
/// itself. /// itself.
pub fn listen<F>(conn: TcpStream, pub fn listen<F>(
handler: F) conn: TcpStream,
-> (Connection, Box<Future<Item = (), Error = Error>>) handler: F,
where F: Handler + 'static ) -> (Connection, Box<Future<Item = (), Error = Error>>)
where
F: Handler + 'static,
{ {
let (reader, writer) = conn.split(); let (reader, writer) = conn.split();
@ -105,7 +111,9 @@ impl Connection {
// same for closing the connection // same for closing the connection
let (close_tx, close_rx) = futures::sync::mpsc::channel(1); let (close_tx, close_rx) = futures::sync::mpsc::channel(1);
let close_conn = close_rx.for_each(|_| Ok(())).map_err(|_| Error::ConnectionClose); let close_conn = close_rx.for_each(|_| Ok(())).map_err(
|_| Error::ConnectionClose,
);
let me = Connection { let me = Connection {
outbound_chan: tx.clone(), outbound_chan: tx.clone(),
@ -123,21 +131,25 @@ impl Connection {
let write_msg = me.write_msg(rx, writer).map(|_| ()); let write_msg = me.write_msg(rx, writer).map(|_| ());
// select between our different futures and return them // select between our different futures and return them
let fut = let fut = Box::new(
Box::new(close_conn.select(read_msg.select(write_msg).map(|_| ()).map_err(|(e, _)| e)) close_conn
.select(read_msg.select(write_msg).map(|_| ()).map_err(|(e, _)| e))
.map(|_| ()) .map(|_| ())
.map_err(|(e, _)| e)); .map_err(|(e, _)| e),
);
(me, fut) (me, fut)
} }
/// Prepares the future that gets message data produced by our system and /// Prepares the future that gets message data produced by our system and
/// sends it to the peer connection /// sends it to the peer connection
fn write_msg<W>(&self, fn write_msg<W>(
&self,
rx: UnboundedReceiver<Vec<u8>>, rx: UnboundedReceiver<Vec<u8>>,
writer: W) writer: W,
-> Box<Future<Item = W, Error = Error>> ) -> Box<Future<Item = W, Error = Error>>
where W: AsyncWrite + 'static where
W: AsyncWrite + 'static,
{ {
let sent_bytes = self.sent_bytes.clone(); let sent_bytes = self.sent_bytes.clone();
@ -158,13 +170,15 @@ impl Connection {
/// Prepares the future reading from the peer connection, parsing each /// Prepares the future reading from the peer connection, parsing each
/// message and forwarding them appropriately based on their type /// message and forwarding them appropriately based on their type
fn read_msg<F, R>(&self, fn read_msg<F, R>(
&self,
sender: UnboundedSender<Vec<u8>>, sender: UnboundedSender<Vec<u8>>,
reader: R, reader: R,
handler: F) handler: F,
-> Box<Future<Item = R, Error = Error>> ) -> Box<Future<Item = R, Error = Error>>
where F: Handler + 'static, where
R: AsyncRead + 'static F: Handler + 'static,
R: AsyncRead + 'static,
{ {
// infinite iterator stream so we repeat the message reading logic until the // infinite iterator stream so we repeat the message reading logic until the
@ -218,10 +232,15 @@ impl Connection {
let mut body_data = vec![]; let mut body_data = vec![];
try!(ser::serialize(&mut body_data, body)); try!(ser::serialize(&mut body_data, body));
let mut data = vec![]; let mut data = vec![];
try!(ser::serialize(&mut data, &MsgHeader::new(t, body_data.len() as u64))); try!(ser::serialize(
&mut data,
&MsgHeader::new(t, body_data.len() as u64),
));
data.append(&mut body_data); data.append(&mut body_data);
self.outbound_chan.send(data).map_err(|_| Error::ConnectionClose) self.outbound_chan.send(data).map_err(
|_| Error::ConnectionClose,
)
} }
/// Bytes sent and received by this peer to the remote peer. /// Bytes sent and received by this peer to the remote peer.
@ -242,10 +261,12 @@ pub struct TimeoutConnection {
impl TimeoutConnection { impl TimeoutConnection {
/// Same as Connection /// Same as Connection
pub fn listen<F>(conn: TcpStream, pub fn listen<F>(
handler: F) conn: TcpStream,
-> (TimeoutConnection, Box<Future<Item = (), Error = Error>>) handler: F,
where F: Handler + 'static ) -> (TimeoutConnection, Box<Future<Item = (), Error = Error>>)
where
F: Handler + 'static,
{ {
let expects = Arc::new(Mutex::new(vec![])); let expects = Arc::new(Mutex::new(vec![]));
@ -258,7 +279,8 @@ impl TimeoutConnection {
let recv_h = try!(handler.handle(sender, header, data)); let recv_h = try!(handler.handle(sender, header, data));
let mut expects = exp.lock().unwrap(); let mut expects = exp.lock().unwrap();
let filtered = expects.iter() let filtered = expects
.iter()
.filter(|&&(typ, h, _): &&(Type, Option<Hash>, Instant)| { .filter(|&&(typ, h, _): &&(Type, Option<Hash>, Instant)| {
msg_type != typ || h.is_some() && recv_h != h msg_type != typ || h.is_some() && recv_h != h
}) })
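Aside: the retain-filter above reads inverted, so it is worth restating. An expected response is dropped (i.e. satisfied) exactly when the reply type matches and, if a hash was recorded, the reply hash matches too. A standalone sketch of that predicate (satisfied is a hypothetical helper, not part of the code, and the Instant timestamp is omitted):

fn satisfied(expected: (Type, Option<Hash>), reply_type: Type, reply_hash: Option<Hash>) -> bool {
	// the filter keeps entries where: msg_type != typ || h.is_some() && recv_h != h;
	// an expectation is removed when the negation below holds
	expected.0 == reply_type && (expected.1.is_none() || expected.1 == reply_hash)
}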
@ -288,17 +310,21 @@ impl TimeoutConnection {
underlying: conn, underlying: conn,
expected_responses: expects, expected_responses: expects,
}; };
(me, Box::new(fut.select(timer).map(|_| ()).map_err(|(e1, _)| e1))) (
me,
Box::new(fut.select(timer).map(|_| ()).map_err(|(e1, _)| e1)),
)
} }
/// Sends a request and registers a timer on the provided message type and /// Sends a request and registers a timer on the provided message type and
/// optionally the hash of the sent data. /// optionally the hash of the sent data.
pub fn send_request<W: ser::Writeable>(&self, pub fn send_request<W: ser::Writeable>(
&self,
t: Type, t: Type,
rt: Type, rt: Type,
body: &W, body: &W,
expect_h: Option<(Hash)>) expect_h: Option<(Hash)>,
-> Result<(), Error> { ) -> Result<(), Error> {
let _sent = try!(self.underlying.send_msg(t, body)); let _sent = try!(self.underlying.send_msg(t, body));
let mut expects = self.expected_responses.lock().unwrap(); let mut expects = self.expected_responses.lock().unwrap();

View file

@ -47,12 +47,13 @@ impl Handshake {
} }
/// Handles connecting to a new remote peer, starting the version handshake. /// Handles connecting to a new remote peer, starting the version handshake.
pub fn connect(&self, pub fn connect(
&self,
capab: Capabilities, capab: Capabilities,
total_difficulty: Difficulty, total_difficulty: Difficulty,
self_addr: SocketAddr, self_addr: SocketAddr,
conn: TcpStream) conn: TcpStream,
-> Box<Future<Item = (TcpStream, ProtocolV1, PeerInfo), Error = Error>> { ) -> Box<Future<Item = (TcpStream, ProtocolV1, PeerInfo), Error = Error>> {
// prepare the first part of the handshake // prepare the first part of the handshake
let nonce = self.next_nonce(); let nonce = self.next_nonce();
let hand = Hand { let hand = Hand {
@ -66,7 +67,8 @@ impl Handshake {
}; };
// write and read the handshake response // write and read the handshake response
Box::new(write_msg(conn, hand, Type::Hand) Box::new(
write_msg(conn, hand, Type::Hand)
.and_then(|conn| read_msg::<Shake>(conn)) .and_then(|conn| read_msg::<Shake>(conn))
.and_then(|(conn, shake)| { .and_then(|(conn, shake)| {
if shake.version != 1 { if shake.version != 1 {
@ -87,18 +89,21 @@ impl Handshake {
// when more than one protocol version is supported, choosing should go here // when more than one protocol version is supported, choosing should go here
Ok((conn, ProtocolV1::new(), peer_info)) Ok((conn, ProtocolV1::new(), peer_info))
} }
})) }),
)
} }
/// Handles receiving a connection from a new remote peer that started the /// Handles receiving a connection from a new remote peer that started the
/// version handshake. /// version handshake.
pub fn handshake(&self, pub fn handshake(
&self,
capab: Capabilities, capab: Capabilities,
total_difficulty: Difficulty, total_difficulty: Difficulty,
conn: TcpStream) conn: TcpStream,
-> Box<Future<Item = (TcpStream, ProtocolV1, PeerInfo), Error = Error>> { ) -> Box<Future<Item = (TcpStream, ProtocolV1, PeerInfo), Error = Error>> {
let nonces = self.nonces.clone(); let nonces = self.nonces.clone();
Box::new(read_msg::<Hand>(conn) Box::new(
read_msg::<Hand>(conn)
.and_then(move |(conn, hand)| { .and_then(move |(conn, hand)| {
if hand.version != 1 { if hand.version != 1 {
return Err(Error::Serialization(ser::Error::UnexpectedData { return Err(Error::Serialization(ser::Error::UnexpectedData {
@ -138,7 +143,8 @@ impl Handshake {
write_msg(conn, shake, Type::Shake) write_msg(conn, shake, Type::Shake)
// when more than one protocol version is supported, choosing should go here // when more than one protocol version is supported, choosing should go here
.map(|conn| (conn, ProtocolV1::new(), peer_info)) .map(|conn| (conn, ProtocolV1::new(), peer_info))
})) }),
)
} }
/// Generate a new random nonce and store it in our ring buffer /// Generate a new random nonce and store it in our ring buffer

View file

@ -70,7 +70,8 @@ enum_from_primitive! {
/// the header first, handles its validation and then reads the Readable body, /// the header first, handles its validation and then reads the Readable body,
/// allocating buffers of the right size. /// allocating buffers of the right size.
pub fn read_msg<T>(conn: TcpStream) -> Box<Future<Item = (TcpStream, T), Error = Error>> pub fn read_msg<T>(conn: TcpStream) -> Box<Future<Item = (TcpStream, T), Error = Error>>
where T: Readable + 'static where
T: Readable + 'static,
{ {
let read_header = read_exact(conn, vec![0u8; HEADER_LEN as usize]) let read_header = read_exact(conn, vec![0u8; HEADER_LEN as usize])
.from_err() .from_err()
@ -84,7 +85,8 @@ pub fn read_msg<T>(conn: TcpStream) -> Box<Future<Item = (TcpStream, T), Error =
Ok((reader, header)) Ok((reader, header))
}); });
let read_msg = read_header.and_then(|(reader, header)| { let read_msg = read_header
.and_then(|(reader, header)| {
read_exact(reader, vec![0u8; header.msg_len as usize]).from_err() read_exact(reader, vec![0u8; header.msg_len as usize]).from_err()
}) })
.and_then(|(reader, buf)| { .and_then(|(reader, buf)| {
@ -97,11 +99,13 @@ pub fn read_msg<T>(conn: TcpStream) -> Box<Future<Item = (TcpStream, T), Error =
/// Future combinator to write a full message from a Writeable payload. /// Future combinator to write a full message from a Writeable payload.
/// Serializes the payload first and then sends the message header and that /// Serializes the payload first and then sends the message header and that
/// payload. /// payload.
pub fn write_msg<T>(conn: TcpStream, pub fn write_msg<T>(
conn: TcpStream,
msg: T, msg: T,
msg_type: Type) msg_type: Type,
-> Box<Future<Item = TcpStream, Error = Error>> ) -> Box<Future<Item = TcpStream, Error = Error>>
where T: Writeable + 'static where
T: Writeable + 'static,
{ {
let write_msg = ok((conn)).and_then(move |conn| { let write_msg = ok((conn)).and_then(move |conn| {
// prepare the body first so we know its serialized length // prepare the body first so we know its serialized length
@ -149,11 +153,13 @@ impl MsgHeader {
impl Writeable for MsgHeader { impl Writeable for MsgHeader {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> { fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
ser_multiwrite!(writer, ser_multiwrite!(
writer,
[write_u8, self.magic[0]], [write_u8, self.magic[0]],
[write_u8, self.magic[1]], [write_u8, self.magic[1]],
[write_u8, self.msg_type as u8], [write_u8, self.msg_type as u8],
[write_u64, self.msg_len]); [write_u64, self.msg_len]
);
Ok(()) Ok(())
} }
} }
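Aside: the four writes above pin down the wire header layout: two magic bytes, one message-type byte and a u64 length, 11 bytes in total (which is what HEADER_LEN must come to). A freestanding sketch, assuming the big-endian byte order the ser module conventionally writes:

fn header_bytes(magic: [u8; 2], msg_type: u8, msg_len: u64) -> Vec<u8> {
	let mut buf = Vec::with_capacity(11);
	buf.extend_from_slice(&magic); // 2 magic bytes
	buf.push(msg_type); // 1 type byte
	buf.extend_from_slice(&msg_len.to_be_bytes()); // 8 length bytes
	buf
}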
@ -199,10 +205,12 @@ pub struct Hand {
impl Writeable for Hand { impl Writeable for Hand {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> { fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
ser_multiwrite!(writer, ser_multiwrite!(
writer,
[write_u32, self.version], [write_u32, self.version],
[write_u32, self.capabilities.bits()], [write_u32, self.capabilities.bits()],
[write_u64, self.nonce]); [write_u64, self.nonce]
);
self.total_difficulty.write(writer).unwrap(); self.total_difficulty.write(writer).unwrap();
self.sender_addr.write(writer).unwrap(); self.sender_addr.write(writer).unwrap();
self.receiver_addr.write(writer).unwrap(); self.receiver_addr.write(writer).unwrap();
@ -218,7 +226,9 @@ impl Readable for Hand {
let receiver_addr = try!(SockAddr::read(reader)); let receiver_addr = try!(SockAddr::read(reader));
let ua = try!(reader.read_vec()); let ua = try!(reader.read_vec());
let user_agent = try!(String::from_utf8(ua).map_err(|_| ser::Error::CorruptedData)); let user_agent = try!(String::from_utf8(ua).map_err(|_| ser::Error::CorruptedData));
let capabilities = try!(Capabilities::from_bits(capab).ok_or(ser::Error::CorruptedData)); let capabilities = try!(Capabilities::from_bits(capab).ok_or(
ser::Error::CorruptedData,
));
Ok(Hand { Ok(Hand {
version: version, version: version,
capabilities: capabilities, capabilities: capabilities,
@ -248,9 +258,11 @@ pub struct Shake {
impl Writeable for Shake { impl Writeable for Shake {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> { fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
ser_multiwrite!(writer, ser_multiwrite!(
writer,
[write_u32, self.version], [write_u32, self.version],
[write_u32, self.capabilities.bits()]); [write_u32, self.capabilities.bits()]
);
self.total_difficulty.write(writer).unwrap(); self.total_difficulty.write(writer).unwrap();
writer.write_bytes(&self.user_agent).unwrap(); writer.write_bytes(&self.user_agent).unwrap();
Ok(()) Ok(())
@ -263,7 +275,9 @@ impl Readable for Shake {
let total_diff = try!(Difficulty::read(reader)); let total_diff = try!(Difficulty::read(reader));
let ua = try!(reader.read_vec()); let ua = try!(reader.read_vec());
let user_agent = try!(String::from_utf8(ua).map_err(|_| ser::Error::CorruptedData)); let user_agent = try!(String::from_utf8(ua).map_err(|_| ser::Error::CorruptedData));
let capabilities = try!(Capabilities::from_bits(capab).ok_or(ser::Error::CorruptedData)); let capabilities = try!(Capabilities::from_bits(capab).ok_or(
ser::Error::CorruptedData,
));
Ok(Shake { Ok(Shake {
version: version, version: version,
capabilities: capabilities, capabilities: capabilities,
@ -288,7 +302,9 @@ impl Writeable for GetPeerAddrs {
impl Readable for GetPeerAddrs { impl Readable for GetPeerAddrs {
fn read(reader: &mut Reader) -> Result<GetPeerAddrs, ser::Error> { fn read(reader: &mut Reader) -> Result<GetPeerAddrs, ser::Error> {
let capab = try!(reader.read_u32()); let capab = try!(reader.read_u32());
let capabilities = try!(Capabilities::from_bits(capab).ok_or(ser::Error::CorruptedData)); let capabilities = try!(Capabilities::from_bits(capab).ok_or(
ser::Error::CorruptedData,
));
Ok(GetPeerAddrs { capabilities: capabilities }) Ok(GetPeerAddrs { capabilities: capabilities })
} }
} }
@ -345,7 +361,9 @@ impl Writeable for PeerError {
impl Readable for PeerError { impl Readable for PeerError {
fn read(reader: &mut Reader) -> Result<PeerError, ser::Error> { fn read(reader: &mut Reader) -> Result<PeerError, ser::Error> {
let (code, msg) = ser_multiread!(reader, read_u32, read_vec); let (code, msg) = ser_multiread!(reader, read_u32, read_vec);
let message = try!(String::from_utf8(msg).map_err(|_| ser::Error::CorruptedData)); let message = try!(String::from_utf8(msg).map_err(
|_| ser::Error::CorruptedData,
));
Ok(PeerError { Ok(PeerError {
code: code, code: code,
message: message, message: message,
@ -362,10 +380,12 @@ impl Writeable for SockAddr {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> { fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
match self.0 { match self.0 {
SocketAddr::V4(sav4) => { SocketAddr::V4(sav4) => {
ser_multiwrite!(writer, ser_multiwrite!(
writer,
[write_u8, 0], [write_u8, 0],
[write_fixed_bytes, &sav4.ip().octets().to_vec()], [write_fixed_bytes, &sav4.ip().octets().to_vec()],
[write_u16, sav4.port()]); [write_u16, sav4.port()]
);
} }
SocketAddr::V6(sav6) => { SocketAddr::V6(sav6) => {
try!(writer.write_u8(1)); try!(writer.write_u8(1));
@ -385,25 +405,28 @@ impl Readable for SockAddr {
if v4_or_v6 == 0 { if v4_or_v6 == 0 {
let ip = try!(reader.read_fixed_bytes(4)); let ip = try!(reader.read_fixed_bytes(4));
let port = try!(reader.read_u16()); let port = try!(reader.read_u16());
Ok(SockAddr(SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(ip[0], Ok(SockAddr(SocketAddr::V4(SocketAddrV4::new(
ip[1], Ipv4Addr::new(ip[0], ip[1], ip[2], ip[3]),
ip[2], port,
ip[3]), ))))
port))))
} else { } else {
let ip = try_map_vec!([0..8], |_| reader.read_u16()); let ip = try_map_vec!([0..8], |_| reader.read_u16());
let port = try!(reader.read_u16()); let port = try!(reader.read_u16());
Ok(SockAddr(SocketAddr::V6(SocketAddrV6::new(Ipv6Addr::new(ip[0], Ok(SockAddr(SocketAddr::V6(SocketAddrV6::new(
Ipv6Addr::new(
ip[0],
ip[1], ip[1],
ip[2], ip[2],
ip[3], ip[3],
ip[4], ip[4],
ip[5], ip[5],
ip[6], ip[6],
ip[7]), ip[7],
),
port, port,
0, 0,
0)))) 0,
))))
} }
} }
} }
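Aside: the two branches above imply a simple tagged address encoding: a leading byte (0 for IPv4, 1 for IPv6) followed by the raw address bytes and a u16 port. A condensed sketch of the writer side, standard library only and assuming big-endian integers as elsewhere in ser:

use std::net::SocketAddr;

fn encode_addr(addr: SocketAddr, buf: &mut Vec<u8>) {
	match addr {
		SocketAddr::V4(a) => {
			buf.push(0); // IPv4 tag byte
			buf.extend_from_slice(&a.ip().octets()); // four address octets
			buf.extend_from_slice(&a.port().to_be_bytes()); // u16 port
		}
		SocketAddr::V6(a) => {
			buf.push(1); // IPv6 tag byte
			for seg in a.ip().segments().iter() {
				buf.extend_from_slice(&seg.to_be_bytes()); // eight u16 segments
			}
			buf.extend_from_slice(&a.port().to_be_bytes()); // u16 port
		}
	}
}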

View file

@ -42,48 +42,58 @@ unsafe impl Send for Peer {}
impl Peer { impl Peer {
/// Initiates the handshake with another peer. /// Initiates the handshake with another peer.
pub fn connect(conn: TcpStream, pub fn connect(
conn: TcpStream,
capab: Capabilities, capab: Capabilities,
total_difficulty: Difficulty, total_difficulty: Difficulty,
self_addr: SocketAddr, self_addr: SocketAddr,
hs: &Handshake) hs: &Handshake,
-> Box<Future<Item = (TcpStream, Peer), Error = Error>> { ) -> Box<Future<Item = (TcpStream, Peer), Error = Error>> {
let connect_peer = hs.connect(capab, total_difficulty, self_addr, conn) let connect_peer = hs.connect(capab, total_difficulty, self_addr, conn)
.and_then(|(conn, proto, info)| { .and_then(|(conn, proto, info)| {
Ok((conn, Ok((
conn,
Peer { Peer {
info: info, info: info,
proto: Box::new(proto), proto: Box::new(proto),
state: Arc::new(RwLock::new(State::Connected)), state: Arc::new(RwLock::new(State::Connected)),
})) },
))
}); });
Box::new(connect_peer) Box::new(connect_peer)
} }
/// Accept a handshake initiated by another peer. /// Accept a handshake initiated by another peer.
pub fn accept(conn: TcpStream, pub fn accept(
conn: TcpStream,
capab: Capabilities, capab: Capabilities,
total_difficulty: Difficulty, total_difficulty: Difficulty,
hs: &Handshake) hs: &Handshake,
-> Box<Future<Item = (TcpStream, Peer), Error = Error>> { ) -> Box<Future<Item = (TcpStream, Peer), Error = Error>> {
let hs_peer = hs.handshake(capab, total_difficulty, conn) let hs_peer = hs.handshake(capab, total_difficulty, conn).and_then(
.and_then(|(conn, proto, info)| { |(conn,
Ok((conn, proto,
info)| {
Ok((
conn,
Peer { Peer {
info: info, info: info,
proto: Box::new(proto), proto: Box::new(proto),
state: Arc::new(RwLock::new(State::Connected)), state: Arc::new(RwLock::new(State::Connected)),
})) },
}); ))
},
);
Box::new(hs_peer) Box::new(hs_peer)
} }
/// Main peer loop listening for messages and forwarding to the rest of the /// Main peer loop listening for messages and forwarding to the rest of the
/// system. /// system.
pub fn run(&self, pub fn run(
&self,
conn: TcpStream, conn: TcpStream,
na: Arc<NetAdapter>) na: Arc<NetAdapter>,
-> Box<Future<Item = (), Error = Error>> { ) -> Box<Future<Item = (), Error = Error>> {
let addr = self.info.addr; let addr = self.info.addr;
let state = self.state.clone(); let state = self.state.clone();

View file

@ -44,10 +44,11 @@ impl ProtocolV1 {
impl Protocol for ProtocolV1 { impl Protocol for ProtocolV1 {
/// Sets up the protocol reading, writing and closing logic. /// Sets up the protocol reading, writing and closing logic.
fn handle(&self, fn handle(
&self,
conn: TcpStream, conn: TcpStream,
adapter: Arc<NetAdapter>) adapter: Arc<NetAdapter>,
-> Box<Future<Item = (), Error = Error>> { ) -> Box<Future<Item = (), Error = Error>> {
let (conn, listener) = TimeoutConnection::listen(conn, move |sender, header, data| { let (conn, listener) = TimeoutConnection::listen(conn, move |sender, header, data| {
let adapt = adapter.as_ref(); let adapt = adapter.as_ref();
@ -81,10 +82,12 @@ impl Protocol for ProtocolV1 {
} }
fn send_header_request(&self, locator: Vec<Hash>) -> Result<(), Error> { fn send_header_request(&self, locator: Vec<Hash>) -> Result<(), Error> {
self.send_request(Type::GetHeaders, self.send_request(
Type::GetHeaders,
Type::Headers, Type::Headers,
&Locator { hashes: locator }, &Locator { hashes: locator },
None) None,
)
} }
fn send_block_request(&self, h: Hash) -> Result<(), Error> { fn send_block_request(&self, h: Hash) -> Result<(), Error> {
@ -92,10 +95,12 @@ impl Protocol for ProtocolV1 {
} }
fn send_peer_request(&self, capab: Capabilities) -> Result<(), Error> { fn send_peer_request(&self, capab: Capabilities) -> Result<(), Error> {
self.send_request(Type::GetPeerAddrs, self.send_request(
Type::GetPeerAddrs,
Type::PeerAddrs, Type::PeerAddrs,
&GetPeerAddrs { capabilities: capab }, &GetPeerAddrs { capabilities: capab },
None) None,
)
} }
/// Close the connection to the remote peer /// Close the connection to the remote peer
@ -109,21 +114,23 @@ impl ProtocolV1 {
self.conn.borrow().send_msg(t, body) self.conn.borrow().send_msg(t, body)
} }
fn send_request<W: ser::Writeable>(&self, fn send_request<W: ser::Writeable>(
&self,
t: Type, t: Type,
rt: Type, rt: Type,
body: &W, body: &W,
expect_resp: Option<Hash>) expect_resp: Option<Hash>,
-> Result<(), Error> { ) -> Result<(), Error> {
self.conn.borrow().send_request(t, rt, body, expect_resp) self.conn.borrow().send_request(t, rt, body, expect_resp)
} }
} }
fn handle_payload(adapter: &NetAdapter, fn handle_payload(
adapter: &NetAdapter,
sender: UnboundedSender<Vec<u8>>, sender: UnboundedSender<Vec<u8>>,
header: MsgHeader, header: MsgHeader,
buf: Vec<u8>) buf: Vec<u8>,
-> Result<Option<Hash>, ser::Error> { ) -> Result<Option<Hash>, ser::Error> {
match header.msg_type { match header.msg_type {
Type::Ping => { Type::Ping => {
let data = ser::ser_vec(&MsgHeader::new(Type::Pong, 0))?; let data = ser::ser_vec(&MsgHeader::new(Type::Pong, 0))?;
@ -144,8 +151,10 @@ fn handle_payload(adapter: &NetAdapter,
let mut body_data = vec![]; let mut body_data = vec![];
try!(ser::serialize(&mut body_data, &b)); try!(ser::serialize(&mut body_data, &b));
let mut data = vec![]; let mut data = vec![];
try!(ser::serialize(&mut data, try!(ser::serialize(
&MsgHeader::new(Type::Block, body_data.len() as u64))); &mut data,
&MsgHeader::new(Type::Block, body_data.len() as u64),
));
data.append(&mut body_data); data.append(&mut body_data);
sender.send(data).unwrap(); sender.send(data).unwrap();
} }
@ -164,10 +173,15 @@ fn handle_payload(adapter: &NetAdapter,
// serialize and send all the headers over // serialize and send all the headers over
let mut body_data = vec![]; let mut body_data = vec![];
try!(ser::serialize(&mut body_data, &Headers { headers: headers })); try!(ser::serialize(
&mut body_data,
&Headers { headers: headers },
));
let mut data = vec![]; let mut data = vec![];
try!(ser::serialize(&mut data, try!(ser::serialize(
&MsgHeader::new(Type::Headers, body_data.len() as u64))); &mut data,
&MsgHeader::new(Type::Headers, body_data.len() as u64),
));
data.append(&mut body_data); data.append(&mut body_data);
sender.send(data).unwrap(); sender.send(data).unwrap();
@ -184,13 +198,17 @@ fn handle_payload(adapter: &NetAdapter,
// serialize and send all the headers over // serialize and send all the headers over
let mut body_data = vec![]; let mut body_data = vec![];
try!(ser::serialize(&mut body_data, try!(ser::serialize(
&mut body_data,
&PeerAddrs { &PeerAddrs {
peers: peer_addrs.iter().map(|sa| SockAddr(*sa)).collect(), peers: peer_addrs.iter().map(|sa| SockAddr(*sa)).collect(),
})); },
));
let mut data = vec![]; let mut data = vec![];
try!(ser::serialize(&mut data, try!(ser::serialize(
&MsgHeader::new(Type::PeerAddrs, body_data.len() as u64))); &mut data,
&MsgHeader::new(Type::PeerAddrs, body_data.len() as u64),
));
data.append(&mut body_data); data.append(&mut body_data);
sender.send(data).unwrap(); sender.send(data).unwrap();

View file

@ -77,11 +77,18 @@ impl<R: AsyncRead> io::Read for ThrottledReader<R> {
// Check if Allowed // Check if Allowed
if self.allowed < 1 { if self.allowed < 1 {
return Err(io::Error::new(io::ErrorKind::WouldBlock, "Reached Allowed Read Limit")) return Err(io::Error::new(
io::ErrorKind::WouldBlock,
"Reached Allowed Read Limit",
));
} }
// Read Max Allowed // Read Max Allowed
let buf = if buf.len() > self.allowed { &mut buf[0..self.allowed]} else { buf }; let buf = if buf.len() > self.allowed {
&mut buf[0..self.allowed]
} else {
buf
};
let res = self.reader.read(buf); let res = self.reader.read(buf);
// Decrement Allowed amount written // Decrement Allowed amount written
@ -92,7 +99,7 @@ impl<R: AsyncRead> io::Read for ThrottledReader<R> {
} }
} }
impl<R: AsyncRead> AsyncRead for ThrottledReader<R> { } impl<R: AsyncRead> AsyncRead for ThrottledReader<R> {}
/// A Rate Limited Writer /// A Rate Limited Writer
#[derive(Debug)] #[derive(Debug)]
@ -151,11 +158,18 @@ impl<W: AsyncWrite> io::Write for ThrottledWriter<W> {
// Check if Allowed // Check if Allowed
if self.allowed < 1 { if self.allowed < 1 {
return Err(io::Error::new(io::ErrorKind::WouldBlock, "Reached Allowed Write Limit")) return Err(io::Error::new(
io::ErrorKind::WouldBlock,
"Reached Allowed Write Limit",
));
} }
// Write max allowed // Write max allowed
let buf = if buf.len() > self.allowed { &buf[0..self.allowed]} else { buf }; let buf = if buf.len() > self.allowed {
&buf[0..self.allowed]
} else {
buf
};
let res = self.writer.write(buf); let res = self.writer.write(buf);
// Decrement Allowed amount written // Decrement Allowed amount written
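Aside: the reader and writer above share one trick: clamp each I/O call to the remaining allowance, then shrink the allowance by what was consumed (a timer refills it elsewhere). The arithmetic in isolation, as a sketch:

fn clamp_to_budget(requested: usize, allowed: &mut usize) -> usize {
	let granted = requested.min(*allowed); // never exceed the allowance
	*allowed -= granted; // spend the budget; WouldBlock is returned once it hits zero
	granted
}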

View file

@ -132,17 +132,22 @@ impl Server {
let mut stop_mut = self.stop.borrow_mut(); let mut stop_mut = self.stop.borrow_mut();
*stop_mut = Some(stop); *stop_mut = Some(stop);
} }
Box::new(server.select(stop_rx.map_err(|_| Error::ConnectionClose)).then(|res| match res { Box::new(
server
.select(stop_rx.map_err(|_| Error::ConnectionClose))
.then(|res| match res {
Ok((_, _)) => Ok(()), Ok((_, _)) => Ok(()),
Err((e, _)) => Err(e), Err((e, _)) => Err(e),
})) }),
)
} }
/// Asks the server to connect to a new peer. /// Asks the server to connect to a new peer.
pub fn connect_peer(&self, pub fn connect_peer(
&self,
addr: SocketAddr, addr: SocketAddr,
h: reactor::Handle) h: reactor::Handle,
-> Box<Future<Item = Option<Arc<Peer>>, Error = Error>> { ) -> Box<Future<Item = Option<Arc<Peer>>, Error = Error>> {
if let Some(p) = self.get_peer(addr) { if let Some(p) = self.get_peer(addr) {
// if we're already connected to the addr, just return the peer // if we're already connected to the addr, just return the peer
return Box::new(future::ok(Some(p))); return Box::new(future::ok(Some(p)));
@ -163,7 +168,8 @@ impl Server {
let socket = TcpStream::connect(&addr, &h).map_err(|e| Error::Connection(e)); let socket = TcpStream::connect(&addr, &h).map_err(|e| Error::Connection(e));
let h2 = h.clone(); let h2 = h.clone();
let request = socket.and_then(move |socket| { let request = socket
.and_then(move |socket| {
let peers = peers.clone(); let peers = peers.clone();
let total_diff = adapter1.clone().total_difficulty(); let total_diff = adapter1.clone().total_difficulty();
@ -280,11 +286,13 @@ impl Server {
} }
// Adds the peer built by the provided future in the peers map // Adds the peer built by the provided future in the peers map
fn add_to_peers<A>(peers: Arc<RwLock<Vec<Arc<Peer>>>>, fn add_to_peers<A>(
peers: Arc<RwLock<Vec<Arc<Peer>>>>,
adapter: Arc<NetAdapter>, adapter: Arc<NetAdapter>,
peer_fut: A) peer_fut: A,
-> Box<Future<Item = Result<(TcpStream, Arc<Peer>), ()>, Error = Error>> ) -> Box<Future<Item = Result<(TcpStream, Arc<Peer>), ()>, Error = Error>>
where A: IntoFuture<Item = (TcpStream, Peer), Error = Error> + 'static where
A: IntoFuture<Item = (TcpStream, Peer), Error = Error> + 'static,
{ {
let peer_add = peer_fut.into_future().map(move |(conn, peer)| { let peer_add = peer_fut.into_future().map(move |(conn, peer)| {
adapter.peer_connected(&peer.info); adapter.peer_connected(&peer.info);
@ -297,15 +305,17 @@ fn add_to_peers<A>(peers: Arc<RwLock<Vec<Arc<Peer>>>>,
} }
// Adds a timeout to a future // Adds a timeout to a future
fn with_timeout<T: 'static>(fut: Box<Future<Item = Result<T, ()>, Error = Error>>, fn with_timeout<T: 'static>(
h: &reactor::Handle) fut: Box<Future<Item = Result<T, ()>, Error = Error>>,
-> Box<Future<Item = T, Error = Error>> { h: &reactor::Handle,
) -> Box<Future<Item = T, Error = Error>> {
let timeout = reactor::Timeout::new(Duration::new(5, 0), h).unwrap(); let timeout = reactor::Timeout::new(Duration::new(5, 0), h).unwrap();
let timed = fut.select(timeout.map(Err).from_err()) let timed = fut.select(timeout.map(Err).from_err()).then(
.then(|res| match res { |res| match res {
Ok((Ok(inner), _timeout)) => Ok(inner), Ok((Ok(inner), _timeout)) => Ok(inner),
Ok((_, _accept)) => Err(Error::Timeout), Ok((_, _accept)) => Err(Error::Timeout),
Err((e, _other)) => Err(e), Err((e, _other)) => Err(e),
}); },
);
Box::new(timed) Box::new(timed)
} }

View file

@ -53,10 +53,12 @@ pub struct PeerData {
impl Writeable for PeerData { impl Writeable for PeerData {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> { fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
SockAddr(self.addr).write(writer)?; SockAddr(self.addr).write(writer)?;
ser_multiwrite!(writer, ser_multiwrite!(
writer,
[write_u32, self.capabilities.bits()], [write_u32, self.capabilities.bits()],
[write_bytes, &self.user_agent], [write_bytes, &self.user_agent],
[write_u8, self.flags as u8]); [write_u8, self.flags as u8]
);
Ok(()) Ok(())
} }
} }
@ -66,7 +68,9 @@ impl Readable for PeerData {
let addr = SockAddr::read(reader)?; let addr = SockAddr::read(reader)?;
let (capab, ua, fl) = ser_multiread!(reader, read_u32, read_vec, read_u8); let (capab, ua, fl) = ser_multiread!(reader, read_u32, read_vec, read_u8);
let user_agent = String::from_utf8(ua).map_err(|_| ser::Error::CorruptedData)?; let user_agent = String::from_utf8(ua).map_err(|_| ser::Error::CorruptedData)?;
let capabilities = Capabilities::from_bits(capab).ok_or(ser::Error::CorruptedData)?; let capabilities = Capabilities::from_bits(capab).ok_or(
ser::Error::CorruptedData,
)?;
match State::from_u8(fl) { match State::from_u8(fl) {
Some(flags) => { Some(flags) => {
Ok(PeerData { Ok(PeerData {
@ -94,8 +98,10 @@ impl PeerStore {
} }
pub fn save_peer(&self, p: &PeerData) -> Result<(), Error> { pub fn save_peer(&self, p: &PeerData) -> Result<(), Error> {
self.db.put_ser(&to_key(PEER_PREFIX, &mut format!("{}", p.addr).into_bytes())[..], self.db.put_ser(
p) &to_key(PEER_PREFIX, &mut format!("{}", p.addr).into_bytes())[..],
p,
)
} }
fn get_peer(&self, peer_addr: SocketAddr) -> Result<PeerData, Error> { fn get_peer(&self, peer_addr: SocketAddr) -> Result<PeerData, Error> {
@ -103,16 +109,22 @@ impl PeerStore {
} }
pub fn exists_peer(&self, peer_addr: SocketAddr) -> Result<bool, Error> { pub fn exists_peer(&self, peer_addr: SocketAddr) -> Result<bool, Error> {
self.db.exists(&to_key(PEER_PREFIX, &mut format!("{}", peer_addr).into_bytes())[..]) self.db.exists(
&to_key(PEER_PREFIX, &mut format!("{}", peer_addr).into_bytes())[..],
)
} }
pub fn delete_peer(&self, peer_addr: SocketAddr) -> Result<(), Error> { pub fn delete_peer(&self, peer_addr: SocketAddr) -> Result<(), Error> {
self.db.delete(&to_key(PEER_PREFIX, &mut format!("{}", peer_addr).into_bytes())[..]) self.db.delete(
&to_key(PEER_PREFIX, &mut format!("{}", peer_addr).into_bytes())[..],
)
} }
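Aside: every store call above derives its key the same way, a PEER_PREFIX byte plus the peer address rendered as text. Roughly, and ignoring whatever separator to_key itself may insert, the scheme amounts to:

fn peer_key(prefix: u8, addr: std::net::SocketAddr) -> Vec<u8> {
	let mut key = vec![prefix];
	key.extend_from_slice(format!("{}", addr).as_bytes()); // "ip:port"
	key
}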
pub fn find_peers(&self, state: State, cap: Capabilities, count: usize) -> Vec<PeerData> { pub fn find_peers(&self, state: State, cap: Capabilities, count: usize) -> Vec<PeerData> {
let peers_iter = self.db let peers_iter = self.db.iter::<PeerData>(&to_key(
.iter::<PeerData>(&to_key(PEER_PREFIX, &mut "".to_string().into_bytes())); PEER_PREFIX,
&mut "".to_string().into_bytes(),
));
let mut peers = Vec::with_capacity(count); let mut peers = Vec::with_capacity(count);
for p in peers_iter { for p in peers_iter {
if p.flags == state && p.capabilities.contains(cap) { if p.flags == state && p.capabilities.contains(cap) {

View file

@ -117,9 +117,7 @@ pub trait Protocol {
/// be known already, usually passed during construction. Will typically /// be known already, usually passed during construction. Will typically
/// block so needs to be called within a coroutine. Should also be called /// block so needs to be called within a coroutine. Should also be called
/// only once. /// only once.
fn handle(&self, fn handle(&self, conn: TcpStream, na: Arc<NetAdapter>)
conn: TcpStream,
na: Arc<NetAdapter>)
-> Box<Future<Item = (), Error = Error>>; -> Box<Future<Item = (), Error = Error>>;
/// Sends a ping message to the remote peer. /// Sends a ping message to the remote peer.

View file

@ -47,17 +47,23 @@ fn peer_handshake() {
let rhandle = handle.clone(); let rhandle = handle.clone();
let timeout = reactor::Timeout::new(time::Duration::new(1, 0), &handle).unwrap(); let timeout = reactor::Timeout::new(time::Duration::new(1, 0), &handle).unwrap();
let timeout_send = reactor::Timeout::new(time::Duration::new(2, 0), &handle).unwrap(); let timeout_send = reactor::Timeout::new(time::Duration::new(2, 0), &handle).unwrap();
handle.spawn(timeout.from_err() handle.spawn(
timeout
.from_err()
.and_then(move |_| { .and_then(move |_| {
let p2p_conf = p2p::P2PConfig::default(); let p2p_conf = p2p::P2PConfig::default();
let addr = SocketAddr::new(p2p_conf.host, p2p_conf.port); let addr = SocketAddr::new(p2p_conf.host, p2p_conf.port);
let socket = TcpStream::connect(&addr, &phandle).map_err(|e| p2p::Error::Connection(e)); let socket =
socket.and_then(move |socket| { TcpStream::connect(&addr, &phandle).map_err(|e| p2p::Error::Connection(e));
Peer::connect(socket, socket
.and_then(move |socket| {
Peer::connect(
socket,
p2p::UNKNOWN, p2p::UNKNOWN,
Difficulty::one(), Difficulty::one(),
my_addr, my_addr,
&p2p::handshake::Handshake::new()) &p2p::handshake::Handshake::new(),
)
}) })
.and_then(move |(socket, peer)| { .and_then(move |(socket, peer)| {
rhandle.spawn(peer.run(socket, net_adapter.clone()).map_err(|e| { rhandle.spawn(peer.run(socket, net_adapter.clone()).map_err(|e| {
@ -80,7 +86,8 @@ fn peer_handshake() {
}) })
.map_err(|e| { .map_err(|e| {
panic!("Client connection failed: {:?}", e); panic!("Client connection failed: {:?}", e);
})); }),
);
evtlp.run(run_server).unwrap(); evtlp.run(run_server).unwrap();

View file

@ -22,7 +22,7 @@ use types::{BlockChain, PoolError};
#[derive(Debug)] #[derive(Debug)]
pub struct DummyBlockHeaderIndex { pub struct DummyBlockHeaderIndex {
block_headers: HashMap<Commitment, block::BlockHeader> block_headers: HashMap<Commitment, block::BlockHeader>,
} }
impl DummyBlockHeaderIndex { impl DummyBlockHeaderIndex {
@ -30,23 +30,26 @@ impl DummyBlockHeaderIndex {
self.block_headers.insert(commit, block_header); self.block_headers.insert(commit, block_header);
} }
pub fn get_block_header_by_output_commit(&self, commit: Commitment) -> Result<&block::BlockHeader, PoolError> { pub fn get_block_header_by_output_commit(
&self,
commit: Commitment,
) -> Result<&block::BlockHeader, PoolError> {
match self.block_headers.get(&commit) { match self.block_headers.get(&commit) {
Some(h) => Ok(h), Some(h) => Ok(h),
None => Err(PoolError::GenericPoolError) None => Err(PoolError::GenericPoolError),
} }
} }
} }
/// A DummyUtxoSet for mocking up the chain /// A DummyUtxoSet for mocking up the chain
pub struct DummyUtxoSet { pub struct DummyUtxoSet {
outputs : HashMap<Commitment, transaction::Output> outputs: HashMap<Commitment, transaction::Output>,
} }
#[allow(dead_code)] #[allow(dead_code)]
impl DummyUtxoSet { impl DummyUtxoSet {
pub fn empty() -> DummyUtxoSet{ pub fn empty() -> DummyUtxoSet {
DummyUtxoSet{outputs: HashMap::new()} DummyUtxoSet { outputs: HashMap::new() }
} }
pub fn root(&self) -> hash::Hash { pub fn root(&self) -> hash::Hash {
hash::ZERO_HASH hash::ZERO_HASH
@ -59,7 +62,7 @@ impl DummyUtxoSet {
for output in &b.outputs { for output in &b.outputs {
new_hashmap.insert(output.commitment(), output.clone()); new_hashmap.insert(output.commitment(), output.clone());
} }
DummyUtxoSet{outputs: new_hashmap} DummyUtxoSet { outputs: new_hashmap }
} }
pub fn with_block(&mut self, b: &block::Block) { pub fn with_block(&mut self, b: &block::Block) {
for input in &b.inputs { for input in &b.inputs {
@ -70,14 +73,14 @@ impl DummyUtxoSet {
} }
} }
pub fn rewind(&self, _: &block::Block) -> DummyUtxoSet { pub fn rewind(&self, _: &block::Block) -> DummyUtxoSet {
DummyUtxoSet{outputs: HashMap::new()} DummyUtxoSet { outputs: HashMap::new() }
} }
pub fn get_output(&self, output_ref: &Commitment) -> Option<&transaction::Output> { pub fn get_output(&self, output_ref: &Commitment) -> Option<&transaction::Output> {
self.outputs.get(output_ref) self.outputs.get(output_ref)
} }
fn clone(&self) -> DummyUtxoSet { fn clone(&self) -> DummyUtxoSet {
DummyUtxoSet{outputs: self.outputs.clone()} DummyUtxoSet { outputs: self.outputs.clone() }
} }
// only for testing: add an output to the map // only for testing: add an output to the map
@ -88,7 +91,7 @@ impl DummyUtxoSet {
pub fn with_output(&self, output: transaction::Output) -> DummyUtxoSet { pub fn with_output(&self, output: transaction::Output) -> DummyUtxoSet {
let mut new_map = self.outputs.clone(); let mut new_map = self.outputs.clone();
new_map.insert(output.commitment(), output); new_map.insert(output.commitment(), output);
DummyUtxoSet{outputs: new_map} DummyUtxoSet { outputs: new_map }
} }
} }
@ -104,9 +107,9 @@ pub struct DummyChainImpl {
#[allow(dead_code)] #[allow(dead_code)]
impl DummyChainImpl { impl DummyChainImpl {
pub fn new() -> DummyChainImpl { pub fn new() -> DummyChainImpl {
DummyChainImpl{ DummyChainImpl {
utxo: RwLock::new(DummyUtxoSet{outputs: HashMap::new()}), utxo: RwLock::new(DummyUtxoSet { outputs: HashMap::new() }),
block_headers: RwLock::new(DummyBlockHeaderIndex{block_headers: HashMap::new()}), block_headers: RwLock::new(DummyBlockHeaderIndex { block_headers: HashMap::new() }),
head_header: RwLock::new(vec![]), head_header: RwLock::new(vec![]),
} }
} }
@ -121,8 +124,14 @@ impl BlockChain for DummyChainImpl {
} }
} }
fn get_block_header_by_output_commit(&self, commit: &Commitment) -> Result<block::BlockHeader, PoolError> { fn get_block_header_by_output_commit(
match self.block_headers.read().unwrap().get_block_header_by_output_commit(*commit) { &self,
commit: &Commitment,
) -> Result<block::BlockHeader, PoolError> {
match self.block_headers
.read()
.unwrap()
.get_block_header_by_output_commit(*commit) {
Ok(h) => Ok(h.clone()), Ok(h) => Ok(h.clone()),
Err(e) => Err(e), Err(e) => Err(e),
} }
@ -145,8 +154,15 @@ impl DummyChain for DummyChainImpl {
fn apply_block(&self, b: &block::Block) { fn apply_block(&self, b: &block::Block) {
self.utxo.write().unwrap().with_block(b); self.utxo.write().unwrap().with_block(b);
} }
fn store_header_by_output_commitment(&self, commitment: Commitment, block_header: &block::BlockHeader) { fn store_header_by_output_commitment(
self.block_headers.write().unwrap().insert(commitment, block_header.clone()); &self,
commitment: Commitment,
block_header: &block::BlockHeader,
) {
self.block_headers.write().unwrap().insert(
commitment,
block_header.clone(),
);
} }
fn store_head_header(&self, block_header: &block::BlockHeader) { fn store_head_header(&self, block_header: &block::BlockHeader) {
let mut h = self.head_header.write().unwrap(); let mut h = self.head_header.write().unwrap();
@ -158,6 +174,10 @@ impl DummyChain for DummyChainImpl {
pub trait DummyChain: BlockChain { pub trait DummyChain: BlockChain {
fn update_utxo_set(&mut self, new_utxo: DummyUtxoSet); fn update_utxo_set(&mut self, new_utxo: DummyUtxoSet);
fn apply_block(&self, b: &block::Block); fn apply_block(&self, b: &block::Block);
fn store_header_by_output_commitment(&self, commitment: Commitment, block_header: &block::BlockHeader); fn store_header_by_output_commitment(
&self,
commitment: Commitment,
block_header: &block::BlockHeader,
);
fn store_head_header(&self, block_header: &block::BlockHeader); fn store_head_header(&self, block_header: &block::BlockHeader);
} }
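Aside: a sketch of how these mocks are meant to be wired together in a pool test, using only the methods defined above; some_output and some_header are hypothetical fixtures, and the pool is the TransactionPool from the next file, which takes any BlockChain behind an Arc:

let mut chain = DummyChainImpl::new();
chain.update_utxo_set(DummyUtxoSet::empty().with_output(some_output));
chain.store_head_header(&some_header);
let pool = TransactionPool::new(std::sync::Arc::new(chain));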

View file

@ -44,10 +44,11 @@ pub struct PoolEntry {
impl PoolEntry { impl PoolEntry {
/// Create new transaction pool entry /// Create new transaction pool entry
pub fn new(tx: &core::transaction::Transaction) -> PoolEntry { pub fn new(tx: &core::transaction::Transaction) -> PoolEntry {
PoolEntry{ PoolEntry {
transaction_hash: transaction_identifier(tx), transaction_hash: transaction_identifier(tx),
size_estimate : estimate_transaction_size(tx), size_estimate: estimate_transaction_size(tx),
receive_ts: time::now_utc()} receive_ts: time::now_utc(),
}
} }
} }
@ -70,20 +71,36 @@ pub struct Edge {
output: Commitment, output: Commitment,
} }
impl Edge{ impl Edge {
/// Create new edge /// Create new edge
pub fn new(source: Option<core::hash::Hash>, destination: Option<core::hash::Hash>, output: Commitment) -> Edge { pub fn new(
Edge{source: source, destination: destination, output: output} source: Option<core::hash::Hash>,
destination: Option<core::hash::Hash>,
output: Commitment,
) -> Edge {
Edge {
source: source,
destination: destination,
output: output,
}
} }
/// Create new edge with a source /// Create new edge with a source
pub fn with_source(&self, src: Option<core::hash::Hash>) -> Edge { pub fn with_source(&self, src: Option<core::hash::Hash>) -> Edge {
Edge{source: src, destination: self.destination, output: self.output} Edge {
source: src,
destination: self.destination,
output: self.output,
}
} }
/// Create new edge with destination /// Create new edge with destination
pub fn with_destination(&self, dst: Option<core::hash::Hash>) -> Edge { pub fn with_destination(&self, dst: Option<core::hash::Hash>) -> Edge {
Edge{source: self.source, destination: dst, output: self.output} Edge {
source: self.source,
destination: dst,
output: self.output,
}
} }
/// The output commitment of the edge /// The output commitment of the edge
@ -104,8 +121,13 @@ impl Edge{
impl fmt::Debug for Edge { impl fmt::Debug for Edge {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Edge {{source: {:?}, destination: {:?}, commitment: {:?}}}", write!(
self.source, self.destination, self.output) f,
"Edge {{source: {:?}, destination: {:?}, commitment: {:?}}}",
self.source,
self.destination,
self.output
)
} }
} }
@ -124,7 +146,7 @@ pub struct DirectedGraph {
impl DirectedGraph { impl DirectedGraph {
/// Create an empty directed graph /// Create an empty directed graph
pub fn empty() -> DirectedGraph { pub fn empty() -> DirectedGraph {
DirectedGraph{ DirectedGraph {
edges: HashMap::new(), edges: HashMap::new(),
vertices: Vec::new(), vertices: Vec::new(),
roots: Vec::new(), roots: Vec::new(),
@ -143,10 +165,14 @@ impl DirectedGraph {
/// Remove a vertex by its hash /// Remove a vertex by its hash
pub fn remove_vertex(&mut self, tx_hash: core::hash::Hash) -> Option<PoolEntry> { pub fn remove_vertex(&mut self, tx_hash: core::hash::Hash) -> Option<PoolEntry> {
match self.roots.iter().position(|x| x.transaction_hash == tx_hash) { match self.roots.iter().position(
|x| x.transaction_hash == tx_hash,
) {
Some(i) => Some(self.roots.swap_remove(i)), Some(i) => Some(self.roots.swap_remove(i)),
None => { None => {
match self.vertices.iter().position(|x| x.transaction_hash == tx_hash) { match self.vertices.iter().position(
|x| x.transaction_hash == tx_hash,
) {
Some(i) => Some(self.vertices.swap_remove(i)), Some(i) => Some(self.vertices.swap_remove(i)),
None => None, None => None,
} }
@ -230,19 +256,26 @@ mod tests {
let ec = Secp256k1::with_caps(ContextFlag::Commit); let ec = Secp256k1::with_caps(ContextFlag::Commit);
let output_commit = ec.commit_value(70).unwrap(); let output_commit = ec.commit_value(70).unwrap();
let inputs = vec![core::transaction::Input(ec.commit_value(50).unwrap()), let inputs = vec![
core::transaction::Input(ec.commit_value(25).unwrap())]; core::transaction::Input(ec.commit_value(50).unwrap()),
let outputs = vec![core::transaction::Output{ core::transaction::Input(ec.commit_value(25).unwrap()),
];
let outputs = vec![
core::transaction::Output {
features: core::transaction::DEFAULT_OUTPUT, features: core::transaction::DEFAULT_OUTPUT,
commit: output_commit, commit: output_commit,
proof: ec.range_proof(0, 100, key::ZERO_KEY, output_commit, ec.nonce())}]; proof: ec.range_proof(0, 100, key::ZERO_KEY, output_commit, ec.nonce()),
let test_transaction = core::transaction::Transaction::new(inputs, },
outputs, 5); ];
let test_transaction = core::transaction::Transaction::new(inputs, outputs, 5);
let test_pool_entry = PoolEntry::new(&test_transaction); let test_pool_entry = PoolEntry::new(&test_transaction);
let incoming_edge_1 = Edge::new(Some(random_hash()), let incoming_edge_1 = Edge::new(
Some(core::hash::ZERO_HASH), output_commit); Some(random_hash()),
Some(core::hash::ZERO_HASH),
output_commit,
);
let mut test_graph = DirectedGraph::empty(); let mut test_graph = DirectedGraph::empty();
@ -259,6 +292,6 @@ mod tests {
/// For testing/debugging: a random tx hash /// For testing/debugging: a random tx hash
pub fn random_hash() -> core::hash::Hash { pub fn random_hash() -> core::hash::Hash {
let hash_bytes: [u8;32]= rand::random(); let hash_bytes: [u8; 32] = rand::random();
core::hash::Hash(hash_bytes) core::hash::Hash(hash_bytes)
} }

View file

@ -35,7 +35,7 @@ pub struct TransactionPool<T> {
/// All transactions in the pool /// All transactions in the pool
pub transactions: HashMap<hash::Hash, Box<transaction::Transaction>>, pub transactions: HashMap<hash::Hash, Box<transaction::Transaction>>,
/// The pool itself /// The pool itself
pub pool : Pool, pub pool: Pool,
/// Orphans in the pool /// Orphans in the pool
pub orphans: Orphans, pub orphans: Orphans,
@ -44,10 +44,13 @@ pub struct TransactionPool<T> {
blockchain: Arc<T>, blockchain: Arc<T>,
} }
impl<T> TransactionPool<T> where T: BlockChain { impl<T> TransactionPool<T>
where
T: BlockChain,
{
/// Create a new transaction pool /// Create a new transaction pool
pub fn new(chain: Arc<T>) -> TransactionPool<T> { pub fn new(chain: Arc<T>) -> TransactionPool<T> {
TransactionPool{ TransactionPool {
transactions: HashMap::new(), transactions: HashMap::new(),
pool: Pool::empty(), pool: Pool::empty(),
orphans: Orphans::empty(), orphans: Orphans::empty(),
@ -66,31 +69,35 @@ impl<T> TransactionPool<T> where T: BlockChain {
// The current best unspent set is: // The current best unspent set is:
// Pool unspent + (blockchain unspent - pool->blockchain spent) // Pool unspent + (blockchain unspent - pool->blockchain spent)
// Pool unspents are unconditional so we check those first // Pool unspents are unconditional so we check those first
self.pool.get_available_output(output_commitment). self.pool
map(|x| Parent::PoolTransaction{tx_ref: x.source_hash().unwrap()}). .get_available_output(output_commitment)
or(self.search_blockchain_unspents(output_commitment)). .map(|x| {
or(self.search_pool_spents(output_commitment)). Parent::PoolTransaction { tx_ref: x.source_hash().unwrap() }
unwrap_or(Parent::Unknown) })
.or(self.search_blockchain_unspents(output_commitment))
.or(self.search_pool_spents(output_commitment))
.unwrap_or(Parent::Unknown)
} }
// search_blockchain_unspents searches the current view of the blockchain // search_blockchain_unspents searches the current view of the blockchain
// unspent set, represented by blockchain unspents - pool spents, for an // unspent set, represented by blockchain unspents - pool spents, for an
// output designated by output_commitment. // output designated by output_commitment.
fn search_blockchain_unspents(&self, output_commitment: &Commitment) -> Option<Parent> { fn search_blockchain_unspents(&self, output_commitment: &Commitment) -> Option<Parent> {
self.blockchain.get_unspent(output_commitment). self.blockchain.get_unspent(output_commitment).ok().map(
ok(). |output| match self.pool.get_blockchain_spent(output_commitment) {
map(|output| match self.pool.get_blockchain_spent(output_commitment) { Some(x) => Parent::AlreadySpent { other_tx: x.destination_hash().unwrap() },
Some(x) => Parent::AlreadySpent{other_tx: x.destination_hash().unwrap()}, None => Parent::BlockTransaction { output },
None => Parent::BlockTransaction{output}, },
}) )
} }
// search_pool_spents is the second half of pool input detection, after the // search_pool_spents is the second half of pool input detection, after the
// available_outputs have been checked. This returns either a // available_outputs have been checked. This returns either a
// Parent::AlreadySpent or None. // Parent::AlreadySpent or None.
fn search_pool_spents(&self, output_commitment: &Commitment) -> Option<Parent> { fn search_pool_spents(&self, output_commitment: &Commitment) -> Option<Parent> {
self.pool.get_internal_spent(output_commitment). self.pool.get_internal_spent(output_commitment).map(|x| {
map(|x| Parent::AlreadySpent{other_tx: x.destination_hash().unwrap()}) Parent::AlreadySpent { other_tx: x.destination_hash().unwrap() }
})
} }
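Aside: taken together, the three helpers above give search_for_best_output a strict precedence: pool unspents win, then blockchain unspents, then pool spents, and anything left is an orphan candidate. Restated as a sketch, where pool_unspent, blockchain_unspent and pool_spent are hypothetical stand-ins for the calls above:

let parent = pool_unspent(commit)
	.map(|e| Parent::PoolTransaction { tx_ref: e.source_hash().unwrap() })
	.or_else(|| blockchain_unspent(commit))
	.or_else(|| pool_spent(commit))
	.unwrap_or(Parent::Unknown);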
@ -115,7 +122,11 @@ impl<T> TransactionPool<T> where T: BlockChain {
/// if necessary, and performing any connection-related validity checks.
/// Happens under an exclusive mutable reference gated by the write portion
/// of a RWLock.
pub fn add_to_memory_pool(
    &mut self,
    _: TxSource,
    tx: transaction::Transaction,
) -> Result<(), PoolError> {
    // Making sure the transaction is valid before anything else.
    let secp = secp::Secp256k1::with_caps(secp::ContextFlag::Commit);
    tx.validate(&secp).map_err(|_| PoolError::Invalid)?;
@ -131,7 +142,7 @@ impl<T> TransactionPool<T> where T: BlockChain {
// to fees or other elements of the signature preimage.
let tx_hash = graph::transaction_identifier(&tx);
if self.transactions.contains_key(&tx_hash) {
    return Err(PoolError::AlreadyInPool);
}

// The next issue is to identify all unspent outputs that
@ -148,25 +159,34 @@ impl<T> TransactionPool<T> where T: BlockChain {
// spent by the orphans set, this does not preclude its inclusion
// into the pool.
match self.search_for_best_output(&input.commitment()) {
    Parent::PoolTransaction { tx_ref: x } => pool_refs.push(base.with_source(Some(x))),
    Parent::BlockTransaction { output } => {
        // TODO - pull this out into a separate function?
        if output.features.contains(transaction::COINBASE_OUTPUT) {
            if let Ok(out_header) = self.blockchain
                .get_block_header_by_output_commit(&output.commitment())
            {
                if let Ok(head_header) = self.blockchain.head_header() {
                    if head_header.height <=
                        out_header.height + consensus::COINBASE_MATURITY
                    {
                        return Err(PoolError::ImmatureCoinbase {
                            header: out_header,
                            output: output.commitment(),
                        });
                    };
                };
            };
        };
        blockchain_refs.push(base);
    }
    Parent::Unknown => orphan_refs.push(base),
    Parent::AlreadySpent { other_tx: x } => {
        return Err(PoolError::DoubleSpend {
            other_tx: x,
            spent_output: input.commitment(),
        })
    }
}
}
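The maturity check above rejects a coinbase spend while head_header.height <= out_header.height + COINBASE_MATURITY. A standalone sketch of just that predicate; the 3-block window is an assumption chosen to match the heights exercised in the tests further down, not the consensus value:

// Assumed maturity window for illustration only; the real value comes
// from consensus::COINBASE_MATURITY.
const COINBASE_MATURITY: u64 = 3;

/// A coinbase output minted at `out_height` is spendable only once the
/// head has strictly passed out_height + COINBASE_MATURITY.
fn coinbase_spendable(head_height: u64, out_height: u64) -> bool {
    head_height > out_height + COINBASE_MATURITY
}

fn main() {
    // Mirrors the ImmatureCoinbase test below: coinbase minted at height 1.
    assert!(!coinbase_spendable(2, 1)); // rejected
    assert!(!coinbase_spendable(4, 1)); // still rejected
    assert!(coinbase_spendable(5, 1)); // accepted
}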
@ -184,22 +204,29 @@ impl<T> TransactionPool<T> where T: BlockChain {
// Assertion: we have exactly as many resolved spending references as
// inputs to the transaction.
assert_eq!(
    tx.inputs.len(),
    blockchain_refs.len() + pool_refs.len() + orphan_refs.len()
);

// At this point we know if we're spending all known unspents and not
// creating any duplicate unspents.
let pool_entry = graph::PoolEntry::new(&tx);
let new_unspents = tx.outputs
    .iter()
    .map(|x| graph::Edge::new(Some(tx_hash), None, x.commitment()))
    .collect();

if !is_orphan {
    // In the non-orphan (pool) case, we've ensured that every input
    // maps one-to-one with an unspent (available) output, and each
    // output is unique. No further checks are necessary.
    self.pool.add_pool_transaction(
        pool_entry,
        blockchain_refs,
        pool_refs,
        new_unspents,
    );
    self.reconcile_orphans().unwrap();
    self.transactions.insert(tx_hash, Box::new(tx));
@ -215,10 +242,17 @@ impl<T> TransactionPool<T> where T: BlockChain {
// Note that pool_connections here also does double duty to
// account for blockchain connections.
for pool_ref in pool_refs.iter().chain(blockchain_refs.iter()) {
    match self.orphans.get_external_spent_output(
        &pool_ref.output_commitment(),
    ) {
        // Should the below err be subtyped to orphans somehow?
        Some(x) => {
            return Err(PoolError::DoubleSpend {
                other_tx: x.destination_hash().unwrap(),
                spent_output: x.output_commitment(),
            })
        }
        None => {}
    }
}
@ -230,8 +264,13 @@ impl<T> TransactionPool<T> where T: BlockChain {
// We have passed all failure modes.
pool_refs.append(&mut blockchain_refs);
self.orphans.add_orphan_transaction(
    pool_entry,
    pool_refs,
    orphan_refs,
    missing_refs,
    new_unspents,
);

Err(PoolError::OrphanTransaction)
}
@ -243,27 +282,33 @@ impl<T> TransactionPool<T> where T: BlockChain {
/// Checks the output (by commitment) against outputs in the blockchain
/// or in the pool. If the transaction is destined for orphans, the
/// orphans set is checked as well.
fn check_duplicate_outputs(
    &self,
    output: &transaction::Output,
    is_orphan: bool,
) -> Result<(), PoolError> {
    // Checking against current blockchain unspent outputs
    // We want outputs even if they're spent by pool txs, so we ignore
    // consumed_blockchain_outputs
    if self.blockchain.get_unspent(&output.commitment()).is_ok() {
        return Err(PoolError::DuplicateOutput {
            other_tx: None,
            in_chain: true,
            output: output.commitment(),
        });
    }

    // Check for existence of this output in the pool
    match self.pool.find_output(&output.commitment()) {
        Some(x) => {
            return Err(PoolError::DuplicateOutput {
                other_tx: Some(x),
                in_chain: false,
                output: output.commitment(),
            })
        }
        None => {}
    };
@ -271,14 +316,15 @@ impl<T> TransactionPool<T> where T: BlockChain {
// checks as above but against the orphan set instead.
if is_orphan {
    // Checking against orphan outputs
    match self.orphans.find_output(&output.commitment()) {
        Some(x) => {
            return Err(PoolError::DuplicateOutput {
                other_tx: Some(x),
                in_chain: false,
                output: output.commitment(),
            })
        }
        None => {}
    };
    // No need to check pool connections since those are covered
    // by pool unspents and blockchain connections.
@ -295,7 +341,11 @@ impl<T> TransactionPool<T> where T: BlockChain {
/// the orphans set or externally from orphans to the pool or blockchain.
/// The last case results in a failure condition and transaction acceptance
/// is aborted.
fn resolve_orphan_refs(
    &self,
    tx_hash: hash::Hash,
    orphan_refs: &mut Vec<graph::Edge>,
) -> Result<HashMap<usize, ()>, PoolError> {
    let mut missing_refs: HashMap<usize, ()> = HashMap::new();
    for (i, orphan_ref) in orphan_refs.iter_mut().enumerate() {
        let orphan_commitment = &orphan_ref.output_commitment();
@ -307,23 +357,29 @@ impl<T> TransactionPool<T> where T: BlockChain {
// already consumed or it belongs in missing_refs.
None => {
    match self.orphans.get_internal_spent(&orphan_commitment) {
        Some(x) => {
            return Err(PoolError::DoubleSpend {
                other_tx: x.destination_hash().unwrap(),
                spent_output: x.output_commitment(),
            })
        }
        None => {
            // The reference does not resolve to anything.
            // Make sure this missing_output has not already
            // been claimed, then add this entry to
            // missing_refs
            match self.orphans.get_unknown_output(&orphan_commitment) {
                Some(x) => {
                    return Err(PoolError::DoubleSpend {
                        other_tx: x.destination_hash().unwrap(),
                        spent_output: x.output_commitment(),
                    })
                }
                None => missing_refs.insert(i, ()),
            };
        }
    };
}
};
}
Ok(missing_refs)
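In short, each orphan input either resolves to an existing orphan output, trips a double-spend error, or lands in the index-keyed missing_refs map. A toy model of that control flow over plain maps (stand-in types, not the pool's own):

use std::collections::HashMap;

// Toy stand-ins: `available` are orphan outputs we can connect to,
// `spent` are orphan outputs already consumed (a double spend).
fn classify(
    inputs: &[&str],
    available: &HashMap<String, ()>,
    spent: &HashMap<String, ()>,
) -> Result<HashMap<usize, ()>, String> {
    let mut missing_refs: HashMap<usize, ()> = HashMap::new();
    for (i, input) in inputs.iter().enumerate() {
        if available.contains_key(*input) {
            continue; // resolved orphan-to-orphan link
        }
        if spent.contains_key(*input) {
            return Err(format!("double spend of {}", input));
        }
        missing_refs.insert(i, ()); // unresolved, tracked by input index
    }
    Ok(missing_refs)
}

fn main() {
    let mut available = HashMap::new();
    available.insert("a".to_string(), ());
    let spent = HashMap::new();
    let refs = classify(&["a", "b"], &available, &spent).unwrap();
    assert!(refs.contains_key(&1) && !refs.contains_key(&0));
}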
@ -335,7 +391,7 @@ impl<T> TransactionPool<T> where T: BlockChain {
/// be freed? Current thought is to do so under a different lock domain
/// so that we don't have the potential for long recursion under the write
/// lock.
pub fn reconcile_orphans(&self) -> Result<(), PoolError> {
    Ok(())
}
@ -354,7 +410,10 @@ impl<T> TransactionPool<T> where T: BlockChain {
/// evicted transactions elsewhere so that we can make a best effort at
/// returning them to the pool in the event of a reorg that invalidates
/// this block.
pub fn reconcile_block(
    &mut self,
    block: &block::Block,
) -> Result<Vec<Box<transaction::Transaction>>, PoolError> {
    // If this pool has been kept in sync correctly, serializing all
    // updates, then the inputs must consume only members of the blockchain
    // utxo set.
@ -386,18 +445,23 @@ impl<T> TransactionPool<T> where T: BlockChain {
// reconciliation job is triggered.
let mut marked_transactions: HashMap<hash::Hash, ()> = HashMap::new();
{
    let mut conflicting_txs: Vec<hash::Hash> = block
        .inputs
        .iter()
        .filter_map(|x| self.pool.get_external_spent_output(&x.commitment()))
        .map(|x| x.destination_hash().unwrap())
        .collect();

    let mut conflicting_outputs: Vec<hash::Hash> = block
        .outputs
        .iter()
        .filter_map(|x: &transaction::Output| {
            self.pool.get_internal_spent_output(&x.commitment()).or(
                self.pool.get_available_output(&x.commitment()),
            )
        })
        .map(|x| x.source_hash().unwrap())
        .collect();

    conflicting_txs.append(&mut conflicting_outputs);
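Both passes reduce to the same shape: walk the block's commitments, look each one up in a pool index, and keep the hashes that collide. In miniature, assuming a simple commitment-to-hash map as the index:

use std::collections::HashMap;

fn main() {
    // Hypothetical pool index: commitment -> hash of the pool tx spending it.
    let mut pool_spends: HashMap<&str, &str> = HashMap::new();
    pool_spends.insert("c1", "tx_a");
    pool_spends.insert("c2", "tx_b");

    // Inputs of an incoming block.
    let block_inputs = ["c2", "c9"];

    // Every block input that a pool tx also spends marks that tx conflicting.
    let conflicting: Vec<&str> = block_inputs
        .iter()
        .filter_map(|c| pool_spends.get(c).copied())
        .collect();

    assert_eq!(conflicting, vec!["tx_b"]);
}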
@ -422,8 +486,11 @@ impl<T> TransactionPool<T> where T: BlockChain {
///
/// Marked transactions are added to the mutable marked_txs HashMap which
/// is supplied by the calling function.
fn mark_transaction(
    &self,
    conflicting_tx: hash::Hash,
    marked_txs: &mut HashMap<hash::Hash, ()>,
) {
    marked_txs.insert(conflicting_tx, ());
@ -435,8 +502,8 @@ impl<T> TransactionPool<T> where T: BlockChain {
if self.blockchain.get_unspent(&x.output_commitment()).is_err() {
    self.mark_transaction(x.destination_hash().unwrap(), marked_txs);
}
}
None => {}
};
}
}
@ -451,17 +518,20 @@ impl<T> TransactionPool<T> where T: BlockChain {
///
/// TODO: There's some iteration overlap between this and the mark step.
/// Additional bookkeeping in the mark step could optimize that away.
fn sweep_transactions(
    &mut self,
    marked_transactions: HashMap<hash::Hash, ()>,
) -> Vec<Box<transaction::Transaction>> {
    let mut removed_txs = Vec::new();

    for tx_hash in marked_transactions.keys() {
        let removed_tx = self.transactions.remove(tx_hash).unwrap();
        self.pool.remove_pool_transaction(
            &removed_tx,
            &marked_transactions,
        );
        removed_txs.push(removed_tx);
    }
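The sweep drains every marked hash from the owned transaction map and hands the boxed transactions back for possible reinsertion after a reorg. A compact model of that handoff:

use std::collections::HashMap;

fn main() {
    // The pool's owned transactions, keyed by a stand-in hash.
    let mut transactions: HashMap<u64, Box<String>> = HashMap::new();
    transactions.insert(1, Box::new("tx one".to_string()));
    transactions.insert(2, Box::new("tx two".to_string()));

    // Marked set produced by the mark step.
    let mut marked: HashMap<u64, ()> = HashMap::new();
    marked.insert(2, ());

    // Sweep: remove marked entries, returning them to the caller.
    let removed: Vec<Box<String>> = marked
        .keys()
        .map(|h| transactions.remove(h).unwrap())
        .collect();

    assert_eq!(removed.len(), 1);
    assert!(transactions.contains_key(&1));
}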
@ -471,9 +541,15 @@ impl<T> TransactionPool<T> where T: BlockChain {
/// Fetch mineable transactions.
///
/// Select a set of mineable transactions for block building.
pub fn prepare_mineable_transactions(
    &self,
    num_to_fetch: u32,
) -> Vec<Box<transaction::Transaction>> {
    self.pool
        .get_mineable_transactions(num_to_fetch)
        .iter()
        .map(|x| self.transactions.get(x).unwrap().clone())
        .collect()
}
}
@ -492,7 +568,10 @@ mod tests {
$(
    match $pool.search_for_best_output(&test_output($output).commitment()) {
        $expected => {},
        x => panic!(
            "Unexpected result from output search for {:?}, got {:?}",
            $output,
            x),
    };
)*
}
@ -503,16 +582,16 @@ mod tests {
fn test_basic_pool_add() {
    let mut dummy_chain = DummyChainImpl::new();

    let parent_transaction = test_transaction(vec![5, 6, 7], vec![11, 4]);
    // We want this transaction to be rooted in the blockchain.
    let new_utxo = DummyUtxoSet::empty()
        .with_output(test_output(5))
        .with_output(test_output(6))
        .with_output(test_output(7))
        .with_output(test_output(8));

    // Prepare a second transaction, connected to the first.
    let child_transaction = test_transaction(vec![11, 4], vec![12]);

    dummy_chain.update_utxo_set(new_utxo);
@ -526,20 +605,19 @@ mod tests {
assert_eq!(write_pool.total_size(), 0);

// First, add the transaction rooted in the blockchain
let result = write_pool.add_to_memory_pool(test_source(), parent_transaction);
if result.is_err() {
    panic!("got an error adding parent tx: {:?}", result.err().unwrap());
}

// Now, add the transaction connected as a child to the first
let child_result = write_pool.add_to_memory_pool(test_source(), child_transaction);
if child_result.is_err() {
    panic!(
        "got an error adding child tx: {:?}",
        child_result.err().unwrap()
    );
}
}
@ -567,10 +645,10 @@ mod tests {
pub fn test_pool_add_error() {
    let mut dummy_chain = DummyChainImpl::new();

    let new_utxo = DummyUtxoSet::empty()
        .with_output(test_output(5))
        .with_output(test_output(6))
        .with_output(test_output(7));

    dummy_chain.update_utxo_set(new_utxo);
@ -582,27 +660,34 @@ mod tests {
// First expected failure: duplicate output
let duplicate_tx = test_transaction(vec![5, 6], vec![7]);

match write_pool.add_to_memory_pool(test_source(), duplicate_tx) {
    Ok(_) => panic!("Got OK from add_to_memory_pool when dup was expected"),
    Err(x) => {
        match x {
            PoolError::DuplicateOutput {
                other_tx,
                in_chain,
                output,
            } => {
                if other_tx.is_some() || !in_chain ||
                    output != test_output(7).commitment()
                {
                    panic!("Unexpected parameter in DuplicateOutput: {:?}", x);
                }
            }
            _ => {
                panic!("Unexpected error when adding duplicate output transaction: {:?}", x)
            }
        };
    }
};

// To test DoubleSpend and AlreadyInPool conditions, we need to add
// a valid transaction.
let valid_transaction = test_transaction(vec![5, 6], vec![8]);
match write_pool.add_to_memory_pool(test_source(), valid_transaction) {
    Ok(_) => {}
    Err(x) => panic!("Unexpected error while adding a valid transaction: {:?}", x),
};
@ -610,31 +695,36 @@ mod tests {
// as valid_transaction:
let double_spend_transaction = test_transaction(vec![6], vec![2]);

match write_pool.add_to_memory_pool(test_source(), double_spend_transaction) {
    Ok(_) => panic!("Expected error when adding double spend, got Ok"),
    Err(x) => {
        match x {
            PoolError::DoubleSpend {
                other_tx: _,
                spent_output,
            } => {
                if spent_output != test_output(6).commitment() {
                    panic!("Unexpected parameter in DoubleSpend: {:?}", x);
                }
            }
            _ => {
                panic!("Unexpected error when adding double spend transaction: {:?}", x)
            }
        };
    }
};

let already_in_pool = test_transaction(vec![5, 6], vec![8]);
match write_pool.add_to_memory_pool(test_source(), already_in_pool) {
    Ok(_) => panic!("Expected error when adding already in pool, got Ok"),
    Err(x) => {
        match x {
            PoolError::AlreadyInPool => {}
            _ => {
                panic!("Unexpected error when adding already in pool tx: {:?}",
                    x)
            }
        };
    }
@ -656,40 +746,61 @@ mod tests {
{
    let mut write_pool = pool.write().unwrap();

    let coinbase_header = block::BlockHeader {
        height: 1,
        ..block::BlockHeader::default()
    };
    chain_ref.store_header_by_output_commitment(
        coinbase_output.commitment(),
        &coinbase_header,
    );

    let head_header = block::BlockHeader {
        height: 2,
        ..block::BlockHeader::default()
    };
    chain_ref.store_head_header(&head_header);

    let txn = test_transaction(vec![15], vec![10, 4]);
    let result = write_pool.add_to_memory_pool(test_source(), txn);
    match result {
        Err(PoolError::ImmatureCoinbase {
            header: _,
            output: out,
        }) => {
            assert_eq!(out, coinbase_output.commitment());
        }
        _ => panic!("expected ImmatureCoinbase error here"),
    };

    let head_header = block::BlockHeader {
        height: 4,
        ..block::BlockHeader::default()
    };
    chain_ref.store_head_header(&head_header);

    let txn = test_transaction(vec![15], vec![10, 4]);
    let result = write_pool.add_to_memory_pool(test_source(), txn);
    match result {
        Err(PoolError::ImmatureCoinbase {
            header: _,
            output: out,
        }) => {
            assert_eq!(out, coinbase_output.commitment());
        }
        _ => panic!("expected ImmatureCoinbase error here"),
    };

    let head_header = block::BlockHeader {
        height: 5,
        ..block::BlockHeader::default()
    };
    chain_ref.store_head_header(&head_header);

    let txn = test_transaction(vec![15], vec![10, 4]);
    let result = write_pool.add_to_memory_pool(test_source(), txn);
    match result {
        Ok(_) => {}
        Err(_) => panic!("this should not return an error here"),
    };
}
@ -706,11 +817,11 @@ mod tests {
fn test_block_reconciliation() {
    let mut dummy_chain = DummyChainImpl::new();

    let new_utxo = DummyUtxoSet::empty()
        .with_output(test_output(10))
        .with_output(test_output(20))
        .with_output(test_output(30))
        .with_output(test_output(40));

    dummy_chain.update_utxo_set(new_utxo);
@ -790,8 +901,11 @@ mod tests {
let block_transactions = vec![&block_tx_1, &block_tx_2, &block_tx_3,
    &block_tx_4];

let block = block::Block::new(
    &block::BlockHeader::default(),
    block_transactions,
    key::ONE_KEY,
).unwrap();

chain_ref.apply_block(&block);
@ -845,11 +959,11 @@ mod tests {
// Add a handful of transactions
let mut dummy_chain = DummyChainImpl::new();

let new_utxo = DummyUtxoSet::empty()
    .with_output(test_output(10))
    .with_output(test_output(20))
    .with_output(test_output(30))
    .with_output(test_output(40));

dummy_chain.update_utxo_set(new_utxo);
@ -861,8 +975,8 @@ mod tests {
let root_tx_2 = test_transaction(vec![30], vec![28]);
let root_tx_3 = test_transaction(vec![40], vec![38]);

let child_tx_1 = test_transaction(vec![25], vec![23]);
let child_tx_2 = test_transaction(vec![38], vec![32]);

{
    let mut write_pool = pool.write().unwrap();
@ -894,8 +1008,8 @@ mod tests {
// prepare_mineable_transactions to return mut refs
let block_txs: Vec<transaction::Transaction> = txs.drain(..).map(|x| *x).collect();
let tx_refs = block_txs.iter().collect();
block = block::Block::new(&block::BlockHeader::default(), tx_refs, key::ONE_KEY)
    .unwrap();
}

chain_ref.apply_block(&block);
@ -916,7 +1030,7 @@ mod tests {
fn test_setup(dummy_chain: &Arc<DummyChainImpl>) -> TransactionPool<DummyChainImpl> {
    TransactionPool {
        transactions: HashMap::new(),
        pool: Pool::empty(),
        orphans: Orphans::empty(),
@ -932,8 +1046,12 @@ mod tests {
///
/// Fees are the remainder between input and output values, so the numbers
/// should make sense.
fn test_transaction(
    input_values: Vec<u64>,
    output_values: Vec<u64>,
) -> transaction::Transaction {
    let fees: i64 = input_values.iter().sum::<u64>() as i64 -
        output_values.iter().sum::<u64>() as i64;
    assert!(fees >= 0);

    let mut tx_elements = Vec::new();
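The fee here is simply the input total minus the output total, asserted non-negative; for the vectors used above, inputs [5, 6, 7] against outputs [11, 4] leave a fee of 3, as do [11, 4] against [12]. The arithmetic as a standalone check:

fn fee(inputs: &[u64], outputs: &[u64]) -> i64 {
    inputs.iter().sum::<u64>() as i64 - outputs.iter().sum::<u64>() as i64
}

fn main() {
    assert_eq!(fee(&[5, 6, 7], &[11, 4]), 3); // parent tx from the tests
    assert_eq!(fee(&[11, 4], &[12]), 3); // child tx
}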
@ -956,10 +1074,11 @@ mod tests {
let ec = Secp256k1::with_caps(ContextFlag::Commit);
let output_key = test_key(value);
let output_commitment = ec.commit(value, output_key).unwrap();
transaction::Output {
    features: transaction::DEFAULT_OUTPUT,
    commit: output_commitment,
    proof: ec.range_proof(0, value, output_key, output_commitment, ec.nonce()),
}
}

/// Deterministically generate a coinbase output defined by our test scheme
@ -967,10 +1086,11 @@ mod tests {
let ec = Secp256k1::with_caps(ContextFlag::Commit);
let output_key = test_key(value);
let output_commitment = ec.commit(value, output_key).unwrap();
transaction::Output {
    features: transaction::COINBASE_OUTPUT,
    commit: output_commitment,
    proof: ec.range_proof(0, value, output_key, output_commitment, ec.nonce()),
}
}

/// Makes a SecretKey from a single u64
@ -996,8 +1116,8 @@ mod tests {
}

/// A generic TxSource representing a test
fn test_source() -> TxSource {
    TxSource {
        debug_name: "test".to_string(),
        identifier: "127.0.0.1".to_string(),
    }

View file

@ -47,20 +47,20 @@ pub struct TxSource {
#[derive(Clone)]
pub enum Parent {
    Unknown,
    BlockTransaction { output: transaction::Output },
    PoolTransaction { tx_ref: hash::Hash },
    AlreadySpent { other_tx: hash::Hash },
}

impl fmt::Debug for Parent {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            &Parent::Unknown => write!(f, "Parent: Unknown"),
            &Parent::BlockTransaction { output: _ } => write!(f, "Parent: Block Transaction"),
            &Parent::PoolTransaction { tx_ref: x } => {
                write!(f, "Parent: Pool Transaction ({:?})", x)
            }
            &Parent::AlreadySpent { other_tx: x } => write!(f, "Parent: Already Spent By {:?}", x),
        }
    }
}
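Formatting a Parent therefore yields the human-readable labels above. A self-contained illustration with hash::Hash narrowed to u64 so it runs on its own (the real variants carry grin types):

use std::fmt;

// Simplified stand-in: hash::Hash is replaced by u64 for the example.
enum Parent {
    Unknown,
    PoolTransaction { tx_ref: u64 },
}

impl fmt::Debug for Parent {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self {
            &Parent::Unknown => write!(f, "Parent: Unknown"),
            &Parent::PoolTransaction { tx_ref: x } => {
                write!(f, "Parent: Pool Transaction ({:?})", x)
            }
        }
    }
}

fn main() {
    println!("{:?}", Parent::Unknown); // Parent: Unknown
    println!("{:?}", Parent::PoolTransaction { tx_ref: 42 }); // Parent: Pool Transaction (42)
}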
@ -74,23 +74,23 @@ pub enum PoolError {
/// An entry already in the pool
AlreadyInPool,
/// A duplicate output
DuplicateOutput {
    /// The other transaction
    other_tx: Option<hash::Hash>,
    /// Is in chain?
    in_chain: bool,
    /// The output
    output: Commitment,
},
/// A double spend
DoubleSpend {
    /// The other transaction
    other_tx: hash::Hash,
    /// The spent output
    spent_output: Commitment,
},
/// Attempt to spend a coinbase output before it matures (1000 blocks?)
ImmatureCoinbase {
    /// The block header of the block containing the coinbase output
    header: block::BlockHeader,
    /// The unspent coinbase output
@ -98,7 +98,8 @@ pub enum PoolError {
},
/// An orphan successfully added to the orphans set
OrphanTransaction,
/// TODO - wip, just getting imports working, remove this and use more
/// specific errors
GenericPoolError,
/// TODO - is this the right level of abstraction for pool errors?
OutputNotFound,
@ -112,12 +113,14 @@ pub trait BlockChain {
/// is spent or if it doesn't exist. The blockchain is expected to produce
/// a result with its current view of the most worked chain, ignoring
/// orphans, etc.
fn get_unspent(&self, output_ref: &Commitment) -> Result<transaction::Output, PoolError>;

/// Get the block header by output commitment (needed for spending coinbase
/// after n blocks)
fn get_block_header_by_output_commit(
    &self,
    commit: &Commitment,
) -> Result<block::BlockHeader, PoolError>;

/// Get the block header at the head
fn head_header(&self) -> Result<block::BlockHeader, PoolError>;
@ -135,7 +138,7 @@ pub trait BlockChain {
/// connections are in the pool edge set, while unspent (dangling) references
/// exist in the available_outputs set.
pub struct Pool {
    graph: graph::DirectedGraph,

    // available_outputs are unspent outputs of the current pool set,
    // maintained as edges with empty destinations, keyed by the
@ -143,12 +146,12 @@ pub struct Pool {
available_outputs: HashMap<Commitment, graph::Edge>,

// Consumed blockchain utxo's are kept in a separate map.
consumed_blockchain_outputs: HashMap<Commitment, graph::Edge>,
}

impl Pool {
    pub fn empty() -> Pool {
        Pool {
            graph: graph::DirectedGraph::empty(),
            available_outputs: HashMap::new(),
            consumed_blockchain_outputs: HashMap::new(),
@ -160,7 +163,10 @@ impl Pool {
/// Returns the transaction (kernel) hash corresponding to the conflicting
/// transaction
pub fn check_double_spend(&self, o: &transaction::Output) -> Option<hash::Hash> {
    self.graph
        .get_edge_by_commitment(&o.commitment())
        .or(self.consumed_blockchain_outputs.get(&o.commitment()))
        .map(|x| x.destination_hash().unwrap())
}
@ -168,21 +174,31 @@ impl Pool {
self.consumed_blockchain_outputs.get(c)
}

pub fn add_pool_transaction(
    &mut self,
    pool_entry: graph::PoolEntry,
    mut blockchain_refs: Vec<graph::Edge>,
    pool_refs: Vec<graph::Edge>,
    mut new_unspents: Vec<graph::Edge>,
) {
    // Removing consumed available_outputs
    for new_edge in &pool_refs {
        // All of these should correspond to an existing unspent
        assert!(
            self.available_outputs
                .remove(&new_edge.output_commitment())
                .is_some()
        );
    }

    // Accounting for consumed blockchain outputs
    for new_blockchain_edge in blockchain_refs.drain(..) {
        self.consumed_blockchain_outputs.insert(
            new_blockchain_edge
                .output_commitment(),
            new_blockchain_edge,
        );
    }

    // Adding the transaction to the vertices list along with internal
@ -192,12 +208,17 @@ impl Pool {
// Adding the new unspents to the unspent map
for unspent_output in new_unspents.drain(..) {
    self.available_outputs.insert(
        unspent_output.output_commitment(),
        unspent_output,
    );
}
}

pub fn remove_pool_transaction(
    &mut self,
    tx: &transaction::Transaction,
    marked_txs: &HashMap<hash::Hash, ()>,
) {
    self.graph.remove_vertex(graph::transaction_identifier(tx));
@ -205,27 +226,29 @@ impl Pool {
match self.graph.remove_edge_by_commitment(&input) {
    Some(x) => {
        if !marked_txs.contains_key(&x.source_hash().unwrap()) {
            self.available_outputs.insert(
                x.output_commitment(),
                x.with_destination(None),
            );
        }
    }
    None => {
        self.consumed_blockchain_outputs.remove(&input);
    }
};
}

for output in tx.outputs.iter().map(|x| x.commitment()) {
    match self.graph.remove_edge_by_commitment(&output) {
        Some(x) => {
            if !marked_txs.contains_key(&x.destination_hash().unwrap()) {
                self.consumed_blockchain_outputs.insert(
                    x.output_commitment(),
                    x.with_source(None),
                );
            }
        }
        None => {
            self.available_outputs.remove(&output);
        }
@ -259,7 +282,7 @@ impl TransactionGraphContainer for Pool {
/// Orphans contains the elements of the transaction graph that have not been
/// connected in full to the blockchain.
pub struct Orphans {
    graph: graph::DirectedGraph,

    // available_outputs are unspent outputs of the current orphan set,
    // maintained as edges with empty destinations.
@ -279,9 +302,9 @@ pub struct Orphans {
impl Orphans {
    pub fn empty() -> Orphans {
        Orphans {
            graph: graph::DirectedGraph::empty(),
            available_outputs: HashMap::new(),
            missing_outputs: HashMap::new(),
            pool_connections: HashMap::new(),
        }
@ -293,7 +316,10 @@ impl Orphans {
/// Returns the transaction hash corresponding to the conflicting
/// transaction.
pub fn check_double_spend(&self, o: transaction::Output) -> Option<hash::Hash> {
    self.graph
        .get_edge_by_commitment(&o.commitment())
        .or(self.pool_connections.get(&o.commitment()))
        .map(|x| x.destination_hash().unwrap())
}

pub fn get_unknown_output(&self, output: &Commitment) -> Option<&graph::Edge> {
@ -310,17 +336,28 @@ impl Orphans {
///
/// Expects a HashMap at is_missing describing the indices of orphan_refs
/// which correspond to missing (vs orphan-to-orphan) links.
pub fn add_orphan_transaction(
    &mut self,
    orphan_entry: graph::PoolEntry,
    mut pool_refs: Vec<graph::Edge>,
    mut orphan_refs: Vec<graph::Edge>,
    is_missing: HashMap<usize, ()>,
    mut new_unspents: Vec<graph::Edge>,
) {
    // Removing consumed available_outputs
    for (i, new_edge) in orphan_refs.drain(..).enumerate() {
        if is_missing.contains_key(&i) {
            self.missing_outputs.insert(
                new_edge.output_commitment(),
                new_edge,
            );
        } else {
            assert!(
                self.available_outputs
                    .remove(&new_edge.output_commitment())
                    .is_some()
            );
            self.graph.add_edge_only(new_edge);
        }
    }
@ -328,20 +365,26 @@ impl Orphans {
// Accounting for consumed blockchain and pool outputs
for external_edge in pool_refs.drain(..) {
    self.pool_connections.insert(
        external_edge.output_commitment(),
        external_edge,
    );
}

// if missing_refs is the same length as orphan_refs, we have
// no orphan-orphan links for this transaction and it is a
// root transaction of the orphans set
self.graph.add_vertex_only(
    orphan_entry,
    is_missing.len() == orphan_refs.len(),
);

// Adding the new unspents to the unspent map
for unspent_output in new_unspents.drain(..) {
    self.available_outputs.insert(
        unspent_output.output_commitment(),
        unspent_output,
    );
}
}
}
@ -400,9 +443,9 @@ pub trait TransactionGraphContainer {
/// Checks if the pool has anything by this output already, between
/// available outputs and internal ones.
fn find_output(&self, c: &Commitment) -> Option<hash::Hash> {
    self.get_available_output(c)
        .or(self.get_internal_spent_output(c))
        .map(|x| x.source_hash().unwrap())
}

/// Search for a spent reference internal to the graph
@ -421,5 +464,4 @@ pub trait TransactionGraphContainer {
fn num_output_edges(&self) -> usize {
    self.get_graph().len_edges()
}
}

View file

@ -57,8 +57,8 @@ impl Cuckoo {
/// serialized block header.
pub fn new(header: &[u8], sizeshift: u32) -> Cuckoo {
    let size = 1 << sizeshift;
    let hashed = blake2::blake2b::blake2b(32, &[], header);
    let hashed = hashed.as_bytes();
    let k0 = u8_to_u64(hashed, 0);
    let k1 = u8_to_u64(hashed, 8);
@ -157,11 +157,8 @@ pub struct Miner {
}

impl MiningWorker for Miner {
    /// Creates a new miner
    fn new(ease: u32, sizeshift: u32, proof_size: usize) -> Miner {
        let size = 1 << sizeshift;
        let graph = vec![0; size + 1];
        let easiness = (ease as u64) * (size as u64) / 100;
@ -177,7 +174,7 @@ impl MiningWorker for Miner {
fn mine(&mut self, header: &[u8]) -> Result<Proof, Error> {
    let size = 1 << self.sizeshift;
    self.graph = vec![0; size + 1];
    self.cuckoo = Some(Cuckoo::new(header, self.sizeshift));
    self.mine_impl()
}
}
@ -193,8 +190,6 @@ enum CycleSol {
}

impl Miner {
    /// Searches for a solution
    pub fn mine_impl(&mut self) -> Result<Proof, Error> {
        let mut us = [0; MAXPATHLEN];
@ -214,7 +209,7 @@ impl Miner {
match sol {
    CycleSol::ValidProof(res) => {
        return Ok(Proof::new(res.to_vec()));
    }
    CycleSol::InvalidCycle(_) => continue,
    CycleSol::NoCycle => {
        self.update_graph(nu, &us, nv, &vs);
@ -317,9 +312,9 @@ impl Miner {
/// Utility to transform 8 bytes of a byte array into a u64.
fn u8_to_u64(p: &[u8], i: usize) -> u64 {
    (p[i] as u64) | (p[i + 1] as u64) << 8 | (p[i + 2] as u64) << 16 |
        (p[i + 3] as u64) << 24 | (p[i + 4] as u64) << 32 | (p[i + 5] as u64) << 40 |
        (p[i + 6] as u64) << 48 | (p[i + 7] as u64) << 56
}
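The shift-and-or assembles the eight bytes least-significant first, i.e. little-endian; on current Rust the standard library spells the same conversion u64::from_le_bytes, which makes a convenient cross-check:

fn u8_to_u64(p: &[u8], i: usize) -> u64 {
    (p[i] as u64) | (p[i + 1] as u64) << 8 | (p[i + 2] as u64) << 16 |
        (p[i + 3] as u64) << 24 | (p[i + 4] as u64) << 32 | (p[i + 5] as u64) << 40 |
        (p[i + 6] as u64) << 48 | (p[i + 7] as u64) << 56
}

fn main() {
    let bytes: Vec<u8> = (1..=16).collect();
    for i in 0..=8 {
        // Compare the manual assembly against the standard-library helper
        // at every window offset.
        let mut le = [0u8; 8];
        le.copy_from_slice(&bytes[i..i + 8]);
        assert_eq!(u8_to_u64(&bytes, i), u64::from_le_bytes(le));
    }
}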
@ -329,31 +324,183 @@ mod test {
use core::core::Proof;

static V1: [u32; 42] = [
0x1fe9,
0x2050,
0x4581,
0x6322,
0x65ab,
0xb3c1,
0xc1a4,
0xe257,
0x106ae,
0x17b11,
0x202d4,
0x2705d,
0x2deb2,
0x2f80e,
0x32298,
0x34782,
0x35c5a,
0x37458,
0x38f28,
0x406b2,
0x40e34,
0x40fc6,
0x42220,
0x42d13,
0x46c0f,
0x4fd47,
0x55ad2,
0x598f7,
0x5aa8f,
0x62aa3,
0x65725,
0x65dcb,
0x671c7,
0x6eb20,
0x752fe,
0x7594f,
0x79b9c,
0x7f775,
0x81635,
0x8401c,
0x844e5,
0x89fa8,
];
static V2: [u32; 42] = [
0x2a37,
0x7557,
0xa3c3,
0xfce6,
0x1248e,
0x15837,
0x1827f,
0x18a93,
0x1a7dd,
0x1b56b,
0x1ceb4,
0x1f962,
0x1fe2a,
0x29cb9,
0x2f30e,
0x2f771,
0x336bf,
0x34355,
0x391d7,
0x39495,
0x3be0c,
0x463be,
0x4d0c2,
0x4eead,
0x50214,
0x520de,
0x52a86,
0x53818,
0x53b3b,
0x54c0b,
0x572fa,
0x5d79c,
0x5e3c2,
0x6769e,
0x6a0fe,
0x6d835,
0x6fc7c,
0x70f03,
0x79d4a,
0x7b03e,
0x81e09,
0x9bd44,
];
static V3: [u32; 42] = [
0x8158,
0x9f18,
0xc4ba,
0x108c7,
0x11caa,
0x13b82,
0x1618f,
0x1c83b,
0x1ec89,
0x24354,
0x28864,
0x2a0fb,
0x2ce50,
0x2e8fa,
0x32b36,
0x343e6,
0x34dc9,
0x36881,
0x3ffca,
0x40f79,
0x42721,
0x43b8c,
0x44b9d,
0x47ed3,
0x4cd34,
0x5278a,
0x5ab64,
0x5b4d4,
0x5d842,
0x5fa33,
0x6464e,
0x676ee,
0x685d6,
0x69df0,
0x6a5fd,
0x6bda3,
0x72544,
0x77974,
0x7908c,
0x80e67,
0x81ef4,
0x8d882,
];
// cuckoo28 at 50% edges of letter 'u'
static V4: [u32; 42] = [
0x1CBBFD,
0x2C5452,
0x520338,
0x6740C5,
0x8C6997,
0xC77150,
0xFD4972,
0x1060FA7,
0x11BFEA0,
0x1343E8D,
0x14CE02A,
0x1533515,
0x1715E61,
0x1996D9B,
0x1CB296B,
0x1FCA180,
0x209A367,
0x20AD02E,
0x23CD2E4,
0x2A3B360,
0x2DD1C0C,
0x333A200,
0x33D77BC,
0x3620C78,
0x3DD7FB8,
0x3FBFA49,
0x41BDED2,
0x4A86FD9,
0x570DE24,
0x57CAB86,
0x594B886,
0x5C74C94,
0x5DE7572,
0x60ADD6F,
0x635918B,
0x6C9E120,
0x6EFA583,
0x7394ACA,
0x7556A23,
0x77F70AA,
0x7CF750A,
0x7F60790,
];
/// Find a 42-cycle on Cuckoo20 at 75% easiness and verify against a few
/// known cycle proofs
@ -372,28 +519,51 @@ mod test {
#[test]
fn validate20_vectors() {
    assert!(Cuckoo::new(&[49], 20).verify(
        Proof::new(V1.to_vec().clone()),
        75,
    ));
    assert!(Cuckoo::new(&[50], 20).verify(
        Proof::new(V2.to_vec().clone()),
        70,
    ));
    assert!(Cuckoo::new(&[51], 20).verify(
        Proof::new(V3.to_vec().clone()),
        70,
    ));
}
#[test]
fn validate28_vectors() {
    let mut test_header = [0; 32];
    test_header[0] = 24;
    assert!(Cuckoo::new(&test_header, 28).verify(
        Proof::new(V4.to_vec().clone()),
        50,
    ));
}
#[test]
fn validate_fail() {
    // edge checks
    assert!(!Cuckoo::new(&[49], 20).verify(Proof::new(vec![0; 42]), 75));
    assert!(!Cuckoo::new(&[49], 20).verify(
        Proof::new(vec![0xffff; 42]),
        75,
    ));
    // wrong data for proof
    assert!(!Cuckoo::new(&[50], 20).verify(
        Proof::new(V1.to_vec().clone()),
        75,
    ));
    let mut test_header = [0; 32];
    test_header[0] = 24;
    assert!(!Cuckoo::new(&test_header, 20).verify(
        Proof::new(
            V4.to_vec().clone(),
        ),
        50,
    ));
}

View file

@ -62,14 +62,14 @@ use cuckoo::{Cuckoo, Error};
///
pub trait MiningWorker {
    /// This only sets parameters and does initialisation work now
    fn new(ease: u32, sizeshift: u32, proof_size: usize) -> Self
    where
        Self: Sized;

    /// Actually perform a mining attempt on the given input and
    /// return a proof if found
    fn mine(&mut self, header: &[u8]) -> Result<Proof, Error>;
}
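Because the mining entry points take the worker as &mut T where T: MiningWorker + ?Sized, a boxed trait object and a concrete miner go through the same code path. A skeletal sketch of that pattern; Proof, Error and the no-op worker here are placeholders, not the grin types:

// Placeholder types; in the real code these are core::core::Proof and
// cuckoo::Error.
struct Proof(Vec<u32>);
#[derive(Debug)]
struct Error;

trait MiningWorker {
    fn new(ease: u32, sizeshift: u32, proof_size: usize) -> Self
    where
        Self: Sized;
    fn mine(&mut self, header: &[u8]) -> Result<Proof, Error>;
}

// A worker that "mines" a zeroed proof, standing in for a real miner.
struct NoopMiner {
    proof_size: usize,
}

impl MiningWorker for NoopMiner {
    fn new(_ease: u32, _sizeshift: u32, proof_size: usize) -> NoopMiner {
        NoopMiner { proof_size }
    }
    fn mine(&mut self, _header: &[u8]) -> Result<Proof, Error> {
        Ok(Proof(vec![0; self.proof_size]))
    }
}

// Mirrors how pow_size accepts both `&mut miner` and `&mut *boxed_miner`.
fn drive<T: MiningWorker + ?Sized>(miner: &mut T, header: &[u8]) -> Result<Proof, Error> {
    miner.mine(header)
}

fn main() {
    let mut boxed: Box<dyn MiningWorker> = Box::new(NoopMiner::new(50, 12, 42));
    let proof = drive(&mut *boxed, &[0u8; 32]).unwrap();
    assert_eq!(proof.0.len(), 42);
}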
/// Validates the proof of work of a given header, and that the proof of work
@ -85,15 +85,20 @@ pub fn verify_size(bh: &BlockHeader, cuckoo_sz: u32) -> bool {
/// Uses the much easier Cuckoo20 (mostly for
/// tests).
pub fn pow20<T: MiningWorker>(
    miner: &mut T,
    bh: &mut BlockHeader,
    diff: Difficulty,
) -> Result<(), Error> {
    pow_size(miner, bh, diff, 20)
}
/// Mines a genesis block, using the config specified miner if specified.
/// Otherwise,
/// uses the internal miner
///
pub fn mine_genesis_block(miner_config: Option<types::MinerConfig>) -> Option<core::core::Block> {
    info!("Starting miner loop for Genesis Block");
    let mut gen = genesis::genesis();
    let diff = gen.header.difficulty.clone();
@ -101,7 +106,7 @@ pub fn mine_genesis_block(miner_config:Option<types::MinerConfig>)->Option<core:
let sz = global::sizeshift() as u32; let sz = global::sizeshift() as u32;
let proof_size = global::proofsize(); let proof_size = global::proofsize();
let mut miner:Box<MiningWorker> = match miner_config { let mut miner: Box<MiningWorker> = match miner_config {
Some(c) => { Some(c) => {
if c.use_cuckoo_miner { if c.use_cuckoo_miner {
let mut p = plugin::PluginMiner::new(consensus::EASINESS, sz, proof_size); let mut p = plugin::PluginMiner::new(consensus::EASINESS, sz, proof_size);
@ -111,17 +116,23 @@ pub fn mine_genesis_block(miner_config:Option<types::MinerConfig>)->Option<core:
} else { } else {
Box::new(cuckoo::Miner::new(consensus::EASINESS, sz, proof_size)) Box::new(cuckoo::Miner::new(consensus::EASINESS, sz, proof_size))
} }
}, }
None => Box::new(cuckoo::Miner::new(consensus::EASINESS, sz, proof_size)), None => Box::new(cuckoo::Miner::new(consensus::EASINESS, sz, proof_size)),
}; };
pow_size(&mut *miner, &mut gen.header, diff, sz as u32).unwrap(); pow_size(&mut *miner, &mut gen.header, diff, sz as u32).unwrap();
Some(gen) Some(gen)
} }
/// Runs a proof of work computation over the provided block using the provided Mining Worker, /// Runs a proof of work computation over the provided block using the provided
/// until the required difficulty target is reached. May take a while for a low target... /// Mining Worker,
pub fn pow_size<T: MiningWorker + ?Sized>(miner:&mut T, bh: &mut BlockHeader, /// until the required difficulty target is reached. May take a while for a low
diff: Difficulty, _: u32) -> Result<(), Error> { /// target...
pub fn pow_size<T: MiningWorker + ?Sized>(
miner: &mut T,
bh: &mut BlockHeader,
diff: Difficulty,
_: u32,
) -> Result<(), Error> {
let start_nonce = bh.nonce; let start_nonce = bh.nonce;
// if we're in production mode, try the pre-mined solution first // if we're in production mode, try the pre-mined solution first
@ -175,8 +186,17 @@ mod test {
global::set_mining_mode(MiningParameterMode::AutomatedTesting); global::set_mining_mode(MiningParameterMode::AutomatedTesting);
let mut b = genesis::genesis(); let mut b = genesis::genesis();
b.header.nonce = 310; b.header.nonce = 310;
let mut internal_miner = cuckoo::Miner::new(consensus::EASINESS, global::sizeshift() as u32, global::proofsize()); let mut internal_miner = cuckoo::Miner::new(
pow_size(&mut internal_miner, &mut b.header, Difficulty::from_num(MINIMUM_DIFFICULTY), global::sizeshift() as u32).unwrap(); consensus::EASINESS,
global::sizeshift() as u32,
global::proofsize(),
);
pow_size(
&mut internal_miner,
&mut b.header,
Difficulty::from_num(MINIMUM_DIFFICULTY),
global::sizeshift() as u32,
).unwrap();
assert!(b.header.nonce != 310); assert!(b.header.nonce != 310);
assert!(b.header.pow.clone().to_difficulty() >= Difficulty::from_num(MINIMUM_DIFFICULTY)); assert!(b.header.pow.clone().to_difficulty() >= Difficulty::from_num(MINIMUM_DIFFICULTY));
assert!(verify_size(&b.header, global::sizeshift() as u32)); assert!(verify_size(&b.header, global::sizeshift() as u32));
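The reformatted test above already shows the full mine-then-verify sequence; condensed, the flow a caller follows is (sketch using the same names, error handling elided):

	// Build a worker, grind nonces until the difficulty target is met, verify.
	let mut miner = cuckoo::Miner::new(consensus::EASINESS, global::sizeshift() as u32, global::proofsize());
	let mut b = genesis::genesis();
	pow_size(&mut miner, &mut b.header, Difficulty::from_num(MINIMUM_DIFFICULTY), global::sizeshift() as u32).unwrap();
	assert!(verify_size(&b.header, global::sizeshift() as u32));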
@@ -30,7 +30,7 @@ use types::MinerConfig;
 use std::sync::Mutex;

 use cuckoo_miner::{CuckooMiner, CuckooPluginManager, CuckooMinerConfig, CuckooMinerSolution,
                    CuckooMinerDeviceStats, CuckooMinerError};

 // For now, we're just going to keep a static reference around to the loaded
 // config
@@ -112,7 +112,7 @@ impl PluginMiner {
 		let sz = global::sizeshift();

 		let mut cuckoo_configs = Vec::new();
-		let mut index=0;
+		let mut index = 0;
 		for f in plugin_vec_filters {
 			// So this is built dynamically based on the plugin implementation
 			// type and the consensus sizeshift
@@ -126,12 +126,12 @@ impl PluginMiner {
 			info!("Mining plugin {} - {}", index, caps[0].full_path.clone());
 			config.plugin_full_path = caps[0].full_path.clone();
 			if let Some(l) = miner_config.clone().cuckoo_miner_plugin_config {
-				if let Some(lp) = l[index].parameter_list.clone(){
+				if let Some(lp) = l[index].parameter_list.clone() {
 					config.parameter_list = lp.clone();
 				}
 			}
 			cuckoo_configs.push(config);
-			index+=1;
+			index += 1;
 		}
 		// Store this config now, because we just want one instance
 		// of the plugin lib per invocation now
@@ -141,7 +141,7 @@ impl PluginMiner {
 		let result = CuckooMiner::new(cuckoo_configs.clone());
 		if let Err(e) = result {
 			error!("Error initializing mining plugin: {:?}", e);
-			//error!("Accepted values are: {:?}", caps[0].parameters);
+			// error!("Accepted values are: {:?}", caps[0].parameters);
 			panic!("Unable to init mining plugin.");
 		}
@@ -167,7 +167,7 @@ impl PluginMiner {
 	}

 	/// Get stats
-	pub fn get_stats(&self, index:usize) -> Result<Vec<CuckooMinerDeviceStats>, CuckooMinerError> {
+	pub fn get_stats(&self, index: usize) -> Result<Vec<CuckooMinerDeviceStats>, CuckooMinerError> {
 		self.miner.as_ref().unwrap().get_stats(index)
 	}
 }
@@ -185,7 +185,7 @@ impl MiningWorker for PluginMiner {
 	/// And simply calls the mine function of the loaded plugin
 	/// returning whether a solution was found and the solution itself
-	fn mine(&mut self, header: &[u8]) -> Result<Proof, cuckoo::Error>{
+	fn mine(&mut self, header: &[u8]) -> Result<Proof, cuckoo::Error> {
 		let result = self.miner
 			.as_mut()
 			.unwrap()
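One note on the loop rustfmt touched above: the hand-rolled `index` counter over `plugin_vec_filters` is the pattern `enumerate()` exists for. A shape-only sketch of the same loop (the body is abbreviated and `config` construction elided; this is not part of the commit):

	for (index, f) in plugin_vec_filters.iter().enumerate() {
		// ... resolve capabilities for filter `f` and build `config` as above ...
		if let Some(l) = miner_config.clone().cuckoo_miner_plugin_config {
			if let Some(lp) = l[index].parameter_list.clone() {
				config.parameter_list = lp.clone();
			}
		}
		cuckoo_configs.push(config);
	}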
@@ -19,18 +19,18 @@ use std::collections::HashMap;
 /// CuckooMinerPlugin configuration
 #[derive(Debug, Clone, Serialize, Deserialize)]
 pub struct CuckooMinerPluginConfig {
-	///The type of plugin to load (i.e. filters on filename)
-	pub type_filter : String,
+	/// The type of plugin to load (i.e. filters on filename)
+	pub type_filter: String,

-	///Parameters for this plugin
-	pub parameter_list : Option<HashMap<String, u32>>,
+	/// Parameters for this plugin
+	pub parameter_list: Option<HashMap<String, u32>>,
 }

 impl Default for CuckooMinerPluginConfig {
 	fn default() -> CuckooMinerPluginConfig {
 		CuckooMinerPluginConfig {
-			type_filter : String::new(),
-			parameter_list : None,
+			type_filter: String::new(),
+			parameter_list: None,
 		}
 	}
 }
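For reference, populating the struct is a plain literal over its two fields; a sketch with invented placeholder values (only the field names and types come from the definition above):

	use std::collections::HashMap;

	let mut params = HashMap::new();
	params.insert("NUM_THREADS".to_string(), 4u32); // hypothetical parameter

	let cfg = CuckooMinerPluginConfig {
		type_filter: "cpu".to_string(), // hypothetical filename filter
		parameter_list: Some(params),
	};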
@@ -374,10 +374,10 @@ fn wallet_command(wallet_args: &ArgMatches) {
 				dest = d;
 			}
 			wallet::issue_send_tx(&wallet_config, &key, amount, dest.to_string()).unwrap();
-		},
+		}
 		("info", Some(_)) => {
 			wallet::show_info(&wallet_config, &key);
-		},
+		}
 		_ => panic!("Unknown wallet command, use 'grin help wallet' for details"),
 	}
 }
@@ -188,7 +188,11 @@ impl RemoveLog {
 		if last_offs == 0 {
 			self.removed = vec![];
 		} else {
-			self.removed = self.removed.iter().filter(|&&(_, idx)| { idx < last_offs }).map(|x| *x).collect();
+			self.removed = self.removed
+				.iter()
+				.filter(|&&(_, idx)| idx < last_offs)
+				.map(|x| *x)
+				.collect();
 		}
 		Ok(())
 	}
@@ -230,8 +234,7 @@ impl RemoveLog {
 	/// Whether the remove log currently includes the provided position.
 	fn includes(&self, elmt: u64) -> bool {
-		include_tuple(&self.removed, elmt) ||
-			include_tuple(&self.removed_tmp, elmt)
+		include_tuple(&self.removed, elmt) || include_tuple(&self.removed_tmp, elmt)
 	}

 	/// Number of positions stored in the remove log.
@@ -305,7 +308,7 @@ where
 		// Third, check if it's in the pruned list or its offset
 		let shift = self.pruned_nodes.get_shift(position);
 		if let None = shift {
-			return None
+			return None;
 		}

 		// The MMR starts at 1, our binary backend starts at 0
@@ -329,7 +332,9 @@ where
 	fn rewind(&mut self, position: u64, index: u32) -> Result<(), String> {
 		assert!(self.buffer.len() == 0, "Rewind on non empty buffer.");
-		self.remove_log.truncate(index).map_err(|e| format!("Could not truncate remove log: {}", e))?;
+		self.remove_log.truncate(index).map_err(|e| {
+			format!("Could not truncate remove log: {}", e)
+		})?;
 		self.rewind = Some((position, index, self.buffer_index));
 		self.buffer_index = position as usize;
 		Ok(())
@@ -340,7 +345,9 @@ where
 		if self.buffer.used_size() > 0 {
 			for position in &positions {
 				let pos_sz = *position as usize;
-				if pos_sz > self.buffer_index && pos_sz - 1 < self.buffer_index + self.buffer.len() {
+				if pos_sz > self.buffer_index &&
+					pos_sz - 1 < self.buffer_index + self.buffer.len()
+				{
 					self.buffer.remove(vec![*position], index).unwrap();
 				}
 			}
@@ -370,7 +377,7 @@ where
 			remove_log: rm_log,
 			buffer: VecBackend::new(),
 			buffer_index: (sz as usize) / record_len,
-			pruned_nodes: pmmr::PruneList{pruned_nodes: prune_list},
+			pruned_nodes: pmmr::PruneList { pruned_nodes: prune_list },
 			rewind: None,
 		})
 	}
@@ -398,7 +405,10 @@ where
 			if let Err(e) = self.hashsum_file.append(&ser::ser_vec(&hs).unwrap()[..]) {
 				return Err(io::Error::new(
 					io::ErrorKind::Interrupted,
-					format!("Could not write to log storage, disk full? {:?}", e)
+					format!(
+						"Could not write to log storage, disk full? {:?}",
+						e
+					),
 				));
 			}
 		}
@@ -431,12 +441,14 @@ where
 	/// to decide whether the remove log has reached its maximum length,
 	/// otherwise the RM_LOG_MAX_NODES default value is used.
 	///
-	/// TODO whatever is calling this should also clean up the commit to position
+	/// TODO whatever is calling this should also clean up the commit to
+	/// position
 	/// index in db
 	pub fn check_compact(&mut self, max_len: usize) -> io::Result<()> {
-		if !(max_len > 0 && self.remove_log.len() > max_len ||
-			max_len == 0 && self.remove_log.len() > RM_LOG_MAX_NODES) {
-			return Ok(())
+		if !(max_len > 0 && self.remove_log.len() > max_len ||
+			max_len == 0 && self.remove_log.len() > RM_LOG_MAX_NODES)
+		{
+			return Ok(());
 		}

 		// 0. validate none of the nodes in the rm log are in the prune list (to
@@ -444,8 +456,10 @@ where
 		for pos in &self.remove_log.removed[..] {
 			if let None = self.pruned_nodes.pruned_pos(pos.0) {
 				// TODO we likely can recover from this by directly jumping to 3
-				error!("The remove log contains nodes that are already in the pruned \
-					list, a previous compaction likely failed.");
+				error!(
+					"The remove log contains nodes that are already in the pruned \
+					list, a previous compaction likely failed."
+				);
 				return Ok(());
 			}
 		}
@@ -454,20 +468,34 @@ where
 		// remove list
 		let tmp_prune_file = format!("{}/{}.prune", self.data_dir, PMMR_DATA_FILE);
 		let record_len = (32 + T::sum_len()) as u64;
-		let to_rm = self.remove_log.removed.iter().map(|&(pos, _)| {
-			let shift = self.pruned_nodes.get_shift(pos);
-			(pos - 1 - shift.unwrap()) * record_len
-		}).collect();
-		self.hashsum_file.save_prune(tmp_prune_file.clone(), to_rm, record_len)?;
+		let to_rm = self.remove_log
+			.removed
+			.iter()
+			.map(|&(pos, _)| {
+				let shift = self.pruned_nodes.get_shift(pos);
+				(pos - 1 - shift.unwrap()) * record_len
+			})
+			.collect();
+		self.hashsum_file.save_prune(
+			tmp_prune_file.clone(),
+			to_rm,
+			record_len,
+		)?;

 		// 2. update the prune list and save it in place
 		for &(rm_pos, _) in &self.remove_log.removed[..] {
 			self.pruned_nodes.add(rm_pos);
 		}
-		write_vec(format!("{}/{}", self.data_dir, PMMR_PRUNED_FILE), &self.pruned_nodes.pruned_nodes)?;
+		write_vec(
+			format!("{}/{}", self.data_dir, PMMR_PRUNED_FILE),
+			&self.pruned_nodes.pruned_nodes,
+		)?;

 		// 3. move the compact copy to the hashsum file and re-open it
-		fs::rename(tmp_prune_file.clone(), format!("{}/{}", self.data_dir, PMMR_DATA_FILE))?;
+		fs::rename(
+			tmp_prune_file.clone(),
+			format!("{}/{}", self.data_dir, PMMR_DATA_FILE),
+		)?;
 		self.hashsum_file = AppendOnlyFile::open(format!("{}/{}", self.data_dir, PMMR_DATA_FILE))?;
 		self.hashsum_file.sync()?;
@@ -481,7 +509,9 @@ where
 // Read an ordered vector of scalars from a file.
 fn read_ordered_vec<T>(path: String) -> io::Result<Vec<T>>
-	where T: ser::Readable + cmp::Ord {
+where
+	T: ser::Readable + cmp::Ord,
+{
 	let file_path = Path::new(&path);
 	let mut ovec = Vec::with_capacity(1000);
@@ -506,7 +536,10 @@ fn read_ordered_vec<T>(path: String) -> io::Result<Vec<T>>
 			Err(_) => {
 				return Err(io::Error::new(
 					io::ErrorKind::InvalidData,
-					format!("Corrupted storage, could not read file at {}", path),
+					format!(
+						"Corrupted storage, could not read file at {}",
+						path
+					),
 				));
 			}
 		}
@@ -519,13 +552,16 @@ fn read_ordered_vec<T>(path: String) -> io::Result<Vec<T>>
 }

 fn write_vec<T>(path: String, v: &Vec<T>) -> io::Result<()>
-	where T: ser::Writeable {
+where
+	T: ser::Writeable,
+{
 	let mut file_path = File::create(&path)?;
 	ser::serialize(&mut file_path, v).map_err(|_| {
 		io::Error::new(
 			io::ErrorKind::InvalidInput,
-			format!("Failed to serialize data when writing to {}", path))
+			format!("Failed to serialize data when writing to {}", path),
+		)
 	})?;
 	Ok(())
 }
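A side note on the truncate chain reformatted at the top of this file: collecting `.filter(...).map(|x| *x)` into a fresh vector can also be written as an in-place `retain`, which is the more common idiom for this. A standalone, runnable sketch over the same `(u64, u32)` tuples (values are placeholders):

	// Keep only entries recorded before the truncation offset.
	let last_offs: u32 = 5;
	let mut removed: Vec<(u64, u32)> = vec![(10, 1), (11, 4), (12, 7)];
	removed.retain(|&(_, idx)| idx < last_offs);
	assert_eq!(removed, vec![(10, 1), (11, 4)]);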
@@ -164,8 +164,7 @@ fn setup() -> (String, Vec<TestElem>) {
 	(data_dir, elems)
 }

-fn load(pos: u64, elems: &[TestElem],
-	backend: &mut store::sumtree::PMMRBackend<TestElem>) -> u64 {
+fn load(pos: u64, elems: &[TestElem], backend: &mut store::sumtree::PMMRBackend<TestElem>) -> u64 {
 	let mut pmmr = PMMR::at(backend, pos);
 	for elem in elems {
@@ -36,13 +36,16 @@ pub fn from_hex(hex_str: String) -> Result<Vec<u8>, num::ParseIntError> {
 	} else {
 		hex_str.clone()
 	};
-	split_n(&hex_trim.trim()[..], 2).iter()
+	split_n(&hex_trim.trim()[..], 2)
+		.iter()
 		.map(|b| u8::from_str_radix(b, 16))
 		.collect::<Result<Vec<u8>, _>>()
 }

 fn split_n(s: &str, n: usize) -> Vec<&str> {
-	(0 .. (s.len() - n + 1)/2 + 1).map(|i| &s[2*i .. 2*i + n]).collect()
+	(0..(s.len() - n + 1) / 2 + 1)
+		.map(|i| &s[2 * i..2 * i + n])
+		.collect()
 }

 #[cfg(test)]
@@ -59,7 +62,13 @@ mod test {
 	#[test]
 	fn test_from_hex() {
 		assert_eq!(from_hex("00000000".to_string()).unwrap(), vec![0, 0, 0, 0]);
-		assert_eq!(from_hex("0a0b0c0d".to_string()).unwrap(), vec![10, 11, 12, 13]);
-		assert_eq!(from_hex("000000ff".to_string()).unwrap(), vec![0, 0, 0, 255]);
+		assert_eq!(
+			from_hex("0a0b0c0d".to_string()).unwrap(),
+			vec![10, 11, 12, 13]
+		);
+		assert_eq!(
+			from_hex("000000ff".to_string()).unwrap(),
+			vec![0, 0, 0, 255]
+		);
 	}
 }
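Since both helpers are self-contained, the reformatted parser can be exercised standalone; a runnable condensation (identical logic, minus the `0x`-prefix stripping that produces `hex_trim` in the real function):

	use std::num;

	// Chop the string into 2-char pieces, then parse each as a base-16 byte.
	fn split_n(s: &str, n: usize) -> Vec<&str> {
		(0..(s.len() - n + 1) / 2 + 1)
			.map(|i| &s[2 * i..2 * i + n])
			.collect()
	}

	fn from_hex(hex_str: String) -> Result<Vec<u8>, num::ParseIntError> {
		split_n(&hex_str.trim()[..], 2)
			.iter()
			.map(|b| u8::from_str_radix(b, 16))
			.collect::<Result<Vec<u8>, _>>()
	}

	fn main() {
		assert_eq!(from_hex("0a0b0c0d".to_string()).unwrap(), vec![10, 11, 12, 13]);
	}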
@@ -22,11 +22,7 @@ use types::*;
 use util;

-fn refresh_output(
-	out: &mut OutputData,
-	api_out: Option<api::Output>,
-	tip: &api::Tip,
-) {
+fn refresh_output(out: &mut OutputData, api_out: Option<api::Output>, tip: &api::Tip) {
 	if let Some(api_out) = api_out {
 		out.height = api_out.height;
 		out.lock_height = api_out.lock_height;
@@ -38,25 +34,23 @@ fn refresh_output(
 		} else {
 			out.status = OutputStatus::Unspent;
 		}
-	} else if vec![
-		OutputStatus::Unspent,
-		OutputStatus::Locked
-	].contains(&out.status) {
+	} else if vec![OutputStatus::Unspent, OutputStatus::Locked].contains(&out.status) {
 		out.status = OutputStatus::Spent;
 	}
 }

 /// Goes through the list of outputs that haven't been spent yet and check
 /// with a node whether their status has changed.
-pub fn refresh_outputs(config: &WalletConfig, ext_key: &ExtendedKey) -> Result<(), Error>{
+pub fn refresh_outputs(config: &WalletConfig, ext_key: &ExtendedKey) -> Result<(), Error> {
 	let secp = secp::Secp256k1::with_caps(secp::ContextFlag::Commit);
 	let tip = get_tip(config)?;

 	WalletData::with_wallet(&config.data_file_dir, |wallet_data| {
 		// check each output that's not spent
-		for mut out in wallet_data.outputs
-			.iter_mut()
-			.filter(|out| out.status != OutputStatus::Spent) {
+		for mut out in wallet_data.outputs.iter_mut().filter(|out| {
+			out.status != OutputStatus::Spent
+		})
+		{
 			// figure out the commitment
 			// TODO check the pool for unconfirmed
@@ -66,8 +60,9 @@ pub fn refresh_outputs(config: &WalletConfig, ext_key: &ExtendedKey) -> Result<(
 			match get_output_by_commitment(config, commitment) {
 				Ok(api_out) => refresh_output(&mut out, api_out, &tip),
 				Err(_) => {
-					//TODO find error with connection and return
-					//error!("Error contacting server node at {}. Is it running?", config.check_node_api_http_addr);
+					// TODO find error with connection and return
+					// error!("Error contacting server node at {}. Is it running?",
+					// config.check_node_api_http_addr);
 				}
 			}
 		}
@@ -76,14 +71,14 @@ pub fn refresh_outputs(config: &WalletConfig, ext_key: &ExtendedKey) -> Result<(
 fn get_tip(config: &WalletConfig) -> Result<api::Tip, Error> {
 	let url = format!("{}/v1/chain/1", config.check_node_api_http_addr);
-	api::client::get::<api::Tip>(url.as_str())
-		.map_err(|e| Error::Node(e))
+	api::client::get::<api::Tip>(url.as_str()).map_err(|e| Error::Node(e))
 }

-// queries a reachable node for a given output, checking whether it's been confirmed
+// queries a reachable node for a given output, checking whether it's been
+// confirmed
 fn get_output_by_commitment(
 	config: &WalletConfig,
-	commit: pedersen::Commitment
+	commit: pedersen::Commitment,
 ) -> Result<Option<api::Output>, Error> {
 	let url = format!(
 		"{}/v1/chain/utxo/{}",
@@ -207,8 +207,9 @@ impl ExtendedKey {
 		let mut secret_key = SecretKey::from_slice(&secp, &derived.as_bytes()[0..32])
 			.expect("Error deriving key");
-		secret_key.add_assign(secp, &self.key)
-			.expect("Error deriving key");
+		secret_key.add_assign(secp, &self.key).expect(
+			"Error deriving key",
+		);
 		// TODO check if key != 0 ?

 		let mut chain_code: [u8; 32] = [0; 32];
@@ -241,18 +242,26 @@ mod test {
 		let s = Secp256k1::new();
 		let seed = from_hex("000102030405060708090a0b0c0d0e0f");
 		let extk = ExtendedKey::from_seed(&s, &seed.as_slice()).unwrap();
-		let sec =
-			from_hex("c3f5ae520f474b390a637de4669c84d0ed9bbc21742577fac930834d3c3083dd");
+		let sec = from_hex(
+			"c3f5ae520f474b390a637de4669c84d0ed9bbc21742577fac930834d3c3083dd",
+		);
 		let secret_key = SecretKey::from_slice(&s, sec.as_slice()).unwrap();
-		let chaincode =
-			from_hex("e7298e68452b0c6d54837670896e1aee76b118075150d90d4ee416ece106ae72");
+		let chaincode = from_hex(
+			"e7298e68452b0c6d54837670896e1aee76b118075150d90d4ee416ece106ae72",
+		);
 		let identifier = from_hex("942b6c0bd43bdcb24f3edfe7fadbc77054ecc4f2");
 		let fingerprint = from_hex("942b6c0b");
 		let depth = 0;
 		let n_child = 0;
 		assert_eq!(extk.key, secret_key);
-		assert_eq!(extk.identifier(), Identifier::from_bytes(identifier.as_slice()));
-		assert_eq!(extk.fingerprint, Fingerprint::from_bytes(fingerprint.as_slice()));
+		assert_eq!(
+			extk.identifier(),
+			Identifier::from_bytes(identifier.as_slice())
+		);
+		assert_eq!(
+			extk.fingerprint,
+			Fingerprint::from_bytes(fingerprint.as_slice())
+		);
 		assert_eq!(
 			extk.identifier().fingerprint(),
 			Fingerprint::from_bytes(fingerprint.as_slice())
@@ -269,19 +278,27 @@ mod test {
 		let seed = from_hex("000102030405060708090a0b0c0d0e0f");
 		let extk = ExtendedKey::from_seed(&s, &seed.as_slice()).unwrap();
 		let derived = extk.derive(&s, 0).unwrap();
-		let sec =
-			from_hex("d75f70beb2bd3b56f9b064087934bdedee98e4b5aae6280c58b4eff38847888f");
+		let sec = from_hex(
+			"d75f70beb2bd3b56f9b064087934bdedee98e4b5aae6280c58b4eff38847888f",
+		);
 		let secret_key = SecretKey::from_slice(&s, sec.as_slice()).unwrap();
-		let chaincode =
-			from_hex("243cb881e1549e714db31d23af45540b13ad07941f64a786bbf3313b4de1df52");
+		let chaincode = from_hex(
+			"243cb881e1549e714db31d23af45540b13ad07941f64a786bbf3313b4de1df52",
+		);
 		let fingerprint = from_hex("942b6c0b");
 		let identifier = from_hex("8b011f14345f3f0071e85f6eec116de1e575ea10");
 		let identifier_fingerprint = from_hex("8b011f14");
 		let depth = 1;
 		let n_child = 0;
 		assert_eq!(derived.key, secret_key);
-		assert_eq!(derived.identifier(), Identifier::from_bytes(identifier.as_slice()));
-		assert_eq!(derived.fingerprint, Fingerprint::from_bytes(fingerprint.as_slice()));
+		assert_eq!(
+			derived.identifier(),
+			Identifier::from_bytes(identifier.as_slice())
+		);
+		assert_eq!(
+			derived.fingerprint,
+			Fingerprint::from_bytes(fingerprint.as_slice())
+		);
 		assert_eq!(
 			derived.identifier().fingerprint(),
 			Fingerprint::from_bytes(identifier_fingerprint.as_slice())
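The two test vectors double as API documentation for the key hierarchy: seed in, master key out, numbered children via `derive`. The minimal calling sequence, reusing the helper names from the tests above (sketch only):

	let s = Secp256k1::new();
	let seed = from_hex("000102030405060708090a0b0c0d0e0f");
	let master = ExtendedKey::from_seed(&s, &seed.as_slice()).unwrap();
	let child = master.derive(&s, 0).unwrap();
	// per the second vector: child.depth == 1, child.n_child == 0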
@@ -27,9 +27,10 @@ pub fn show_info(config: &WalletConfig, ext_key: &ExtendedKey) {
 		println!("Outputs - ");
 		println!("fingerprint, n_child, height, lock_height, status, value");
 		println!("----------------------------------");
-		for out in &mut wallet_data.outputs
-			.iter()
-			.filter(|o| o.fingerprint == ext_key.fingerprint ) {
+		for out in &mut wallet_data.outputs.iter().filter(|o| {
+			o.fingerprint == ext_key.fingerprint
+		})
+		{
 			let key = ext_key.derive(&secp, out.n_child).unwrap();
 			println!(
@@ -50,7 +50,7 @@
 //! So we may as well have it in place already.

 use std::convert::From;
-use secp::{self};
+use secp;
 use secp::key::SecretKey;

 use core::core::{Block, Transaction, TxKernel, Output, build};
@@ -72,16 +72,15 @@ struct TxWrapper {
 pub fn receive_json_tx(
 	config: &WalletConfig,
 	ext_key: &ExtendedKey,
-	partial_tx_str: &str
+	partial_tx_str: &str,
 ) -> Result<(), Error> {
 	let (amount, blinding, partial_tx) = partial_tx_from_json(partial_tx_str)?;
 	let final_tx = receive_transaction(&config, ext_key, amount, blinding, partial_tx)?;
 	let tx_hex = util::to_hex(ser::ser_vec(&final_tx).unwrap());

 	let url = format!("{}/v1/pool/push", config.check_node_api_http_addr.as_str());
-	let _: () = api::client::post(url.as_str(), &TxWrapper { tx_hex: tx_hex }).map_err(|e| {
-		Error::Node(e)
-	})?;
+	let _: () = api::client::post(url.as_str(), &TxWrapper { tx_hex: tx_hex })
+		.map_err(|e| Error::Node(e))?;
 	Ok(())
 }
@@ -102,7 +101,7 @@ impl ApiEndpoint for WalletReceiver {
 	fn operations(&self) -> Vec<Operation> {
 		vec![
 			Operation::Custom("coinbase".to_string()),
-			Operation::Custom("receive_json_tx".to_string())
+			Operation::Custom("receive_json_tx".to_string()),
 		]
 	}
@@ -115,16 +114,17 @@ impl ApiEndpoint for WalletReceiver {
 					if cb_amount.amount == 0 {
 						return Err(api::Error::Argument(format!("Zero amount not allowed.")));
 					}
-					let (out, kern) =
-						receive_coinbase(&self.config, &self.key, cb_amount.amount).map_err(|e| {
+					let (out, kern) = receive_coinbase(
+						&self.config,
+						&self.key,
+						cb_amount.amount,
+					).map_err(|e| {
 						api::Error::Internal(format!("Error building coinbase: {:?}", e))
 					})?;
-					let out_bin =
-						ser::ser_vec(&out).map_err(|e| {
+					let out_bin = ser::ser_vec(&out).map_err(|e| {
 						api::Error::Internal(format!("Error serializing output: {:?}", e))
 					})?;
-					let kern_bin =
-						ser::ser_vec(&kern).map_err(|e| {
+					let kern_bin = ser::ser_vec(&kern).map_err(|e| {
 						api::Error::Internal(format!("Error serializing kernel: {:?}", e))
 					})?;
 					Ok(CbData {
@@ -132,24 +132,32 @@ impl ApiEndpoint for WalletReceiver {
 						kernel: util::to_hex(kern_bin),
 					})
 				}
-				_ => Err(api::Error::Argument(format!("Incorrect request data: {}", op))),
+				_ => Err(api::Error::Argument(
+					format!("Incorrect request data: {}", op),
+				)),
 			}
 		}
 		"receive_json_tx" => {
 			match input {
 				WalletReceiveRequest::PartialTransaction(partial_tx_str) => {
 					debug!("Operation {} with transaction {}", op, &partial_tx_str);
-					receive_json_tx(&self.config, &self.key, &partial_tx_str).map_err(|e| {
-						api::Error::Internal(format!("Error processing partial transaction: {:?}", e))
-					}).unwrap();
+					receive_json_tx(&self.config, &self.key, &partial_tx_str)
+						.map_err(|e| {
+							api::Error::Internal(
+								format!("Error processing partial transaction: {:?}", e),
+							)
+						})
+						.unwrap();

-					//TODO: Return emptiness for now, should be a proper enum return type
+					// TODO: Return emptiness for now, should be a proper enum return type
 					Ok(CbData {
 						output: String::from(""),
 						kernel: String::from(""),
 					})
 				}
-				_ => Err(api::Error::Argument(format!("Incorrect request data: {}", op))),
+				_ => Err(api::Error::Argument(
+					format!("Incorrect request data: {}", op),
+				)),
 			}
 		}
 		_ => Err(api::Error::Argument(format!("Unknown operation: {}", op))),
@@ -158,7 +166,11 @@ impl ApiEndpoint for WalletReceiver {
 }

 /// Build a coinbase output and the corresponding kernel
-fn receive_coinbase(config: &WalletConfig, ext_key: &ExtendedKey, amount: u64) -> Result<(Output, TxKernel), Error> {
+fn receive_coinbase(
+	config: &WalletConfig,
+	ext_key: &ExtendedKey,
+	amount: u64,
+) -> Result<(Output, TxKernel), Error> {
 	let secp = secp::Secp256k1::with_caps(secp::ContextFlag::Commit);

 	// operate within a lock on wallet data
@@ -177,20 +189,23 @@ fn receive_coinbase(config: &WalletConfig, ext_key: &ExtendedKey, amount: u64) -
 			height: 0,
 			lock_height: 0,
 		});

-		debug!("Using child {} for a new coinbase output.",
-			coinbase_key.n_child);
+		debug!(
+			"Using child {} for a new coinbase output.",
+			coinbase_key.n_child
+		);

 		Block::reward_output(coinbase_key.key, &secp).map_err(&From::from)
 	})?
 }

 /// Builds a full transaction from the partial one sent to us for transfer
-fn receive_transaction(config: &WalletConfig,
-	ext_key: &ExtendedKey,
-	amount: u64,
-	blinding: SecretKey,
-	partial: Transaction)
-	-> Result<Transaction, Error> {
+fn receive_transaction(
+	config: &WalletConfig,
+	ext_key: &ExtendedKey,
+	amount: u64,
+	blinding: SecretKey,
+	partial: Transaction,
+) -> Result<Transaction, Error> {
 	let secp = secp::Secp256k1::with_caps(secp::ContextFlag::Commit);
@@ -200,9 +215,11 @@ fn receive_transaction(config: &WalletConfig,
 		let next_child = wallet_data.next_child(&ext_key.fingerprint);
 		let out_key = ext_key.derive(&secp, next_child).map_err(|e| Error::Key(e))?;

-		let (tx_final, _) = build::transaction(vec![build::initial_tx(partial),
-			build::with_excess(blinding),
-			build::output(amount, out_key.key)])?;
+		let (tx_final, _) = build::transaction(vec![
+			build::initial_tx(partial),
+			build::with_excess(blinding),
+			build::output(amount, out_key.key),
+		])?;

 		// make sure the resulting transaction is valid (could have been lied to
 		// on excess)
@@ -218,8 +235,10 @@ fn receive_transaction(config: &WalletConfig,
 			lock_height: 0,
 		});

-		debug!("Using child {} for a new transaction output.",
-			out_key.n_child);
+		debug!(
+			"Using child {} for a new transaction output.",
+			out_key.n_child
+		);

 		Ok(tx_final)
 	})?
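The `build::transaction` call reformatted above is the heart of the receive path: the sender's half arrives as `initial_tx`, and the receiver appends its own pieces before the builder sums the blinding factors. An annotated sketch of that composition (same builder functions as in this diff; the comments are interpretive):

	let (tx_final, _) = build::transaction(vec![
		build::initial_tx(partial),         // sender's partial transaction
		build::with_excess(blinding),       // blinding carried in the partial-tx JSON
		build::output(amount, out_key.key), // receiver's newly derived output
	])?;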
@@ -13,7 +13,7 @@
 // limitations under the License.

 use std::convert::From;
-use secp::{self};
+use secp;
 use secp::key::SecretKey;

 use checker;
@@ -27,7 +27,12 @@ use api;
 /// wallet
 /// UTXOs. The destination can be "stdout" (for command line) or a URL to the
 /// recipients wallet receiver (to be implemented).
-pub fn issue_send_tx(config: &WalletConfig, ext_key: &ExtendedKey, amount: u64, dest: String) -> Result<(), Error> {
+pub fn issue_send_tx(
+	config: &WalletConfig,
+	ext_key: &ExtendedKey,
+	amount: u64,
+	dest: String,
+) -> Result<(), Error> {
 	let _ = checker::refresh_outputs(&config, ext_key);

 	let (tx, blind_sum) = build_send_tx(config, ext_key, amount)?;
@@ -39,8 +44,10 @@ pub fn issue_send_tx(config: &WalletConfig, ext_key: &ExtendedKey, amount: u64,
 		let url = format!("{}/v1/receive/receive_json_tx", &dest);
 		debug!("Posting partial transaction to {}", url);
 		let request = WalletReceiveRequest::PartialTransaction(json_tx);
-		let _: CbData = api::client::post(url.as_str(), &request)
-			.expect(&format!("Wallet receiver at {} unreachable, could not send transaction. Is it running?", url));
+		let _: CbData = api::client::post(url.as_str(), &request).expect(&format!(
+			"Wallet receiver at {} unreachable, could not send transaction. Is it running?",
+			url
+		));
 	}
 	Ok(())
 }
@@ -48,7 +55,11 @@ pub fn issue_send_tx(config: &WalletConfig, ext_key: &ExtendedKey, amount: u64,
 /// Builds a transaction to send to someone from the HD seed associated with the
 /// wallet and the amount to send. Handles reading through the wallet data file,
 /// selecting outputs to spend and building the change.
-fn build_send_tx(config: &WalletConfig, ext_key: &ExtendedKey, amount: u64) -> Result<(Transaction, SecretKey), Error> {
+fn build_send_tx(
+	config: &WalletConfig,
+	ext_key: &ExtendedKey,
+	amount: u64,
+) -> Result<(Transaction, SecretKey), Error> {
 	// first, rebuild the private key from the seed
 	let secp = secp::Secp256k1::with_caps(secp::ContextFlag::Commit);
@@ -66,7 +77,9 @@ fn build_send_tx(config: &WalletConfig, ext_key: &ExtendedKey, amount: u64) -> R
 	// third, build inputs using the appropriate key
 	let mut parts = vec![];
 	for coin in &coins {
-		let in_key = ext_key.derive(&secp, coin.n_child).map_err(|e| Error::Key(e))?;
+		let in_key = ext_key.derive(&secp, coin.n_child).map_err(
+			|e| Error::Key(e),
+		)?;
 		parts.push(build::input(coin.value, in_key.key));
 	}
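From the caller's side (see `wallet_command` earlier in this commit), the whole send path is one call; a hypothetical invocation with placeholder amount and destination URL:

	// Both argument values are illustrative only.
	wallet::issue_send_tx(&wallet_config, &key, 1_000, "http://127.0.0.1:13416".to_string()).unwrap();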
@@ -79,14 +79,14 @@ impl From<api::Error> for Error {
 #[derive(Debug, Clone, Serialize, Deserialize)]
 pub struct WalletConfig {
-	//Whether to run a wallet
+	// Whether to run a wallet
 	pub enable_wallet: bool,
-	//The api address that this api server (i.e. this wallet) will run
+	// The api address that this api server (i.e. this wallet) will run
 	pub api_http_addr: String,
-	//The api address of a running server node, against which transaction inputs will be checked
-	//during send
+	// The api address of a running server node, against which transaction inputs will be checked
+	// during send
 	pub check_node_api_http_addr: String,
-	//The directory in which wallet files are stored
+	// The directory in which wallet files are stored
 	pub data_file_dir: String,
 }
@@ -171,10 +171,11 @@ impl WalletData {
 	/// Note that due to the impossibility to do an actual file lock easily
 	/// across operating systems, this just creates a lock file with a "should
 	/// not exist" option.
-	pub fn with_wallet<T, F>(data_file_dir:&str, f: F) -> Result<T, Error>
-		where F: FnOnce(&mut WalletData) -> T
+	pub fn with_wallet<T, F>(data_file_dir: &str, f: F) -> Result<T, Error>
+	where
+		F: FnOnce(&mut WalletData) -> T,
 	{
-		//create directory if it doesn't exist
+		// create directory if it doesn't exist
 		fs::create_dir_all(data_file_dir).unwrap_or_else(|why| {
 			info!("! {:?}", why.kind());
 		});
@@ -191,16 +192,23 @@ impl WalletData {
 				.create_new(true)
 				.open(lock_file_path)
 				.map_err(|_| {
-					Error::WalletData(format!("Could not create wallet lock file. Either \
-					some other process is using the wallet or there's a write access issue."))
+					Error::WalletData(format!(
+						"Could not create wallet lock file. Either \
+						some other process is using the wallet or there's a write access issue."
+					))
 				});
 			match result {
-				Ok(_) => { break; },
+				Ok(_) => {
+					break;
+				}
 				Err(e) => {
 					if retries >= 3 {
 						return Err(e);
 					}
-					debug!("failed to obtain wallet.lock, retries - {}, sleeping", retries);
+					debug!(
+						"failed to obtain wallet.lock, retries - {}, sleeping",
+						retries
+					);
 					retries += 1;
 					thread::sleep(time::Duration::from_millis(500));
 				}
@@ -215,16 +223,16 @@ impl WalletData {
 		// delete the lock file
 		fs::remove_file(lock_file_path).map_err(|_| {
-			Error::WalletData(
-				format!("Could not remove wallet lock file. Maybe insufficient rights?")
-			)
+			Error::WalletData(format!(
+				"Could not remove wallet lock file. Maybe insufficient rights?"
+			))
 		})?;

 		Ok(res)
 	}

 	/// Read the wallet data or created a brand new one if it doesn't exist yet
-	fn read_or_create(data_file_path:&str) -> Result<WalletData, Error> {
+	fn read_or_create(data_file_path: &str) -> Result<WalletData, Error> {
 		if Path::new(data_file_path).exists() {
 			WalletData::read(data_file_path)
 		} else {
@@ -234,7 +242,7 @@ impl WalletData {
 	}

 	/// Read the wallet data from disk.
-	fn read(data_file_path:&str) -> Result<WalletData, Error> {
+	fn read(data_file_path: &str) -> Result<WalletData, Error> {
 		let data_file = File::open(data_file_path).map_err(|e| {
 			Error::WalletData(format!("Could not open {}: {}", data_file_path, e))
 		})?;
@@ -244,7 +252,7 @@ impl WalletData {
 	}

 	/// Write the wallet data to disk.
-	fn write(&self, data_file_path:&str) -> Result<(), Error> {
+	fn write(&self, data_file_path: &str) -> Result<(), Error> {
 		let mut data_file = File::create(data_file_path).map_err(|e| {
 			Error::WalletData(format!("Could not create {}: {}", data_file_path, e))
 		})?;
@@ -262,11 +270,12 @@ impl WalletData {
 	}

 	pub fn lock_output(&mut self, out: &OutputData) {
-		if let Some(out_to_lock) = self.outputs.iter_mut().find(|out_to_lock| {
-			out_to_lock.n_child == out.n_child &&
-			out_to_lock.fingerprint == out.fingerprint &&
-			out_to_lock.value == out.value
-		}) {
+		if let Some(out_to_lock) =
+			self.outputs.iter_mut().find(|out_to_lock| {
+				out_to_lock.n_child == out.n_child && out_to_lock.fingerprint == out.fingerprint &&
+					out_to_lock.value == out.value
+			})
+		{
 			out_to_lock.lock();
 		}
 	}
@@ -333,7 +342,9 @@ pub fn partial_tx_from_json(json_str: &str) -> Result<(u64, SecretKey, Transacti
 	let blinding = SecretKey::from_slice(&secp, &blind_bin[..])?;
 	let tx_bin = util::from_hex(partial_tx.tx)?;
 	let tx = ser::deserialize(&mut &tx_bin[..]).map_err(|_| {
-		Error::Format("Could not deserialize transaction, invalid format.".to_string())
+		Error::Format(
+			"Could not deserialize transaction, invalid format.".to_string(),
+		)
 	})?;

 	Ok((partial_tx.amount, blinding, tx))
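The locking discipline reformatted in `with_wallet` above (create a lock file that must not already exist, retry up to three times with a 500 ms sleep, remove it on the way out) stands alone well; a minimal stdlib-only sketch of the same pattern:

	use std::fs::{self, OpenOptions};
	use std::{thread, time};

	fn with_lock<T>(dir: &str, f: impl FnOnce() -> T) -> std::io::Result<T> {
		let lock_path = format!("{}/wallet.lock", dir);
		let mut retries = 0;
		loop {
			// create_new(true) fails if the file already exists: that is the lock.
			match OpenOptions::new().write(true).create_new(true).open(&lock_path) {
				Ok(_) => break,
				Err(e) => {
					if retries >= 3 {
						return Err(e);
					}
					retries += 1;
					thread::sleep(time::Duration::from_millis(500));
				}
			}
		}
		let res = f();
		fs::remove_file(&lock_path)?;
		Ok(res)
	}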