mirror of https://github.com/mimblewimble/grin.git (synced 2025-01-20 19:11:08 +03:00)

Cargo fmt all the things

parent 3b51180359
commit 8504efb796
57 changed files with 3678 additions and 2600 deletions
@@ -26,12 +26,14 @@ use rest::Error;
/// returns a JSON object. Handles request building, JSON deserialization and
/// response code checking.
pub fn get<'a, T>(url: &'a str) -> Result<T, Error>
    where for<'de> T: Deserialize<'de>
where
    for<'de> T: Deserialize<'de>,
{
    let client = hyper::Client::new();
    let res = check_error(client.get(url).send())?;
    serde_json::from_reader(res)
        .map_err(|e| Error::Internal(format!("Server returned invalid JSON: {}", e)))
    serde_json::from_reader(res).map_err(|e| {
        Error::Internal(format!("Server returned invalid JSON: {}", e))
    })
}

/// Helper function to easily issue a HTTP POST request with the provided JSON
@@ -39,15 +41,18 @@ pub fn get<'a, T>(url: &'a str) -> Result<T, Error>
/// building, JSON serialization and deserialization, and response code
/// checking.
pub fn post<'a, IN, OUT>(url: &'a str, input: &IN) -> Result<OUT, Error>
    where IN: Serialize,
          for<'de> OUT: Deserialize<'de>
where
    IN: Serialize,
    for<'de> OUT: Deserialize<'de>,
{
    let in_json = serde_json::to_string(input)
        .map_err(|e| Error::Internal(format!("Could not serialize data to JSON: {}", e)))?;
    let in_json = serde_json::to_string(input).map_err(|e| {
        Error::Internal(format!("Could not serialize data to JSON: {}", e))
    })?;
    let client = hyper::Client::new();
    let res = check_error(client.post(url).body(&mut in_json.as_bytes()).send())?;
    serde_json::from_reader(res)
        .map_err(|e| Error::Internal(format!("Server returned invalid JSON: {}", e)))
    serde_json::from_reader(res).map_err(|e| {
        Error::Internal(format!("Server returned invalid JSON: {}", e))
    })
}

// convert hyper error and check for non success response codes

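Note: the hunks above are pure rustfmt churn: `where` clauses move from visual indentation to block style with one bound per line and a trailing comma, and `map_err` closures wrap in braces. A minimal runnable sketch of the `where` rule (the function and bound here are illustrative, not from the commit):

use std::fmt::Debug;

// old style, visually indented under the signature:
//     fn show<T>(t: T) -> String
//         where T: Debug
// new block style, as applied throughout this commit:
fn show<T>(t: T) -> String
where
    T: Debug,
{
    format!("{:?}", t)
}

fn main() {
    assert_eq!(show(42), "42");
}
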
@@ -203,25 +203,35 @@ struct OpWrapper<E> {
}

impl<E> Handler for OpWrapper<E>
    where E: ApiEndpoint
where
    E: ApiEndpoint,
{
    fn handle(&self, req: &mut Request) -> IronResult<Response> {
        let t: E::OP_IN = serde_json::from_reader(req.body.by_ref())
            .map_err(|e| IronError::new(e, status::BadRequest))?;
        let t: E::OP_IN = serde_json::from_reader(req.body.by_ref()).map_err(|e| {
            IronError::new(e, status::BadRequest)
        })?;
        let res = self.endpoint.operation(self.operation.clone(), t)?;
        let res_json = serde_json::to_string(&res)
            .map_err(|e| IronError::new(e, status::InternalServerError))?;
        let res_json = serde_json::to_string(&res).map_err(|e| {
            IronError::new(e, status::InternalServerError)
        })?;
        Ok(Response::with((status::Ok, res_json)))
    }
}

fn extract_param<ID>(req: &mut Request, param: &'static str) -> IronResult<ID>
    where ID: ToString + FromStr,
          <ID as FromStr>::Err: Debug + Send + error::Error + 'static
where
    ID: ToString + FromStr,
    <ID as FromStr>::Err: Debug + Send + error::Error + 'static,
{

    let id = req.extensions.get::<Router>().unwrap().find(param).unwrap_or("");
    id.parse::<ID>().map_err(|e| IronError::new(e, status::BadRequest))
    let id = req.extensions
        .get::<Router>()
        .unwrap()
        .find(param)
        .unwrap_or("");
    id.parse::<ID>().map_err(
        |e| IronError::new(e, status::BadRequest),
    )
}

/// HTTP server allowing the registration of ApiEndpoint implementations.

@@ -229,7 +239,6 @@ pub struct ApiServer {
    root: String,
    router: Router,
    server_listener: Option<Listening>,

}

impl ApiServer {

@@ -245,7 +254,7 @@ impl ApiServer {

    /// Starts the ApiServer at the provided address.
    pub fn start<A: ToSocketAddrs>(&mut self, addr: A) -> Result<(), String> {
        //replace this value to satisfy borrow checker
        // replace this value to satisfy borrow checker
        let r = mem::replace(&mut self.router, Router::new());
        let result = Iron::new(r).http(addr);
        let return_value = result.as_ref().map(|_| ()).map_err(|e| e.to_string());

@@ -254,7 +263,7 @@ impl ApiServer {
    }

    /// Stops the API server
    pub fn stop(&mut self){
    pub fn stop(&mut self) {
        let r = mem::replace(&mut self.server_listener, None);
        r.unwrap().close().unwrap();
    }

@@ -262,8 +271,9 @@ impl ApiServer {
    /// Register a new API endpoint, providing a relative URL for the new
    /// endpoint.
    pub fn register_endpoint<E>(&mut self, subpath: String, endpoint: E)
        where E: ApiEndpoint,
              <<E as ApiEndpoint>::ID as FromStr>::Err: Debug + Send + error::Error
    where
        E: ApiEndpoint,
        <<E as ApiEndpoint>::ID as FromStr>::Err: Debug + Send + error::Error,
    {

        assert_eq!(subpath.chars().nth(0).unwrap(), '/');

@@ -281,7 +291,12 @@ impl ApiServer {
                endpoint: endpoint.clone(),
            };
            let full_path = format!("{}/{}", root.clone(), op_s.clone());
            self.router.route(op.to_method(), full_path.clone(), wrapper, route_name);
            self.router.route(
                op.to_method(),
                full_path.clone(),
                wrapper,
                route_name,
            );
            info!("route: POST {}", full_path);
        } else {

@@ -294,15 +309,21 @@ impl ApiServer {
                _ => panic!("unreachable"),
            };
            let wrapper = ApiWrapper(endpoint.clone());
            self.router.route(op.to_method(), full_path.clone(), wrapper, route_name);
            self.router.route(
                op.to_method(),
                full_path.clone(),
                wrapper,
                route_name,
            );
            info!("route: {} {}", op.to_method(), full_path);
        }
    }

    // support for the HTTP Options method by differentiating what's on the
    // root resource vs the id resource
    let (root_opts, sub_opts) =
        endpoint.operations().iter().fold((vec![], vec![]), |mut acc, op| {
    let (root_opts, sub_opts) = endpoint.operations().iter().fold(
        (vec![], vec![]),
        |mut acc, op| {
            let m = op.to_method();
            if m == Method::Post {
                acc.0.push(m);

@@ -310,19 +331,26 @@ impl ApiServer {
                acc.1.push(m);
            }
            acc
        });
    self.router.options(root.clone(),
                        move |_: &mut Request| {
                            Ok(Response::with((status::Ok,
                                               Header(headers::Allow(root_opts.clone())))))
                        },
                        "option_".to_string() + route_postfix);
    self.router.options(root.clone() + "/:id",
                        move |_: &mut Request| {
                            Ok(Response::with((status::Ok,
                                               Header(headers::Allow(sub_opts.clone())))))
                        },
                        "option_id_".to_string() + route_postfix);
        },
    );
    self.router.options(
        root.clone(),
        move |_: &mut Request| {
            Ok(Response::with(
                (status::Ok, Header(headers::Allow(root_opts.clone()))),
            ))
        },
        "option_".to_string() + route_postfix,
    );
    self.router.options(
        root.clone() + "/:id",
        move |_: &mut Request| {
            Ok(Response::with(
                (status::Ok, Header(headers::Allow(sub_opts.clone()))),
            ))
        },
        "option_id_".to_string() + route_postfix,
    );
}
}

@@ -344,8 +372,8 @@ mod test {
    impl ApiEndpoint for TestApi {
        type ID = String;
        type T = Animal;
        type OP_IN = ();
        type OP_OUT = ();
        type OP_IN = ();
        type OP_OUT = ();

        fn operations(&self) -> Vec<Operation> {
            vec![Operation::Get]

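Note: the `router.route(...)` and `router.options(...)` rewrites above all follow one rule: once a call overflows the line limit, every argument moves to its own line with a trailing comma. A self-contained sketch (the `route` function below is a stand-in, not Iron's API; the path is hypothetical):

fn route(method: &str, path: &str, name: &str) -> String {
    format!("{} {} ({})", method, path, name)
}

fn main() {
    // before: let r = route("GET", "/v1/chain/utxo/:id", "get_utxo");
    let r = route(
        "GET",
        "/v1/chain/utxo/:id",
        "get_utxo",
    );
    println!("{}", r);
}
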
@@ -30,9 +30,7 @@ pub struct Tip {

impl Tip {
    pub fn from_tip(tip: chain::Tip) -> Tip {
        Tip {
            height: tip.height,
        }
        Tip { height: tip.height }
    }
}

@@ -60,8 +58,11 @@ impl Output {
    pub fn from_output(output: &core::Output, block_header: &core::BlockHeader) -> Output {
        let (output_type, lock_height) = match output.features {
            x if x.contains(core::transaction::COINBASE_OUTPUT) => {
                (OutputType::Coinbase, block_header.height + consensus::COINBASE_MATURITY)
            },
                (
                    OutputType::Coinbase,
                    block_header.height + consensus::COINBASE_MATURITY,
                )
            }
            _ => (OutputType::Transaction, 0),
        };
        Output {

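Note: `from_output` above derives `(output_type, lock_height)`: a coinbase output stays locked until `block_header.height + COINBASE_MATURITY`, anything else is spendable immediately (lock height 0). A compact restatement, using the testing value `COINBASE_MATURITY = 3` that appears in the consensus hunk later in this commit (the enum and helper are illustrative):

const COINBASE_MATURITY: u64 = 3;

#[derive(Debug, PartialEq)]
enum OutputType {
    Coinbase,
    Transaction,
}

fn classify(is_coinbase: bool, height: u64) -> (OutputType, u64) {
    if is_coinbase {
        (OutputType::Coinbase, height + COINBASE_MATURITY)
    } else {
        (OutputType::Transaction, 0)
    }
}

fn main() {
    assert_eq!(classify(true, 10), (OutputType::Coinbase, 13));
    assert_eq!(classify(false, 10), (OutputType::Transaction, 0));
}
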
@@ -29,7 +29,7 @@ use store;
use sumtree;
use types::*;

use core::global::{MiningParameterMode,MINING_PARAMETER_MODE};
use core::global::{MiningParameterMode, MINING_PARAMETER_MODE};

const MAX_ORPHANS: usize = 20;

@@ -44,7 +44,7 @@ pub struct Chain {
    orphans: Arc<Mutex<VecDeque<(Options, Block)>>>,
    sumtrees: Arc<RwLock<sumtree::SumTrees>>,

    //POW verification function
    // POW verification function
    pow_verifier: fn(&BlockHeader, u32) -> bool,
}

@@ -52,14 +52,13 @@ unsafe impl Sync for Chain {}
unsafe impl Send for Chain {}

impl Chain {

    /// Check whether the chain exists. If not, the call to 'init' will
    /// expect an already mined genesis block. This keeps the chain free
    /// from needing to know about the mining implementation
    pub fn chain_exists(db_root: String)->bool {
    pub fn chain_exists(db_root: String) -> bool {
        let chain_store = store::ChainKVStore::new(db_root).unwrap();
        match chain_store.head() {
            Ok(_) => {true},
            Ok(_) => true,
            Err(NotFoundErr) => false,
            Err(_) => false,
        }

@@ -138,7 +137,12 @@ impl Chain {
                orphans.truncate(MAX_ORPHANS);
            }
            Err(ref e) => {
                info!("Rejected block {} at {} : {:?}", b.hash(), b.header.height, e);
                info!(
                    "Rejected block {} at {} : {:?}",
                    b.hash(),
                    b.header.height,
                    e
                );
            }
        }

@@ -161,7 +165,7 @@ impl Chain {

    fn ctx_from_head(&self, head: Tip, opts: Options) -> pipe::BlockContext {
        let opts_in = opts;
        let param_ref=MINING_PARAMETER_MODE.read().unwrap();
        let param_ref = MINING_PARAMETER_MODE.read().unwrap();
        let opts_in = match *param_ref {
            MiningParameterMode::AutomatedTesting => opts_in | EASY_POW,
            MiningParameterMode::UserTesting => opts_in | EASY_POW,

@@ -178,7 +182,7 @@ impl Chain {
        }
    }

    /// Pop orphans out of the queue and check if we can now accept them.
    /// Pop orphans out of the queue and check if we can now accept them.
    fn check_orphans(&self) {
        // first check how many we have to retry, unfort. we can't extend the lock
        // in the loop as it needs to be freed before going in process_block

@@ -209,7 +213,9 @@ impl Chain {
        let sumtrees = self.sumtrees.read().unwrap();
        let is_unspent = sumtrees.is_unspent(output_ref)?;
        if is_unspent {
            self.store.get_output_by_commit(output_ref).map_err(&Error::StoreErr)
            self.store.get_output_by_commit(output_ref).map_err(
                &Error::StoreErr,
            )
        } else {
            Err(Error::OutputNotFound)
        }

@@ -219,7 +225,7 @@ impl Chain {
    /// current sumtree state.
    pub fn set_sumtree_roots(&self, b: &mut Block) -> Result<(), Error> {
        let mut sumtrees = self.sumtrees.write().unwrap();


        let roots = sumtree::extending(&mut sumtrees, |mut extension| {
            // apply the block on the sumtrees and check the resulting root
            extension.apply_block(b)?;

@@ -266,10 +272,13 @@ impl Chain {
    }

    /// Gets the block header by the provided output commitment
    pub fn get_block_header_by_output_commit(&self, commit: &Commitment) -> Result<BlockHeader, Error> {
        self.store.get_block_header_by_output_commit(commit).map_err(
            &Error::StoreErr,
        )
    pub fn get_block_header_by_output_commit(
        &self,
        commit: &Commitment,
    ) -> Result<BlockHeader, Error> {
        self.store
            .get_block_header_by_output_commit(commit)
            .map_err(&Error::StoreErr)
    }

    /// Get the tip of the header chain

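Note: the `get_block_header_by_output_commit` rewrite above shows the signature-splitting rule: once a signature exceeds the line limit, each parameter goes on its own line and the return type follows the closing parenthesis. A self-contained sketch (the struct and byte-array type are stand-ins for the real store and secp `Commitment`):

struct Store;

impl Store {
    // before: the whole signature sat on one over-long line
    fn get_block_header_by_output_commit(
        &self,
        commit: &[u8; 33], // stand-in for the Commitment type
    ) -> Result<usize, String> {
        Ok(commit.len())
    }
}

fn main() {
    let s = Store;
    assert_eq!(s.get_block_header_by_output_commit(&[0u8; 33]), Ok(33));
}
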
@@ -139,7 +139,7 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), E
    if header.height != prev.height + 1 {
        return Err(Error::InvalidBlockHeight);
    }
    if header.timestamp <= prev.timestamp && !global::is_automated_testing_mode(){
    if header.timestamp <= prev.timestamp && !global::is_automated_testing_mode() {
        // prevent time warp attacks and some timestamp manipulations by forcing strict
        // time progression (but not in CI mode)
        return Err(Error::InvalidBlockTime);

@@ -182,11 +182,15 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), E
}

/// Fully validate the block content.
fn validate_block(b: &Block, ctx: &mut BlockContext, ext: &mut sumtree::Extension) -> Result<(), Error> {
fn validate_block(
    b: &Block,
    ctx: &mut BlockContext,
    ext: &mut sumtree::Extension,
) -> Result<(), Error> {
    if b.header.height > ctx.head.height + 1 {
        return Err(Error::Orphan);
    }


    // main isolated block validation, checks all commitment sums and sigs
    let curve = secp::Secp256k1::with_caps(secp::ContextFlag::Commit);
    try!(b.validate(&curve).map_err(&Error::InvalidBlockProof));

@@ -194,10 +198,13 @@ fn validate_block(b: &Block, ctx: &mut BlockContext, ext: &mut sumtree::Extensio
    // check that all the outputs of the block are "new" -
    // that they do not clobber any existing unspent outputs (by their commitment)
    //
    // TODO - do we need to do this here (and can we do this here if we need access to the chain)
    // see check_duplicate_outputs in pool for the analogous operation on transaction outputs
    // TODO - do we need to do this here (and can we do this here if we need access
    // to the chain)
    // see check_duplicate_outputs in pool for the analogous operation on
    // transaction outputs
    // for output in &block.outputs {
    // here we would check that the output is not a duplicate output based on the current chain
    // here we would check that the output is not a duplicate output based on the
    // current chain
    // };


@@ -206,7 +213,7 @@ fn validate_block(b: &Block, ctx: &mut BlockContext, ext: &mut sumtree::Extensio
        // standard head extension
        ext.apply_block(b)?;
    } else {

        // extending a fork, first identify the block where forking occurred
        // keeping the hashes of blocks along the fork
        let mut current = b.header.previous;

@@ -228,7 +235,11 @@ fn validate_block(b: &Block, ctx: &mut BlockContext, ext: &mut sumtree::Extensio
        if forked_block.header.height > 0 {
            let last_output = &forked_block.outputs[forked_block.outputs.len() - 1];
            let last_kernel = &forked_block.kernels[forked_block.kernels.len() - 1];
            ext.rewind(forked_block.header.height, last_output, last_kernel)?;
            ext.rewind(
                forked_block.header.height,
                last_output,
                last_kernel,
            )?;
        }

        // apply all forked blocks, including this new one

@@ -240,27 +251,33 @@ fn validate_block(b: &Block, ctx: &mut BlockContext, ext: &mut sumtree::Extensio
    }

    let (utxo_root, rproof_root, kernel_root) = ext.roots();
    if utxo_root.hash != b.header.utxo_root ||
        rproof_root.hash != b.header.range_proof_root ||
        kernel_root.hash != b.header.kernel_root {
    if utxo_root.hash != b.header.utxo_root || rproof_root.hash != b.header.range_proof_root ||
        kernel_root.hash != b.header.kernel_root
    {

        return Err(Error::InvalidRoot);
    }

    // check that any coinbase outputs are spendable (that they have matured sufficiently)
    // check that any coinbase outputs are spendable (that they have matured
    // sufficiently)
    for input in &b.inputs {
        if let Ok(output) = ctx.store.get_output_by_commit(&input.commitment()) {
            if output.features.contains(transaction::COINBASE_OUTPUT) {
                if let Ok(output_header) = ctx.store.get_block_header_by_output_commit(&input.commitment()) {
                if let Ok(output_header) =
                    ctx.store.get_block_header_by_output_commit(
                        &input.commitment(),
                    )
                {

                    // TODO - make sure we are not off-by-1 here vs. the equivalent transaction validation rule
                    // TODO - make sure we are not off-by-1 here vs. the equivalent transaction
                    // validation rule
                    if b.header.height <= output_header.height + consensus::COINBASE_MATURITY {
                        return Err(Error::ImmatureCoinbase);
                    }
                };
            };
        };
    };
    }

    Ok(())
}

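Note: the maturity check above rejects a spend whenever `b.header.height <= output_header.height + COINBASE_MATURITY`. With the testing value `COINBASE_MATURITY = 3` (see the consensus hunk below), a coinbase minted at height 10 is spendable from height 14 on. A direct restatement of that predicate:

const COINBASE_MATURITY: u64 = 3;

fn is_immature(spend_height: u64, coinbase_height: u64) -> bool {
    spend_height <= coinbase_height + COINBASE_MATURITY
}

fn main() {
    assert!(is_immature(13, 10)); // rejected with Error::ImmatureCoinbase
    assert!(!is_immature(14, 10)); // accepted
}
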
@@ -85,7 +85,9 @@ impl ChainStore for ChainKVStore {
    }

    fn get_block_header(&self, h: &Hash) -> Result<BlockHeader, Error> {
        option_to_not_found(self.db.get_ser(&to_key(BLOCK_HEADER_PREFIX, &mut h.to_vec())))
        option_to_not_found(self.db.get_ser(
            &to_key(BLOCK_HEADER_PREFIX, &mut h.to_vec()),
        ))
    }

    fn check_block_exists(&self, h: &Hash) -> Result<bool, Error> {

@@ -97,13 +99,30 @@ impl ChainStore for ChainKVStore {
        let mut batch = self.db
            .batch()
            .put_ser(&to_key(BLOCK_PREFIX, &mut b.hash().to_vec())[..], b)?
            .put_ser(&to_key(BLOCK_HEADER_PREFIX, &mut b.hash().to_vec())[..], &b.header)?;
            .put_ser(
                &to_key(BLOCK_HEADER_PREFIX, &mut b.hash().to_vec())[..],
                &b.header,
            )?;

        // saving the full output under its hash, as well as a commitment to hash index
        for out in &b.outputs {
            batch = batch
                .put_ser(&to_key(OUTPUT_COMMIT_PREFIX, &mut out.commitment().as_ref().to_vec())[..], out)?
                .put_ser(&to_key(HEADER_BY_OUTPUT_PREFIX, &mut out.commitment().as_ref().to_vec())[..], &b.hash())?;
                .put_ser(
                    &to_key(
                        OUTPUT_COMMIT_PREFIX,
                        &mut out.commitment().as_ref().to_vec(),
                    )
                        [..],
                    out,
                )?
                .put_ser(
                    &to_key(
                        HEADER_BY_OUTPUT_PREFIX,
                        &mut out.commitment().as_ref().to_vec(),
                    )
                        [..],
                    &b.hash(),
                )?;
        }
        batch.write()
    }

@@ -111,11 +130,14 @@ impl ChainStore for ChainKVStore {
    // lookup the block header hash by output commitment
    // lookup the block header based on this hash
    // to check the chain is correct compare this block header to
    // the block header currently indexed at the relevant block height (tbd if actually necessary)
    // the block header currently indexed at the relevant block height (tbd if
    // actually necessary)
    //
    // NOTE: This index is not exhaustive.
    // This node may not have seen this full block, so may not have populated the index.
    // Block headers older than some threshold (2 months?) will not necessarily be included
    // This node may not have seen this full block, so may not have populated the
    // index.
    // Block headers older than some threshold (2 months?) will not necessarily be
    // included
    // in this index.
    //
    fn get_block_header_by_output_commit(&self, commit: &Commitment) -> Result<BlockHeader, Error> {

@@ -133,13 +155,16 @@ impl ChainStore for ChainKVStore {
                } else {
                    Err(Error::NotFoundErr)
                }
            },
            None => Err(Error::NotFoundErr)
        }
            }
            None => Err(Error::NotFoundErr),
        }
    }

    fn save_block_header(&self, bh: &BlockHeader) -> Result<(), Error> {
        self.db.put_ser(&to_key(BLOCK_HEADER_PREFIX, &mut bh.hash().to_vec())[..], bh)
        self.db.put_ser(
            &to_key(BLOCK_HEADER_PREFIX, &mut bh.hash().to_vec())[..],
            bh,
        )
    }

    fn get_header_by_height(&self, height: u64) -> Result<BlockHeader, Error> {

@@ -154,26 +179,44 @@ impl ChainStore for ChainKVStore {
    }

    fn save_output_pos(&self, commit: &Commitment, pos: u64) -> Result<(), Error> {
        self.db.put_ser(&to_key(COMMIT_POS_PREFIX, &mut commit.as_ref().to_vec())[..], &pos)
        self.db.put_ser(
            &to_key(COMMIT_POS_PREFIX, &mut commit.as_ref().to_vec())[..],
            &pos,
        )
    }

    fn get_output_pos(&self, commit: &Commitment) -> Result<u64, Error> {
        option_to_not_found(self.db.get_ser(&to_key(COMMIT_POS_PREFIX, &mut commit.as_ref().to_vec())))
        option_to_not_found(self.db.get_ser(&to_key(
            COMMIT_POS_PREFIX,
            &mut commit.as_ref().to_vec(),
        )))
    }

    fn save_kernel_pos(&self, excess: &Commitment, pos: u64) -> Result<(), Error> {
        self.db.put_ser(&to_key(KERNEL_POS_PREFIX, &mut excess.as_ref().to_vec())[..], &pos)
        self.db.put_ser(
            &to_key(KERNEL_POS_PREFIX, &mut excess.as_ref().to_vec())[..],
            &pos,
        )
    }

    fn get_kernel_pos(&self, excess: &Commitment) -> Result<u64, Error> {
        option_to_not_found(self.db.get_ser(&to_key(KERNEL_POS_PREFIX, &mut excess.as_ref().to_vec())))
        option_to_not_found(self.db.get_ser(&to_key(
            KERNEL_POS_PREFIX,
            &mut excess.as_ref().to_vec(),
        )))
    }

    /// Maintain consistency of the "header_by_height" index by traversing back through the
    /// current chain and updating "header_by_height" until we reach a block_header
    /// that is consistent with its height (everything prior to this will be consistent)
    /// Maintain consistency of the "header_by_height" index by traversing back
    /// through the
    /// current chain and updating "header_by_height" until we reach a
    /// block_header
    /// that is consistent with its height (everything prior to this will be
    /// consistent)
    fn setup_height(&self, bh: &BlockHeader) -> Result<(), Error> {
        self.db.put_ser(&u64_to_key(HEADER_HEIGHT_PREFIX, bh.height), bh)?;
        self.db.put_ser(
            &u64_to_key(HEADER_HEIGHT_PREFIX, bh.height),
            bh,
        )?;
        if bh.height == 0 {
            return Ok(());
        }

@@ -184,10 +227,12 @@ impl ChainStore for ChainKVStore {
            let prev = self.get_header_by_height(prev_height)?;
            if prev.hash() != prev_h {
                let real_prev = self.get_block_header(&prev_h)?;
                self.db.put_ser(
                    &u64_to_key(HEADER_HEIGHT_PREFIX, real_prev.height),
                    &real_prev,
                ).unwrap();
                self.db
                    .put_ser(
                        &u64_to_key(HEADER_HEIGHT_PREFIX, real_prev.height),
                        &real_prev,
                    )
                    .unwrap();
                prev_h = real_prev.previous;
                prev_height = real_prev.height - 1;
            } else {

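Note: every store method above builds its key as a short prefix followed by the serialized identifier via `to_key(PREFIX, &mut id.to_vec())`. A toy sketch of that prefix-key scheme (an assumption-level restatement of the helper's shape, not grin's actual `to_key` from its store crate, which may also insert a separator byte):

fn to_key(prefix: u8, id: &mut Vec<u8>) -> Vec<u8> {
    let mut key = Vec::with_capacity(id.len() + 1);
    key.push(prefix); // one-byte namespace, e.g. BLOCK_HEADER_PREFIX
    key.append(id);   // serialized hash or commitment bytes
    key
}

fn main() {
    const BLOCK_HEADER_PREFIX: u8 = b'h'; // illustrative value
    let key = to_key(BLOCK_HEADER_PREFIX, &mut vec![0xab, 0xcd]);
    assert_eq!(key, vec![b'h', 0xab, 0xcd]);
}
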
@@ -35,12 +35,18 @@ const UTXO_SUBDIR: &'static str = "utxo";
const RANGE_PROOF_SUBDIR: &'static str = "rangeproof";
const KERNEL_SUBDIR: &'static str = "kernel";

struct PMMRHandle<T> where T: Summable + Clone {
struct PMMRHandle<T>
where
    T: Summable + Clone,
{
    backend: PMMRBackend<T>,
    last_pos: u64,
}

impl<T> PMMRHandle<T> where T: Summable + Clone {
impl<T> PMMRHandle<T>
where
    T: Summable + Clone,
{
    fn new(root_dir: String, file_name: &str) -> Result<PMMRHandle<T>, Error> {
        let path = Path::new(&root_dir).join(SUMTREES_SUBDIR).join(file_name);
        fs::create_dir_all(path.clone())?;

@@ -88,7 +94,7 @@ impl SumTrees {
        match rpos {
            Ok(pos) => Ok(self.output_pmmr_h.backend.get(pos).is_some()),
            Err(grin_store::Error::NotFoundErr) => Ok(false),
            Err(e) => Err(Error::StoreErr(e))
            Err(e) => Err(Error::StoreErr(e)),
        }
    }
}

@@ -101,8 +107,10 @@ impl SumTrees {
/// If the closure returns an error, modifications are canceled and the unit
/// of work is abandoned. Otherwise, the unit of work is permanently applied.
pub fn extending<'a, F, T>(trees: &'a mut SumTrees, inner: F) -> Result<T, Error>
    where F: FnOnce(&mut Extension) -> Result<T, Error> {

where
    F: FnOnce(&mut Extension) -> Result<T, Error>,
{

    let sizes: (u64, u64, u64);
    let res: Result<T, Error>;
    let rollback: bool;

@@ -153,17 +161,25 @@ pub struct Extension<'a> {
    commit_index: Arc<ChainStore>,
    new_output_commits: HashMap<Commitment, u64>,
    new_kernel_excesses: HashMap<Commitment, u64>,
    rollback: bool
    rollback: bool,
}

impl<'a> Extension<'a> {

    // constructor
    fn new(trees: &'a mut SumTrees, commit_index: Arc<ChainStore>) -> Extension<'a> {
        Extension {
            output_pmmr: PMMR::at(&mut trees.output_pmmr_h.backend, trees.output_pmmr_h.last_pos),
            rproof_pmmr: PMMR::at(&mut trees.rproof_pmmr_h.backend, trees.rproof_pmmr_h.last_pos),
            kernel_pmmr: PMMR::at(&mut trees.kernel_pmmr_h.backend, trees.kernel_pmmr_h.last_pos),
            output_pmmr: PMMR::at(
                &mut trees.output_pmmr_h.backend,
                trees.output_pmmr_h.last_pos,
            ),
            rproof_pmmr: PMMR::at(
                &mut trees.rproof_pmmr_h.backend,
                trees.rproof_pmmr_h.last_pos,
            ),
            kernel_pmmr: PMMR::at(
                &mut trees.kernel_pmmr_h.backend,
                trees.kernel_pmmr_h.last_pos,
            ),
            commit_index: commit_index,
            new_output_commits: HashMap::new(),
            new_kernel_excesses: HashMap::new(),

@@ -184,14 +200,17 @@ impl<'a> Extension<'a> {
            if let Ok(pos) = pos_res {
                match self.output_pmmr.prune(pos, b.header.height as u32) {
                    Ok(true) => {
                        self.rproof_pmmr.prune(pos, b.header.height as u32)
                        self.rproof_pmmr
                            .prune(pos, b.header.height as u32)
                            .map_err(|s| Error::SumTreeErr(s))?;
                    },
                    }
                    Ok(false) => return Err(Error::AlreadySpent),
                    Err(s) => return Err(Error::SumTreeErr(s)),
                }
            } else {
                return Err(Error::SumTreeErr(format!("Missing index for {:?}", input.commitment())));
                return Err(Error::SumTreeErr(
                    format!("Missing index for {:?}", input.commitment()),
                ));
            }
        }

@@ -200,15 +219,19 @@ impl<'a> Extension<'a> {
                return Err(Error::DuplicateCommitment(out.commitment()));
            }
            // push new outputs commitments in their MMR and save them in the index
            let pos = self.output_pmmr.push(SumCommit {
                commit: out.commitment(),
                secp: secp.clone(),
            }).map_err(&Error::SumTreeErr)?;
            let pos = self.output_pmmr
                .push(SumCommit {
                    commit: out.commitment(),
                    secp: secp.clone(),
                })
                .map_err(&Error::SumTreeErr)?;

            self.new_output_commits.insert(out.commitment(), pos);

            // push range proofs in their MMR
            self.rproof_pmmr.push(NoSum(out.proof)).map_err(&Error::SumTreeErr)?;
            self.rproof_pmmr.push(NoSum(out.proof)).map_err(
                &Error::SumTreeErr,
            )?;
        }

        for kernel in &b.kernels {

@@ -216,7 +239,9 @@ impl<'a> Extension<'a> {
                return Err(Error::DuplicateKernel(kernel.excess.clone()));
            }
            // push kernels in their MMR
            let pos = self.kernel_pmmr.push(NoSum(kernel.clone())).map_err(&Error::SumTreeErr)?;
            let pos = self.kernel_pmmr.push(NoSum(kernel.clone())).map_err(
                &Error::SumTreeErr,
            )?;
            self.new_kernel_excesses.insert(kernel.excess, pos);
        }
        Ok(())

@@ -238,16 +263,28 @@ impl<'a> Extension<'a> {
        let out_pos_rew = self.commit_index.get_output_pos(&output.commitment())?;
        let kern_pos_rew = self.commit_index.get_kernel_pos(&kernel.excess)?;

        self.output_pmmr.rewind(out_pos_rew, height as u32).map_err(&Error::SumTreeErr)?;
        self.rproof_pmmr.rewind(out_pos_rew, height as u32).map_err(&Error::SumTreeErr)?;
        self.kernel_pmmr.rewind(kern_pos_rew, height as u32).map_err(&Error::SumTreeErr)?;
        self.output_pmmr
            .rewind(out_pos_rew, height as u32)
            .map_err(&Error::SumTreeErr)?;
        self.rproof_pmmr
            .rewind(out_pos_rew, height as u32)
            .map_err(&Error::SumTreeErr)?;
        self.kernel_pmmr
            .rewind(kern_pos_rew, height as u32)
            .map_err(&Error::SumTreeErr)?;
        Ok(())
    }

    /// Current root hashes and sums (if applicable) for the UTXO, range proof
    /// and kernel sum trees.
    pub fn roots(&self) -> (HashSum<SumCommit>, HashSum<NoSum<RangeProof>>, HashSum<NoSum<TxKernel>>) {
        (self.output_pmmr.root(), self.rproof_pmmr.root(), self.kernel_pmmr.root())
    pub fn roots(
        &self,
    ) -> (HashSum<SumCommit>, HashSum<NoSum<RangeProof>>, HashSum<NoSum<TxKernel>>) {
        (
            self.output_pmmr.root(),
            self.rproof_pmmr.root(),
            self.kernel_pmmr.root(),
        )
    }

    /// Force the rollback of this extension, no matter the result

@@ -257,7 +294,10 @@ impl<'a> Extension<'a> {

    // Sizes of the sum trees, used by `extending` on rollback.
    fn sizes(&self) -> (u64, u64, u64) {
        (self.output_pmmr.unpruned_size(), self.rproof_pmmr.unpruned_size(),
            self.kernel_pmmr.unpruned_size())
        (
            self.output_pmmr.unpruned_size(),
            self.rproof_pmmr.unpruned_size(),
            self.kernel_pmmr.unpruned_size(),
        )
    }
}

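Note: `Extension` above drives three MMRs (outputs, range proofs, kernels) in lockstep: each apply pushes to all three, each rewind rewinds all three to positions recorded for the target height, and `sizes()`/`roots()` report all three together, in the wrapped tuple style rustfmt now emits. A toy restatement of that invariant with plain vectors (names are illustrative):

struct Ext {
    outputs: Vec<u64>,
    rproofs: Vec<u64>,
    kernels: Vec<u64>,
}

impl Ext {
    fn sizes(&self) -> (u64, u64, u64) {
        (
            self.outputs.len() as u64,
            self.rproofs.len() as u64,
            self.kernels.len() as u64,
        )
    }
}

fn main() {
    let e = Ext { outputs: vec![1], rproofs: vec![2], kernels: vec![3] };
    assert_eq!(e.sizes(), (1, 1, 1)); // the three trees never drift apart
}
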
@@ -208,7 +208,10 @@ pub trait ChainStore: Send + Sync {
    fn get_output_by_commit(&self, commit: &Commitment) -> Result<Output, store::Error>;

    /// Gets a block_header for the given input commit
    fn get_block_header_by_output_commit(&self, commit: &Commitment) -> Result<BlockHeader, store::Error>;
    fn get_block_header_by_output_commit(
        &self,
        commit: &Commitment,
    ) -> Result<BlockHeader, store::Error>;

    /// Saves the position of an output, represented by its commitment, in the
    /// UTXO MMR. Used as an index for spending and pruning.

@@ -35,7 +35,7 @@ use core::global::MiningParameterMode;

use pow::{types, cuckoo, MiningWorker};

fn clean_output_dir(dir_name:&str){
fn clean_output_dir(dir_name: &str) {
    let _ = fs::remove_dir_all(dir_name);
}

@@ -44,11 +44,15 @@ fn setup(dir_name: &str) -> Chain {
    clean_output_dir(dir_name);
    global::set_mining_mode(MiningParameterMode::AutomatedTesting);
    let mut genesis_block = None;
    if !chain::Chain::chain_exists(dir_name.to_string()){
        genesis_block=pow::mine_genesis_block(None);
    if !chain::Chain::chain_exists(dir_name.to_string()) {
        genesis_block = pow::mine_genesis_block(None);
    }
    chain::Chain::init(dir_name.to_string(), Arc::new(NoopAdapter {}),
                       genesis_block, pow::verify_size).unwrap()
    chain::Chain::init(
        dir_name.to_string(),
        Arc::new(NoopAdapter {}),
        genesis_block,
        pow::verify_size,
    ).unwrap()
}

#[test]

@@ -67,7 +71,10 @@ fn mine_empty_chain() {
    miner_config.cuckoo_miner_plugin_dir = Some(String::from("../target/debug/deps"));

    let mut cuckoo_miner = cuckoo::Miner::new(
        consensus::EASINESS, global::sizeshift() as u32, global::proofsize());
        consensus::EASINESS,
        global::sizeshift() as u32,
        global::proofsize(),
    );
    for n in 1..4 {
        let prev = chain.head_header().unwrap();
        let reward_key = secp::key::SecretKey::new(&secp, &mut rng);

@@ -83,7 +90,7 @@ fn mine_empty_chain() {
            &mut b.header,
            difficulty,
            global::sizeshift() as u32,
        ).unwrap();
        ).unwrap();

        let bhash = b.hash();
        chain.process_block(b, chain::EASY_POW).unwrap();

@@ -110,8 +117,9 @@ fn mine_empty_chain() {

        // now check the header output index
        let output = block.outputs[0];
        let header_by_output_commit = chain.
            get_block_header_by_output_commit(&output.commitment()).unwrap();
        let header_by_output_commit = chain
            .get_block_header_by_output_commit(&output.commitment())
            .unwrap();
        assert_eq!(header_by_output_commit.hash(), bhash);
    }
}

@@ -141,7 +149,7 @@ fn mine_forks() {

        // checking our new head
        let head = chain.head().unwrap();
        assert_eq!(head.height, (n+1) as u64);
        assert_eq!(head.height, (n + 1) as u64);
        assert_eq!(head.last_block_h, bhash);
        assert_eq!(head.prev_block_h, prev.hash());

@@ -151,7 +159,7 @@ fn mine_forks() {

        // checking head switch
        let head = chain.head().unwrap();
        assert_eq!(head.height, (n+1) as u64);
        assert_eq!(head.height, (n + 1) as u64);
        assert_eq!(head.last_block_h, bhash);
        assert_eq!(head.prev_block_h, prev.hash());
    }

@@ -33,23 +33,27 @@ use core::global::MiningParameterMode;

use pow::{types, cuckoo, MiningWorker};

fn clean_output_dir(dir_name:&str){
    let _ = fs::remove_dir_all(dir_name);
fn clean_output_dir(dir_name: &str) {
    let _ = fs::remove_dir_all(dir_name);
}

#[test]
fn test_coinbase_maturity() {
    let _ = env_logger::init();
    let _ = env_logger::init();
    clean_output_dir(".grin");
    global::set_mining_mode(MiningParameterMode::AutomatedTesting);
    global::set_mining_mode(MiningParameterMode::AutomatedTesting);

    let mut rng = OsRng::new().unwrap();
    let mut genesis_block = None;
    if !chain::Chain::chain_exists(".grin".to_string()){
        genesis_block=pow::mine_genesis_block(None);
    if !chain::Chain::chain_exists(".grin".to_string()) {
        genesis_block = pow::mine_genesis_block(None);
    }
    let chain = chain::Chain::init(".grin".to_string(), Arc::new(NoopAdapter {}),
                                   genesis_block, pow::verify_size).unwrap();
    let chain = chain::Chain::init(
        ".grin".to_string(),
        Arc::new(NoopAdapter {}),
        genesis_block,
        pow::verify_size,
    ).unwrap();

    let secp = secp::Secp256k1::with_caps(secp::ContextFlag::Commit);

@@ -60,10 +64,14 @@ fn test_coinbase_maturity() {
    };
    miner_config.cuckoo_miner_plugin_dir = Some(String::from("../target/debug/deps"));

    let mut cuckoo_miner = cuckoo::Miner::new(consensus::EASINESS, global::sizeshift() as u32, global::proofsize());
    let mut cuckoo_miner = cuckoo::Miner::new(
        consensus::EASINESS,
        global::sizeshift() as u32,
        global::proofsize(),
    );

    let prev = chain.head_header().unwrap();
    let reward_key = secp::key::SecretKey::new(&secp, &mut rng);
    let reward_key = secp::key::SecretKey::new(&secp, &mut rng);
    let mut block = core::core::Block::new(&prev, vec![], reward_key).unwrap();
    block.header.timestamp = prev.timestamp + time::Duration::seconds(60);

@@ -78,21 +86,23 @@ fn test_coinbase_maturity() {
        global::sizeshift() as u32,
    ).unwrap();

    assert_eq!(block.outputs.len(), 1);
    assert!(block.outputs[0].features.contains(transaction::COINBASE_OUTPUT));
    assert_eq!(block.outputs.len(), 1);
    assert!(block.outputs[0].features.contains(
        transaction::COINBASE_OUTPUT,
    ));

    chain.process_block(block, chain::EASY_POW).unwrap();

    let prev = chain.head_header().unwrap();
    let prev = chain.head_header().unwrap();

    let amount = consensus::REWARD;
    let (coinbase_txn, _) = build::transaction(vec![
        build::input(amount, reward_key),
        build::output_rand(amount-1),
        build::with_fee(1)]
    ).unwrap();
    let amount = consensus::REWARD;
    let (coinbase_txn, _) = build::transaction(vec![
        build::input(amount, reward_key),
        build::output_rand(amount - 1),
        build::with_fee(1),
    ]).unwrap();

    let reward_key = secp::key::SecretKey::new(&secp, &mut rng);
    let reward_key = secp::key::SecretKey::new(&secp, &mut rng);
    let mut block = core::core::Block::new(&prev, vec![&coinbase_txn], reward_key).unwrap();

    block.header.timestamp = prev.timestamp + time::Duration::seconds(60);

@@ -109,56 +119,56 @@ fn test_coinbase_maturity() {
    ).unwrap();

    let result = chain.process_block(block, chain::EASY_POW);
    match result {
        Err(Error::ImmatureCoinbase) => (),
        _ => panic!("expected ImmatureCoinbase error here"),
    };
    match result {
        Err(Error::ImmatureCoinbase) => (),
        _ => panic!("expected ImmatureCoinbase error here"),
    };

    // mine 10 blocks so we increase the height sufficiently
    // coinbase will mature and be spendable in the block after these
    for _ in 0..10 {
        let prev = chain.head_header().unwrap();
    // mine 10 blocks so we increase the height sufficiently
    // coinbase will mature and be spendable in the block after these
    for _ in 0..10 {
        let prev = chain.head_header().unwrap();

        let reward_key = secp::key::SecretKey::new(&secp, &mut rng);
        let mut block = core::core::Block::new(&prev, vec![], reward_key).unwrap();
        block.header.timestamp = prev.timestamp + time::Duration::seconds(60);
        let reward_key = secp::key::SecretKey::new(&secp, &mut rng);
        let mut block = core::core::Block::new(&prev, vec![], reward_key).unwrap();
        block.header.timestamp = prev.timestamp + time::Duration::seconds(60);

        let difficulty = consensus::next_difficulty(chain.difficulty_iter()).unwrap();
        block.header.difficulty = difficulty.clone();
        chain.set_sumtree_roots(&mut block).unwrap();

        pow::pow_size(
            &mut cuckoo_miner,
            &mut block.header,
            difficulty,
            global::sizeshift() as u32,
        ).unwrap();

        chain.process_block(block, chain::EASY_POW).unwrap();
    };

    let prev = chain.head_header().unwrap();

    let reward_key = secp::key::SecretKey::new(&secp, &mut rng);
    let mut block = core::core::Block::new(&prev, vec![&coinbase_txn], reward_key).unwrap();

    block.header.timestamp = prev.timestamp + time::Duration::seconds(60);

    let difficulty = consensus::next_difficulty(chain.difficulty_iter()).unwrap();
    block.header.difficulty = difficulty.clone();
        let difficulty = consensus::next_difficulty(chain.difficulty_iter()).unwrap();
        block.header.difficulty = difficulty.clone();
        chain.set_sumtree_roots(&mut block).unwrap();

    pow::pow_size(
        &mut cuckoo_miner,
        &mut block.header,
        difficulty,
        global::sizeshift() as u32,
    ).unwrap();
        pow::pow_size(
            &mut cuckoo_miner,
            &mut block.header,
            difficulty,
            global::sizeshift() as u32,
        ).unwrap();

    let result = chain.process_block(block, chain::EASY_POW);
    match result {
        Ok(_) => (),
        Err(Error::ImmatureCoinbase) => panic!("we should not get an ImmatureCoinbase here"),
        Err(_) => panic!("we did not expect an error here"),
    };
        chain.process_block(block, chain::EASY_POW).unwrap();
    }

    let prev = chain.head_header().unwrap();

    let reward_key = secp::key::SecretKey::new(&secp, &mut rng);
    let mut block = core::core::Block::new(&prev, vec![&coinbase_txn], reward_key).unwrap();

    block.header.timestamp = prev.timestamp + time::Duration::seconds(60);

    let difficulty = consensus::next_difficulty(chain.difficulty_iter()).unwrap();
    block.header.difficulty = difficulty.clone();
    chain.set_sumtree_roots(&mut block).unwrap();

    pow::pow_size(
        &mut cuckoo_miner,
        &mut block.header,
        difficulty,
        global::sizeshift() as u32,
    ).unwrap();

    let result = chain.process_block(block, chain::EASY_POW);
    match result {
        Ok(_) => (),
        Err(Error::ImmatureCoinbase) => panic!("we should not get an ImmatureCoinbase here"),
        Err(_) => panic!("we did not expect an error here"),
    };
}

@@ -26,50 +26,50 @@ use core::ser::{Writeable, Writer, Error};
#[derive(Copy, Clone, Debug)]
struct TestElem([u32; 4]);
impl Summable for TestElem {
    type Sum = u64;
    fn sum(&self) -> u64 {
        // sums are not allowed to overflow, so we use this simple
        // non-injective "sum" function that will still be homomorphic
        self.0[0] as u64 * 0x1000 + self.0[1] as u64 * 0x100 + self.0[2] as u64 * 0x10 +
            self.0[3] as u64
    }
    type Sum = u64;
    fn sum(&self) -> u64 {
        // sums are not allowed to overflow, so we use this simple
        // non-injective "sum" function that will still be homomorphic
        self.0[0] as u64 * 0x1000 + self.0[1] as u64 * 0x100 + self.0[2] as u64 * 0x10 +
            self.0[3] as u64
    }
}

impl Writeable for TestElem {
    fn write<W: Writer>(&self, writer: &mut W) -> Result<(), Error> {
        try!(writer.write_u32(self.0[0]));
        try!(writer.write_u32(self.0[1]));
        try!(writer.write_u32(self.0[2]));
        writer.write_u32(self.0[3])
    }
    fn write<W: Writer>(&self, writer: &mut W) -> Result<(), Error> {
        try!(writer.write_u32(self.0[0]));
        try!(writer.write_u32(self.0[1]));
        try!(writer.write_u32(self.0[2]));
        writer.write_u32(self.0[3])
    }
}

#[bench]
fn bench_small_tree(b: &mut Bencher) {
    let mut rng = rand::thread_rng();
    b.iter(|| {
        let mut big_tree = SumTree::new();
        for i in 0..1000 {
            // To avoid RNG overflow we generate random elements that are small.
            // Though to avoid repeat elements they have to be reasonably big.
            let new_elem;
            let word1 = rng.gen::<u16>() as u32;
            let word2 = rng.gen::<u16>() as u32;
            if rng.gen() {
                if rng.gen() {
                    new_elem = TestElem([word1, word2, 0, 0]);
                } else {
                    new_elem = TestElem([word1, 0, word2, 0]);
                }
            } else {
                if rng.gen() {
                    new_elem = TestElem([0, word1, 0, word2]);
                } else {
                    new_elem = TestElem([0, 0, word1, word2]);
                }
            }
    let mut rng = rand::thread_rng();
    b.iter(|| {
        let mut big_tree = SumTree::new();
        for i in 0..1000 {
            // To avoid RNG overflow we generate random elements that are small.
            // Though to avoid repeat elements they have to be reasonably big.
            let new_elem;
            let word1 = rng.gen::<u16>() as u32;
            let word2 = rng.gen::<u16>() as u32;
            if rng.gen() {
                if rng.gen() {
                    new_elem = TestElem([word1, word2, 0, 0]);
                } else {
                    new_elem = TestElem([word1, 0, word2, 0]);
                }
            } else {
                if rng.gen() {
                    new_elem = TestElem([0, word1, 0, word2]);
                } else {
                    new_elem = TestElem([0, 0, word1, word2]);
                }
            }

            big_tree.push(new_elem);
        }
    });
            big_tree.push(new_elem);
        }
    });
}

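Note: the "sum" in the bench above is deliberately non-injective but homomorphic; it packs the four words into slots weighted 0x1000, 0x100, 0x10 and 1, so TestElem([1, 2, 3, 4]) sums to 0x1234 = 4660, and distinct elements can collide. Checking that arithmetic directly:

fn sum(e: [u32; 4]) -> u64 {
    e[0] as u64 * 0x1000 + e[1] as u64 * 0x100 + e[2] as u64 * 0x10 + e[3] as u64
}

fn main() {
    assert_eq!(sum([1, 2, 3, 4]), 0x1234);
    // non-injective: different elements can share a sum
    assert_eq!(sum([0, 0, 1, 0]), sum([0, 0, 0, 16]));
}
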
@@ -27,7 +27,8 @@ use core::target::Difficulty;
pub const REWARD: u64 = 1_000_000_000;

/// Number of blocks before a coinbase matures and can be spent
/// TODO - reduced this for testing - need to investigate if we can lower this in test env
/// TODO - reduced this for testing - need to investigate if we can lower this
/// in test env
// pub const COINBASE_MATURITY: u64 = 1_000;
pub const COINBASE_MATURITY: u64 = 3;

@@ -99,7 +100,8 @@ impl fmt::Display for TargetError {
/// difference between the median timestamps at the beginning and the end
/// of the window.
pub fn next_difficulty<T>(cursor: T) -> Result<Difficulty, TargetError>
    where T: IntoIterator<Item = Result<(u64, Difficulty), TargetError>>
where
    T: IntoIterator<Item = Result<(u64, Difficulty), TargetError>>,
{

    // Block times at the beginning and end of the adjustment window, used to

@@ -155,8 +157,9 @@ pub fn next_difficulty<T>(cursor: T) -> Result<Difficulty, TargetError>
        ts_damp
    };

    Ok(diff_avg * Difficulty::from_num(BLOCK_TIME_WINDOW) /
        Difficulty::from_num(adj_ts))
    Ok(
        diff_avg * Difficulty::from_num(BLOCK_TIME_WINDOW) / Difficulty::from_num(adj_ts),
    )
}

#[cfg(test)]

@@ -171,24 +174,25 @@ mod test {
    // Builds an iterator for next difficulty calculation with the provided
    // constant time interval, difficulty and total length.
    fn repeat(interval: u64, diff: u64, len: u64) -> Vec<Result<(u64, Difficulty), TargetError>> {
        //watch overflow here, length shouldn't be ridiculous anyhow
        // watch overflow here, length shouldn't be ridiculous anyhow
        assert!(len < std::usize::MAX as u64);
        let diffs = vec![Difficulty::from_num(diff); len as usize];
        let times = (0..(len as usize)).map(|n| n * interval as usize).rev();
        let pairs = times.zip(diffs.iter());
        pairs.map(|(t, d)| Ok((t as u64, d.clone()))).collect::<Vec<_>>()
        pairs
            .map(|(t, d)| Ok((t as u64, d.clone())))
            .collect::<Vec<_>>()
    }

    fn repeat_offs(from: u64,
                   interval: u64,
                   diff: u64,
                   len: u64)
                   -> Vec<Result<(u64, Difficulty), TargetError>> {
        map_vec!(repeat(interval, diff, len), |e| {
            match e.clone() {
                Err(e) => Err(e),
                Ok((t, d)) => Ok((t + from, d)),
            }
    fn repeat_offs(
        from: u64,
        interval: u64,
        diff: u64,
        len: u64,
    ) -> Vec<Result<(u64, Difficulty), TargetError>> {
        map_vec!(repeat(interval, diff, len), |e| match e.clone() {
            Err(e) => Err(e),
            Ok((t, d)) => Ok((t + from, d)),
        })
    }

@@ -196,19 +200,28 @@ mod test {
    #[test]
    fn next_target_adjustment() {
        // not enough data
        assert_eq!(next_difficulty(vec![]).unwrap(), Difficulty::from_num(MINIMUM_DIFFICULTY));
        assert_eq!(
            next_difficulty(vec![]).unwrap(),
            Difficulty::from_num(MINIMUM_DIFFICULTY)
        );

        assert_eq!(next_difficulty(vec![Ok((60, Difficulty::one()))]).unwrap(),
                   Difficulty::from_num(MINIMUM_DIFFICULTY));
        assert_eq!(
            next_difficulty(vec![Ok((60, Difficulty::one()))]).unwrap(),
            Difficulty::from_num(MINIMUM_DIFFICULTY)
        );

        assert_eq!(next_difficulty(repeat(60, 10, DIFFICULTY_ADJUST_WINDOW)).unwrap(),
                   Difficulty::from_num(MINIMUM_DIFFICULTY));
        assert_eq!(
            next_difficulty(repeat(60, 10, DIFFICULTY_ADJUST_WINDOW)).unwrap(),
            Difficulty::from_num(MINIMUM_DIFFICULTY)
        );

        // just enough data, right interval, should stay constant

        let just_enough = DIFFICULTY_ADJUST_WINDOW + MEDIAN_TIME_WINDOW;
        assert_eq!(next_difficulty(repeat(60, 1000, just_enough)).unwrap(),
                   Difficulty::from_num(1000));
        assert_eq!(
            next_difficulty(repeat(60, 1000, just_enough)).unwrap(),
            Difficulty::from_num(1000)
        );

        // checking averaging works, window length is odd so need to compensate a little
        let sec = DIFFICULTY_ADJUST_WINDOW / 2 + 1 + MEDIAN_TIME_WINDOW;

@@ -218,28 +231,44 @@ mod test {
        assert_eq!(next_difficulty(s2).unwrap(), Difficulty::from_num(999));

        // too slow, diff goes down
        assert_eq!(next_difficulty(repeat(90, 1000, just_enough)).unwrap(),
                   Difficulty::from_num(889));
        assert_eq!(next_difficulty(repeat(120, 1000, just_enough)).unwrap(),
                   Difficulty::from_num(800));
        assert_eq!(
            next_difficulty(repeat(90, 1000, just_enough)).unwrap(),
            Difficulty::from_num(889)
        );
        assert_eq!(
            next_difficulty(repeat(120, 1000, just_enough)).unwrap(),
            Difficulty::from_num(800)
        );

        // too fast, diff goes up
        assert_eq!(next_difficulty(repeat(55, 1000, just_enough)).unwrap(),
                   Difficulty::from_num(1021));
        assert_eq!(next_difficulty(repeat(45, 1000, just_enough)).unwrap(),
                   Difficulty::from_num(1067));
        assert_eq!(
            next_difficulty(repeat(55, 1000, just_enough)).unwrap(),
            Difficulty::from_num(1021)
        );
        assert_eq!(
            next_difficulty(repeat(45, 1000, just_enough)).unwrap(),
            Difficulty::from_num(1067)
        );

        // hitting lower time bound, should always get the same result below
        assert_eq!(next_difficulty(repeat(20, 1000, just_enough)).unwrap(),
                   Difficulty::from_num(1200));
        assert_eq!(next_difficulty(repeat(10, 1000, just_enough)).unwrap(),
                   Difficulty::from_num(1200));
        assert_eq!(
            next_difficulty(repeat(20, 1000, just_enough)).unwrap(),
            Difficulty::from_num(1200)
        );
        assert_eq!(
            next_difficulty(repeat(10, 1000, just_enough)).unwrap(),
            Difficulty::from_num(1200)
        );

        // hitting higher time bound, should always get the same result above
        assert_eq!(next_difficulty(repeat(160, 1000, just_enough)).unwrap(),
                   Difficulty::from_num(750));
        assert_eq!(next_difficulty(repeat(200, 1000, just_enough)).unwrap(),
                   Difficulty::from_num(750));
        assert_eq!(
            next_difficulty(repeat(160, 1000, just_enough)).unwrap(),
            Difficulty::from_num(750)
        );
        assert_eq!(
            next_difficulty(repeat(200, 1000, just_enough)).unwrap(),
            Difficulty::from_num(750)
        );
    }

}

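Note: the expected values in the test above are consistent with a damped window time: the observed per-block interval is pulled three quarters of the way toward the 60s ideal and then clamped, after which next_diff = diff_avg * BLOCK_TIME_WINDOW / adj_ts. This is a reading inferred from the expected numbers, not a quote of the implementation:

fn damped_next_diff(actual_per_block: f64) -> u64 {
    let ideal = 60.0;
    let mut adj = (3.0 * ideal + actual_per_block) / 4.0; // damping
    adj = adj.max(ideal / 1.2).min(ideal * 4.0 / 3.0); // clamp to bounds
    (1000.0 * ideal / adj).round() as u64
}

fn main() {
    assert_eq!(damped_next_diff(90.0), 889); // too slow, diff drops
    assert_eq!(damped_next_diff(120.0), 800);
    assert_eq!(damped_next_diff(55.0), 1021); // too fast, diff rises
    assert_eq!(damped_next_diff(20.0), 1200); // lower time bound hit
    assert_eq!(damped_next_diff(200.0), 750); // upper time bound hit
}
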
@ -85,14 +85,16 @@ impl Default for BlockHeader {
|
|||
/// Serialization of a block header
|
||||
impl Writeable for BlockHeader {
|
||||
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
|
||||
ser_multiwrite!(writer,
|
||||
[write_u64, self.height],
|
||||
[write_fixed_bytes, &self.previous],
|
||||
[write_i64, self.timestamp.to_timespec().sec],
|
||||
[write_fixed_bytes, &self.utxo_root],
|
||||
[write_fixed_bytes, &self.range_proof_root],
|
||||
[write_fixed_bytes, &self.kernel_root],
|
||||
[write_u8, self.features.bits()]);
|
||||
ser_multiwrite!(
|
||||
writer,
|
||||
[write_u64, self.height],
|
||||
[write_fixed_bytes, &self.previous],
|
||||
[write_i64, self.timestamp.to_timespec().sec],
|
||||
[write_fixed_bytes, &self.utxo_root],
|
||||
[write_fixed_bytes, &self.range_proof_root],
|
||||
[write_fixed_bytes, &self.kernel_root],
|
||||
[write_u8, self.features.bits()]
|
||||
);
|
||||
|
||||
try!(writer.write_u64(self.nonce));
|
||||
try!(self.difficulty.write(writer));
|
||||
|
@ -129,7 +131,9 @@ impl Readable for BlockHeader {
|
|||
utxo_root: utxo_root,
|
||||
range_proof_root: rproof_root,
|
||||
kernel_root: kernel_root,
|
||||
features: BlockFeatures::from_bits(features).ok_or(ser::Error::CorruptedData)?,
|
||||
features: BlockFeatures::from_bits(features).ok_or(
|
||||
ser::Error::CorruptedData,
|
||||
)?,
|
||||
pow: pow,
|
||||
nonce: nonce,
|
||||
difficulty: difficulty,
|
||||
|
@ -162,10 +166,12 @@ impl Writeable for Block {
|
|||
try!(self.header.write(writer));
|
||||
|
||||
if writer.serialization_mode() != ser::SerializationMode::Hash {
|
||||
ser_multiwrite!(writer,
|
||||
[write_u64, self.inputs.len() as u64],
|
||||
[write_u64, self.outputs.len() as u64],
|
||||
[write_u64, self.kernels.len() as u64]);
|
||||
ser_multiwrite!(
|
||||
writer,
|
||||
[write_u64, self.inputs.len() as u64],
|
||||
[write_u64, self.outputs.len() as u64],
|
||||
[write_u64, self.kernels.len() as u64]
|
||||
);
|
||||
|
||||
for inp in &self.inputs {
|
||||
try!(inp.write(writer));
|
||||
|
@ -234,10 +240,11 @@ impl Block {
|
|||
/// Builds a new block from the header of the previous block, a vector of
|
||||
/// transactions and the private key that will receive the reward. Checks
|
||||
/// that all transactions are valid and calculates the Merkle tree.
|
||||
pub fn new(prev: &BlockHeader,
|
||||
txs: Vec<&Transaction>,
|
||||
reward_key: SecretKey)
|
||||
-> Result<Block, secp::Error> {
|
||||
pub fn new(
|
||||
prev: &BlockHeader,
|
||||
txs: Vec<&Transaction>,
|
||||
reward_key: SecretKey,
|
||||
) -> Result<Block, secp::Error> {
|
||||
|
||||
let secp = Secp256k1::with_caps(secp::ContextFlag::Commit);
|
||||
let (reward_out, reward_proof) = try!(Block::reward_output(reward_key, &secp));
|
||||
|
@ -248,11 +255,12 @@ impl Block {
|
|||
/// Builds a new block ready to mine from the header of the previous block,
|
||||
/// a vector of transactions and the reward information. Checks
|
||||
/// that all transactions are valid and calculates the Merkle tree.
|
||||
pub fn with_reward(prev: &BlockHeader,
|
||||
txs: Vec<&Transaction>,
|
||||
reward_out: Output,
|
||||
reward_kern: TxKernel)
|
||||
-> Result<Block, secp::Error> {
|
||||
pub fn with_reward(
|
||||
prev: &BlockHeader,
|
||||
txs: Vec<&Transaction>,
|
||||
reward_out: Output,
|
||||
reward_kern: TxKernel,
|
||||
) -> Result<Block, secp::Error> {
|
||||
// note: the following reads easily but may not be the most efficient due to
|
||||
// repeated iterations, revisit if a problem
|
||||
let secp = Secp256k1::with_caps(secp::ContextFlag::Commit);
|
||||
|
@ -264,18 +272,16 @@ impl Block {
|
|||
// build vectors with all inputs and all outputs, ordering them by hash
|
||||
// needs to be a fold so we don't end up with a vector of vectors and we
|
||||
// want to fully own the refs (not just a pointer like flat_map).
|
||||
let mut inputs = txs.iter()
|
||||
.fold(vec![], |mut acc, ref tx| {
|
||||
let mut inputs = tx.inputs.clone();
|
||||
acc.append(&mut inputs);
|
||||
acc
|
||||
});
|
||||
let mut outputs = txs.iter()
|
||||
.fold(vec![], |mut acc, ref tx| {
|
||||
let mut outputs = tx.outputs.clone();
|
||||
acc.append(&mut outputs);
|
||||
acc
|
||||
});
|
||||
let mut inputs = txs.iter().fold(vec![], |mut acc, ref tx| {
|
||||
let mut inputs = tx.inputs.clone();
|
||||
acc.append(&mut inputs);
|
||||
acc
|
||||
});
|
||||
let mut outputs = txs.iter().fold(vec![], |mut acc, ref tx| {
|
||||
let mut outputs = tx.outputs.clone();
|
||||
acc.append(&mut outputs);
|
||||
acc
|
||||
});
|
||||
outputs.push(reward_out);
|
||||
|
||||
inputs.sort_by_key(|inp| inp.hash());
|
||||
|
@ -283,19 +289,24 @@ impl Block {
|
|||
|
||||
// calculate the overall Merkle tree and fees
|
||||
|
||||
Ok(Block {
|
||||
Ok(
|
||||
Block {
|
||||
header: BlockHeader {
|
||||
height: prev.height + 1,
|
||||
timestamp: time::Tm { tm_nsec: 0, ..time::now_utc() },
|
||||
timestamp: time::Tm {
|
||||
tm_nsec: 0,
|
||||
..time::now_utc()
|
||||
},
|
||||
previous: prev.hash(),
|
||||
total_difficulty: prev.pow.clone().to_difficulty() + prev.total_difficulty.clone(),
|
||||
total_difficulty: prev.pow.clone().to_difficulty() +
|
||||
prev.total_difficulty.clone(),
|
||||
..Default::default()
|
||||
},
|
||||
inputs: inputs,
|
||||
outputs: outputs,
|
||||
kernels: kernels,
|
||||
}
|
||||
.compact())
|
||||
}.compact(),
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
|
@@ -312,37 +323,37 @@ impl Block {
 	/// Matches any output with a potential spending input, eliminating them
 	/// from the block. Provides a simple way to compact the block. The
 	/// elimination is stable with respect to inputs and outputs order.
 	///
-	/// NOTE: exclude coinbase from compaction process
-	/// if a block contains a new coinbase output and
-	/// is a transaction spending a previous coinbase
-	/// we do not want to compact these away
-	///
+	///
+	/// NOTE: exclude coinbase from compaction process
+	/// if a block contains a new coinbase output and
+	/// is a transaction spending a previous coinbase
+	/// we do not want to compact these away
+	///
 	pub fn compact(&self) -> Block {
-		let in_set = self.inputs
-			.iter()
-			.map(|inp| inp.commitment())
-			.collect::<HashSet<_>>();
+		let in_set = self.inputs
+			.iter()
+			.map(|inp| inp.commitment())
+			.collect::<HashSet<_>>();

-		let out_set = self.outputs
-			.iter()
-			.filter(|out| !out.features.contains(COINBASE_OUTPUT))
-			.map(|out| out.commitment())
-			.collect::<HashSet<_>>();
+		let out_set = self.outputs
+			.iter()
+			.filter(|out| !out.features.contains(COINBASE_OUTPUT))
+			.map(|out| out.commitment())
+			.collect::<HashSet<_>>();

-		let commitments_to_compact = in_set.intersection(&out_set).collect::<HashSet<_>>();
+		let commitments_to_compact = in_set.intersection(&out_set).collect::<HashSet<_>>();

-		let new_inputs = self.inputs
-			.iter()
-			.filter(|inp| !commitments_to_compact.contains(&inp.commitment()))
-			.map(|&inp| inp)
-			.collect::<Vec<_>>();
+		let new_inputs = self.inputs
+			.iter()
+			.filter(|inp| !commitments_to_compact.contains(&inp.commitment()))
+			.map(|&inp| inp)
+			.collect::<Vec<_>>();

-		let new_outputs = self.outputs
-			.iter()
-			.filter(|out| !commitments_to_compact.contains(&out.commitment()))
-			.map(|&out| out)
-			.collect::<Vec<_>>();
+		let new_outputs = self.outputs
+			.iter()
+			.filter(|out| !commitments_to_compact.contains(&out.commitment()))
+			.map(|&out| out)
+			.collect::<Vec<_>>();

 		Block {
 			header: BlockHeader {
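The compaction above is plain set arithmetic: intersect the spending inputs with the non-coinbase outputs, then filter both lists, which also keeps the original ordering stable. A self-contained sketch under the simplifying assumption that commitments are `u64`s (the real type is a Pedersen commitment):

use std::collections::HashSet;

fn compact(inputs: &[u64], outputs: &[u64], coinbase: &HashSet<u64>) -> (Vec<u64>, Vec<u64>) {
	let in_set: HashSet<u64> = inputs.iter().copied().collect();
	// coinbase outputs are excluded from the compaction candidates
	let out_set: HashSet<u64> = outputs.iter().copied().filter(|c| !coinbase.contains(c)).collect();
	// anything both created and spent inside the block can be eliminated
	let spent: HashSet<u64> = in_set.intersection(&out_set).copied().collect();
	let new_inputs = inputs.iter().copied().filter(|c| !spent.contains(c)).collect();
	let new_outputs = outputs.iter().copied().filter(|c| !spent.contains(c)).collect();
	(new_inputs, new_outputs)
}

fn main() {
	let coinbase: HashSet<u64> = [100u64].iter().copied().collect();
	let (ins, outs) = compact(&[1, 2], &[2, 3, 100], &coinbase);
	assert_eq!(ins, vec![1]); // input 2 was matched with output 2
	assert_eq!(outs, vec![3, 100]); // the coinbase output survives
	println!("inputs {:?}, outputs {:?}", ins, outs);
}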
@@ -374,18 +385,17 @@ impl Block {
 		all_outputs.sort_by_key(|out| out.hash());

 		Block {
-			// compact will fix the merkle tree
-			header: BlockHeader {
-				pow: self.header.pow.clone(),
-				difficulty: self.header.difficulty.clone(),
-				total_difficulty: self.header.total_difficulty.clone(),
-				..self.header
-			},
-			inputs: all_inputs,
-			outputs: all_outputs,
-			kernels: all_kernels,
-		}
-		.compact()
+			// compact will fix the merkle tree
+			header: BlockHeader {
+				pow: self.header.pow.clone(),
+				difficulty: self.header.difficulty.clone(),
+				total_difficulty: self.header.total_difficulty.clone(),
+				..self.header
+			},
+			inputs: all_inputs,
+			outputs: all_outputs,
+			kernels: all_kernels,
+		}.compact()
 	}

 	/// Validates all the elements in a block that can be checked without

@@ -394,7 +404,7 @@ impl Block {
 	pub fn validate(&self, secp: &Secp256k1) -> Result<(), secp::Error> {
 		self.verify_coinbase(secp)?;
 		self.verify_kernels(secp)?;
-		Ok(())
+		Ok(())
 	}

 	/// Validate the sum of input/output commitments match the sum in kernels

@@ -441,23 +451,25 @@ impl Block {
 		// verifying the kernels on a block composed of just the coinbase outputs
 		// and kernels checks all we need
 		Block {
-			header: BlockHeader::default(),
-			inputs: vec![],
-			outputs: cb_outs,
-			kernels: cb_kerns,
-		}
-		.verify_kernels(secp)
+			header: BlockHeader::default(),
+			inputs: vec![],
+			outputs: cb_outs,
+			kernels: cb_kerns,
+		}.verify_kernels(secp)
 	}

 	/// Builds the blinded output and related signature proof for the block
 	/// reward.
-	pub fn reward_output(skey: secp::key::SecretKey,
-	                     secp: &Secp256k1)
-	                     -> Result<(Output, TxKernel), secp::Error> {
-		let msg = try!(secp::Message::from_slice(&[0; secp::constants::MESSAGE_SIZE]));
+	pub fn reward_output(
+		skey: secp::key::SecretKey,
+		secp: &Secp256k1,
+	) -> Result<(Output, TxKernel), secp::Error> {
+		let msg = try!(secp::Message::from_slice(
+			&[0; secp::constants::MESSAGE_SIZE],
+		));
 		let sig = try!(secp.sign(&msg, &skey));
 		let commit = secp.commit(REWARD, skey).unwrap();
-		//let switch_commit = secp.switch_commit(skey).unwrap();
+		// let switch_commit = secp.switch_commit(skey).unwrap();
 		let nonce = secp.nonce();
 		let rproof = secp.range_proof(0, REWARD, skey, commit, nonce);
@@ -560,78 +572,85 @@ mod test {
 		assert_eq!(b3.outputs.len(), 4);
 	}

-	#[test]
-	fn empty_block_with_coinbase_is_valid() {
-		let ref secp = new_secp();
-		let b = new_block(vec![], secp);
+	#[test]
+	fn empty_block_with_coinbase_is_valid() {
+		let ref secp = new_secp();
+		let b = new_block(vec![], secp);

-		assert_eq!(b.inputs.len(), 0);
-		assert_eq!(b.outputs.len(), 1);
-		assert_eq!(b.kernels.len(), 1);
+		assert_eq!(b.inputs.len(), 0);
+		assert_eq!(b.outputs.len(), 1);
+		assert_eq!(b.kernels.len(), 1);

-		let coinbase_outputs = b.outputs
+		let coinbase_outputs = b.outputs
 			.iter()
 			.filter(|out| out.features.contains(COINBASE_OUTPUT))
-			.map(|o| o.clone())
+			.map(|o| o.clone())
 			.collect::<Vec<_>>();
-		assert_eq!(coinbase_outputs.len(), 1);
+		assert_eq!(coinbase_outputs.len(), 1);

-		let coinbase_kernels = b.kernels
+		let coinbase_kernels = b.kernels
 			.iter()
 			.filter(|out| out.features.contains(COINBASE_KERNEL))
-			.map(|o| o.clone())
+			.map(|o| o.clone())
 			.collect::<Vec<_>>();
-		assert_eq!(coinbase_kernels.len(), 1);
+		assert_eq!(coinbase_kernels.len(), 1);

-		// the block should be valid here (single coinbase output with corresponding txn kernel)
-		assert_eq!(b.validate(&secp), Ok(()));
-	}
+		// the block should be valid here (single coinbase output with corresponding
+		// txn kernel)
+		assert_eq!(b.validate(&secp), Ok(()));
+	}

-	#[test]
-	// test that flipping the COINBASE_OUTPUT flag on the output features
-	// invalidates the block and specifically it causes verify_coinbase to fail
-	// additionally verifying the merkle_inputs_outputs also fails
-	fn remove_coinbase_output_flag() {
-		let ref secp = new_secp();
-		let mut b = new_block(vec![], secp);
+	#[test]
+	// test that flipping the COINBASE_OUTPUT flag on the output features
+	// invalidates the block and specifically it causes verify_coinbase to fail
+	// additionally verifying the merkle_inputs_outputs also fails
+	fn remove_coinbase_output_flag() {
+		let ref secp = new_secp();
+		let mut b = new_block(vec![], secp);

-		assert!(b.outputs[0].features.contains(COINBASE_OUTPUT));
-		b.outputs[0].features.remove(COINBASE_OUTPUT);
+		assert!(b.outputs[0].features.contains(COINBASE_OUTPUT));
+		b.outputs[0].features.remove(COINBASE_OUTPUT);

-		assert_eq!(b.verify_coinbase(&secp), Err(secp::Error::IncorrectCommitSum));
-		assert_eq!(b.verify_kernels(&secp), Ok(()));
+		assert_eq!(
+			b.verify_coinbase(&secp),
+			Err(secp::Error::IncorrectCommitSum)
+		);
+		assert_eq!(b.verify_kernels(&secp), Ok(()));

-		assert_eq!(b.validate(&secp), Err(secp::Error::IncorrectCommitSum));
-	}
+		assert_eq!(b.validate(&secp), Err(secp::Error::IncorrectCommitSum));
+	}

-	#[test]
-	// test that flipping the COINBASE_KERNEL flag on the kernel features
-	// invalidates the block and specifically it causes verify_coinbase to fail
-	fn remove_coinbase_kernel_flag() {
-		let ref secp = new_secp();
-		let mut b = new_block(vec![], secp);
+	#[test]
+	// test that flipping the COINBASE_KERNEL flag on the kernel features
+	// invalidates the block and specifically it causes verify_coinbase to fail
+	fn remove_coinbase_kernel_flag() {
+		let ref secp = new_secp();
+		let mut b = new_block(vec![], secp);

-		assert!(b.kernels[0].features.contains(COINBASE_KERNEL));
-		b.kernels[0].features.remove(COINBASE_KERNEL);
+		assert!(b.kernels[0].features.contains(COINBASE_KERNEL));
+		b.kernels[0].features.remove(COINBASE_KERNEL);

-		assert_eq!(b.verify_coinbase(&secp), Err(secp::Error::IncorrectCommitSum));
-		assert_eq!(b.verify_kernels(&secp), Ok(()));
+		assert_eq!(
+			b.verify_coinbase(&secp),
+			Err(secp::Error::IncorrectCommitSum)
+		);
+		assert_eq!(b.verify_kernels(&secp), Ok(()));

-		assert_eq!(b.validate(&secp), Err(secp::Error::IncorrectCommitSum));
-	}
+		assert_eq!(b.validate(&secp), Err(secp::Error::IncorrectCommitSum));
+	}

-	#[test]
-	fn serialize_deserialize_block() {
-		let ref secp = new_secp();
-		let b = new_block(vec![], secp);
+	#[test]
+	fn serialize_deserialize_block() {
+		let ref secp = new_secp();
+		let b = new_block(vec![], secp);

-		let mut vec = Vec::new();
-		ser::serialize(&mut vec, &b).expect("serialization failed");
-		let b2: Block = ser::deserialize(&mut &vec[..]).unwrap();
+		let mut vec = Vec::new();
+		ser::serialize(&mut vec, &b).expect("serialization failed");
+		let b2: Block = ser::deserialize(&mut &vec[..]).unwrap();

-		assert_eq!(b.inputs, b2.inputs);
-		assert_eq!(b.outputs, b2.outputs);
-		assert_eq!(b.kernels, b2.kernels);
-		assert_eq!(b.header, b2.header);
-	}
+		assert_eq!(b.inputs, b2.inputs);
+		assert_eq!(b.outputs, b2.outputs);
+		assert_eq!(b.kernels, b2.kernels);
+		assert_eq!(b.header, b2.header);
+	}
 }

@@ -112,12 +112,14 @@ pub fn output(value: u64, blinding: SecretKey) -> Box<Append> {
 		let commit = build.secp.commit(value, blinding).unwrap();
 		let nonce = build.secp.nonce();
 		let rproof = build.secp.range_proof(0, value, blinding, commit, nonce);
-		(tx.with_output(Output {
-			features: DEFAULT_OUTPUT,
-			commit: commit,
-			proof: rproof,
-		}),
-		 sum.add(blinding))
+		(
+			tx.with_output(Output {
+				features: DEFAULT_OUTPUT,
+				commit: commit,
+				proof: rproof,
+			}),
+			sum.add(blinding),
+		)
 	})
 }

@@ -130,30 +132,38 @@ pub fn output_rand(value: u64) -> Box<Append> {
 		let commit = build.secp.commit(value, blinding).unwrap();
 		let nonce = build.secp.nonce();
 		let rproof = build.secp.range_proof(0, value, blinding, commit, nonce);
-		(tx.with_output(Output {
-			features: DEFAULT_OUTPUT,
-			commit: commit,
-			proof: rproof,
-		}),
-		 sum.add(blinding))
+		(
+			tx.with_output(Output {
+				features: DEFAULT_OUTPUT,
+				commit: commit,
+				proof: rproof,
+			}),
+			sum.add(blinding),
+		)
 	})
 }

 /// Sets the fee on the transaction being built.
 pub fn with_fee(fee: u64) -> Box<Append> {
-	Box::new(move |_build, (tx, sum)| -> (Transaction, BlindSum) { (tx.with_fee(fee), sum) })
+	Box::new(move |_build, (tx, sum)| -> (Transaction, BlindSum) {
+		(tx.with_fee(fee), sum)
+	})
 }

 /// Sets a known excess value on the transaction being built. Usually used in
 /// combination with the initial_tx function when a new transaction is built
 /// by adding to a pre-existing one.
 pub fn with_excess(excess: SecretKey) -> Box<Append> {
-	Box::new(move |_build, (tx, sum)| -> (Transaction, BlindSum) { (tx, sum.add(excess)) })
+	Box::new(move |_build, (tx, sum)| -> (Transaction, BlindSum) {
+		(tx, sum.add(excess))
+	})
 }

 /// Sets an initial transaction to add to when building a new transaction.
 pub fn initial_tx(tx: Transaction) -> Box<Append> {
-	Box::new(move |_build, (_, sum)| -> (Transaction, BlindSum) { (tx.clone(), sum) })
+	Box::new(move |_build, (_, sum)| -> (Transaction, BlindSum) {
+		(tx.clone(), sum)
+	})
 }

 /// Builds a new transaction by combining all the combinators provided in a

@@ -171,8 +181,10 @@ pub fn transaction(elems: Vec<Box<Append>>) -> Result<(Transaction, SecretKey),
 		secp: Secp256k1::with_caps(secp::ContextFlag::Commit),
 		rng: OsRng::new().unwrap(),
 	};
-	let (mut tx, sum) = elems.iter().fold((Transaction::empty(), BlindSum::new()),
-	                                      |acc, elem| elem(&mut ctx, acc));
+	let (mut tx, sum) = elems.iter().fold(
+		(Transaction::empty(), BlindSum::new()),
+		|acc, elem| elem(&mut ctx, acc),
+	);

 	let blind_sum = sum.sum(&ctx.secp)?;
 	let msg = secp::Message::from_slice(&u64_to_32bytes(tx.fee))?;
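The builder treats each combinator as a boxed closure and folds them over an accumulator, which is exactly what the `elems.iter().fold(...)` above does. A stripped-down standalone sketch, assuming a `(fee, blind_sum)` pair of `u64`s in place of the real `(Transaction, BlindSum)` accumulator and omitting the `&mut ctx` argument:

// a combinator is a boxed closure that transforms the accumulator
type Append = dyn Fn((u64, u64)) -> (u64, u64);

fn with_fee(fee: u64) -> Box<Append> {
	Box::new(move |(_, sum)| (fee, sum))
}

fn input(blind: u64) -> Box<Append> {
	Box::new(move |(fee, sum)| (fee, sum + blind))
}

fn transaction(elems: Vec<Box<Append>>) -> (u64, u64) {
	// thread the accumulator through every combinator, as in the fold above
	elems.iter().fold((0, 0), |acc, elem| elem(acc))
}

fn main() {
	let (fee, blind_sum) = transaction(vec![input(10), input(11), with_fee(1)]);
	assert_eq!((fee, blind_sum), (1, 21));
	println!("fee {}, blind_sum {}", fee, blind_sum);
}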
@@ -199,9 +211,12 @@ mod test {
 	#[test]
 	fn blind_simple_tx() {
 		let secp = Secp256k1::with_caps(secp::ContextFlag::Commit);
-		let (tx, _) =
-			transaction(vec![input_rand(10), input_rand(11), output_rand(20), with_fee(1)])
-				.unwrap();
+		let (tx, _) = transaction(vec![
+			input_rand(10),
+			input_rand(11),
+			output_rand(20),
+			with_fee(1),
+		]).unwrap();
 		tx.verify_sig(&secp).unwrap();
 	}
 	#[test]

@@ -143,9 +143,9 @@ impl HashWriter {
 	/// Consume the `HashWriter`, outputting a `Hash` corresponding to its
 	/// current state
 	pub fn into_hash(self) -> Hash {
-		let mut res = [0; 32];
+		let mut res = [0; 32];
 		(&mut res).copy_from_slice(self.state.finalize().as_bytes());
-		Hash(res)
+		Hash(res)
 	}
 }

@@ -20,7 +20,7 @@ pub mod hash;
 pub mod pmmr;
 pub mod target;
 pub mod transaction;
-//pub mod txoset;
+// pub mod txoset;
 #[allow(dead_code)]

 use std::fmt;

@@ -82,7 +82,7 @@ pub trait Committed {
 /// Proof of work
 pub struct Proof {
 	/// The nonces
-	pub nonces:Vec<u32>,
+	pub nonces: Vec<u32>,

 	/// The proof size
 	pub proof_size: usize,

@@ -125,9 +125,8 @@ impl Clone for Proof {
 }

 impl Proof {
-
 	/// Builds a proof with all bytes zeroed out
-	pub fn new(in_nonces:Vec<u32>) -> Proof {
+	pub fn new(in_nonces: Vec<u32>) -> Proof {
 		Proof {
 			proof_size: in_nonces.len(),
 			nonces: in_nonces,

@@ -135,10 +134,10 @@ impl Proof {
 	}

 	/// Builds a proof with all bytes zeroed out
-	pub fn zero(proof_size:usize) -> Proof {
+	pub fn zero(proof_size: usize) -> Proof {
 		Proof {
 			proof_size: proof_size,
-			nonces: vec![0;proof_size],
+			nonces: vec![0; proof_size],
 		}
 	}

@@ -251,9 +250,12 @@ mod test {

 	#[test]
 	fn hash_output() {
-		let (tx, _) =
-			build::transaction(vec![input_rand(75), output_rand(42), output_rand(32), with_fee(1)])
-				.unwrap();
+		let (tx, _) = build::transaction(vec![
+			input_rand(75),
+			output_rand(42),
+			output_rand(32),
+			with_fee(1),
+		]).unwrap();
 		let h = tx.outputs[0].hash();
 		assert!(h != ZERO_HASH);
 		let h2 = tx.outputs[1].hash();

@@ -309,9 +311,11 @@ mod test {
 		// From now on, Bob only has the obscured transaction and the sum of
 		// blinding factors. He adds his output, finalizes the transaction so it's
 		// ready for broadcast.
-		let (tx_final, _) =
-			build::transaction(vec![initial_tx(tx_alice), with_excess(blind_sum), output_rand(5)])
-				.unwrap();
+		let (tx_final, _) = build::transaction(vec![
+			initial_tx(tx_alice),
+			with_excess(blind_sum),
+			output_rand(5),
+		]).unwrap();

 		tx_final.validate(&secp).unwrap();
 	}

@@ -357,8 +361,12 @@ mod test {

 	// utility producing a transaction with 2 inputs and a single outputs
 	pub fn tx2i1o() -> Transaction {
-		build::transaction(vec![input_rand(10), input_rand(11), output_rand(20), with_fee(1)])
-			.map(|(tx, _)| tx)
+		build::transaction(vec![
+			input_rand(10),
+			input_rand(11),
+			output_rand(20),
+			with_fee(1),
+		]).map(|(tx, _)| tx)
 			.unwrap()
 	}

@@ -15,7 +15,11 @@
 //! Persistent and prunable Merkle Mountain Range implementation. For a high
 //! level description of MMRs, see:
 //!
-//! https://github.com/opentimestamps/opentimestamps-server/blob/master/doc/merkle-mountain-range.md
+//! https://github.
+//! com/opentimestamps/opentimestamps-server/blob/master/doc/merkle-mountain-range.
+//!
+//!
+//! md
 //!
 //! This implementation is built in two major parts:
 //!
@@ -91,7 +95,10 @@ impl<T> Summable for NoSum<T> {
 		return 0;
 	}
 }
-impl<T> Writeable for NoSum<T> where T: Writeable {
+impl<T> Writeable for NoSum<T>
+where
+	T: Writeable,
+{
 	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
 		self.0.write(writer)
 	}
@@ -100,14 +107,20 @@ impl<T> Writeable for NoSum<T> where T: Writeable {
 /// A utility type to handle (Hash, Sum) pairs more conveniently. The addition
 /// of two HashSums is the (Hash(h1|h2), h1 + h2) HashSum.
 #[derive(Debug, Clone, PartialEq, Eq)]
-pub struct HashSum<T> where T: Summable {
+pub struct HashSum<T>
+where
+	T: Summable,
+{
 	/// The hash
 	pub hash: Hash,
 	/// The sum
 	pub sum: T::Sum,
 }

-impl<T> HashSum<T> where T: Summable + Hashed {
+impl<T> HashSum<T>
+where
+	T: Summable + Hashed,
+{
 	/// Create a hash sum from a summable
 	pub fn from_summable(idx: u64, elmt: &T) -> HashSum<T> {
 		let hash = elmt.hash();
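To make the `(Hash(h1|h2), h1 + h2)` rule concrete, here is a standalone sketch where `DefaultHasher` stands in for the real hash function and the sum is a plain `u64`:

use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

#[derive(Debug, Clone, PartialEq)]
struct HashSum { hash: u64, sum: u64 }

// parent = (Hash(h1|h2), s1 + s2); the parent hash commits to both children
fn add(l: &HashSum, r: &HashSum) -> HashSum {
	let mut h = DefaultHasher::new();
	(l.hash, r.hash).hash(&mut h);
	HashSum { hash: h.finish(), sum: l.sum + r.sum }
}

fn main() {
	let a = HashSum { hash: 1, sum: 10 };
	let b = HashSum { hash: 2, sum: 32 };
	let parent = add(&a, &b);
	assert_eq!(parent.sum, 42); // sums aggregate up toward the root
	println!("{:?}", parent);
}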
@@ -120,7 +133,10 @@ impl<T> HashSum<T> where T: Summable + Hashed {
 	}
 }

-impl<T> Readable for HashSum<T> where T: Summable {
+impl<T> Readable for HashSum<T>
+where
+	T: Summable,
+{
 	fn read(r: &mut Reader) -> Result<HashSum<T>, ser::Error> {
 		Ok(HashSum {
 			hash: Hash::read(r)?,

@@ -129,14 +145,20 @@ impl<T> Readable for HashSum<T> where T: Summable {
 	}
 }

-impl<T> Writeable for HashSum<T> where T: Summable {
+impl<T> Writeable for HashSum<T>
+where
+	T: Summable,
+{
 	fn write<W: Writer>(&self, w: &mut W) -> Result<(), ser::Error> {
 		self.hash.write(w)?;
 		self.sum.write(w)
 	}
 }

-impl<T> ops::Add for HashSum<T> where T: Summable {
+impl<T> ops::Add for HashSum<T>
+where
+	T: Summable,
+{
 	type Output = HashSum<T>;
 	fn add(self, other: HashSum<T>) -> HashSum<T> {
 		HashSum {

@@ -150,8 +172,10 @@ impl<T> ops::Add for HashSum<T> where T: Summable {
 /// The PMMR itself does not need the Backend to be accurate on the existence
 /// of an element (i.e. remove could be a no-op) but layers above can
 /// depend on an accurate Backend to check existence.
-pub trait Backend<T> where T: Summable {
-
+pub trait Backend<T>
+where
+	T: Summable,
+{
 	/// Append the provided HashSums to the backend storage. The position of the
 	/// first element of the Vec in the MMR is provided to help the
 	/// implementation.

@@ -176,15 +200,22 @@ pub trait Backend<T> where T: Summable {
 /// Heavily relies on navigation operations within a binary tree. In particular,
 /// all the implementation needs to keep track of the MMR structure is how far
 /// we are in the sequence of nodes making up the MMR.
-pub struct PMMR<'a, T, B> where T: Summable, B: 'a + Backend<T> {
+pub struct PMMR<'a, T, B>
+where
+	T: Summable,
+	B: 'a + Backend<T>,
+{
 	last_pos: u64,
 	backend: &'a mut B,
 	// only needed for parameterizing Backend
 	summable: PhantomData<T>,
 }

-impl<'a, T, B> PMMR<'a, T, B> where T: Summable + Hashed + Clone, B: 'a + Backend<T> {
-
+impl<'a, T, B> PMMR<'a, T, B>
+where
+	T: Summable + Hashed + Clone,
+	B: 'a + Backend<T>,
+{
 	/// Build a new prunable Merkle Mountain Range using the provided backend.
 	pub fn new(backend: &'a mut B) -> PMMR<T, B> {
 		PMMR {
@@ -194,7 +225,8 @@ impl<'a, T, B> PMMR<'a, T, B> where T: Summable + Hashed + Clone, B: 'a + Backen
 		}
 	}

-	/// Build a new prunable Merkle Mountain Range pre-initlialized until last_pos
+	/// Build a new prunable Merkle Mountain Range pre-initlialized until
+	/// last_pos
 	/// with the provided backend.
 	pub fn at(backend: &'a mut B, last_pos: u64) -> PMMR<T, B> {
 		PMMR {

@@ -215,7 +247,7 @@ impl<'a, T, B> PMMR<'a, T, B> where T: Summable + Hashed + Clone, B: 'a + Backen
 			ret = match (ret, peak) {
 				(None, x) => x,
 				(Some(hsum), None) => Some(hsum),
-				(Some(lhsum), Some(rhsum)) => Some(lhsum + rhsum)
+				(Some(lhsum), Some(rhsum)) => Some(lhsum + rhsum),
 			}
 		}
 		ret.expect("no root, invalid tree")

@@ -234,10 +266,11 @@ impl<'a, T, B> PMMR<'a, T, B> where T: Summable + Hashed + Clone, B: 'a + Backen
 		// height it means we have to build a higher peak by summing with a previous
 		// sibling. we do it iteratively in case the new peak itself allows the
 		// creation of another parent.
-		while bintree_postorder_height(pos+1) > height {
+		while bintree_postorder_height(pos + 1) > height {
 			let left_sibling = bintree_jump_left_sibling(pos);
-			let left_hashsum = self.backend.get(left_sibling)
-				.expect("missing left sibling in tree, should not have been pruned");
+			let left_hashsum = self.backend.get(left_sibling).expect(
+				"missing left sibling in tree, should not have been pruned",
+			);
 			current_hashsum = left_hashsum + current_hashsum;

 			to_append.push(current_hashsum.clone());

@@ -259,7 +292,7 @@ impl<'a, T, B> PMMR<'a, T, B> where T: Summable + Hashed + Clone, B: 'a + Backen
 		// position is a leaf, which may had some parent that needs to exist
 		// afterward for the MMR to be valid
 		let mut pos = position;
-		while bintree_postorder_height(pos+1) > 0 {
+		while bintree_postorder_height(pos + 1) > 0 {
 			pos += 1;
 		}

@@ -268,13 +301,14 @@ impl<'a, T, B> PMMR<'a, T, B> where T: Summable + Hashed + Clone, B: 'a + Backen
 		Ok(())
 	}

-	/// Prune an element from the tree given its position. Note that to be able to
+	/// Prune an element from the tree given its position. Note that to be able
+	/// to
 	/// provide that position and prune, consumers of this API are expected to
 	/// keep an index of elements to positions in the tree. Prunes parent
 	/// nodes as well when they become childless.
 	pub fn prune(&mut self, position: u64, index: u32) -> Result<bool, String> {
 		if let None = self.backend.get(position) {
-			return Ok(false)
+			return Ok(false);
 		}
 		let prunable_height = bintree_postorder_height(position);
 		if prunable_height > 0 {
@@ -286,7 +320,7 @@ impl<'a, T, B> PMMR<'a, T, B> where T: Summable + Hashed + Clone, B: 'a + Backen
 		// the tree.
 		let mut to_prune = vec![];
 		let mut current = position;
-		while current+1 < self.last_pos {
+		while current + 1 < self.last_pos {
 			let (parent, sibling) = family(current);
 			if parent > self.last_pos {
 				// can't prune when our parent isn't here yet

@@ -330,7 +364,7 @@ impl<'a, T, B> PMMR<'a, T, B> where T: Summable + Hashed + Clone, B: 'a + Backen
 			print!("{:>8} ", n + 1);
 		}
 		println!("");
-		for n in 1..(sz+1) {
+		for n in 1..(sz + 1) {
 			let ohs = self.get(n);
 			match ohs {
 				Some(hs) => print!("{} ", hs.hash),

@@ -345,36 +379,45 @@ impl<'a, T, B> PMMR<'a, T, B> where T: Summable + Hashed + Clone, B: 'a + Backen
 /// compact the Vector itself but still frees the reference to the
 /// underlying HashSum.
 #[derive(Clone)]
-pub struct VecBackend<T> where T: Summable + Clone {
+pub struct VecBackend<T>
+where
+	T: Summable + Clone,
+{
 	pub elems: Vec<Option<HashSum<T>>>,
 }

-impl<T> Backend<T> for VecBackend<T> where T: Summable + Clone {
+impl<T> Backend<T> for VecBackend<T>
+where
+	T: Summable + Clone,
+{
 	#[allow(unused_variables)]
 	fn append(&mut self, position: u64, data: Vec<HashSum<T>>) -> Result<(), String> {
 		self.elems.append(&mut map_vec!(data, |d| Some(d.clone())));
 		Ok(())
 	}
 	fn get(&self, position: u64) -> Option<HashSum<T>> {
-		self.elems[(position-1) as usize].clone()
+		self.elems[(position - 1) as usize].clone()
 	}
 	fn remove(&mut self, positions: Vec<u64>, index: u32) -> Result<(), String> {
 		for n in positions {
-			self.elems[(n-1) as usize] = None
+			self.elems[(n - 1) as usize] = None
 		}
 		Ok(())
 	}
 	#[allow(unused_variables)]
 	fn rewind(&mut self, position: u64, index: u32) -> Result<(), String> {
-		self.elems = self.elems[0..(position as usize)+1].to_vec();
+		self.elems = self.elems[0..(position as usize) + 1].to_vec();
 		Ok(())
 	}
 }
-impl<T> VecBackend<T> where T: Summable + Clone {
+impl<T> VecBackend<T>
+where
+	T: Summable + Clone,
+{
 	/// Instantiates a new VecBackend<T>
 	pub fn new() -> VecBackend<T> {
-		VecBackend{elems: vec![]}
+		VecBackend { elems: vec![] }
 	}

 	/// Current number of HashSum elements in the underlying Vec.

@@ -418,7 +461,7 @@ pub struct PruneList {
 impl PruneList {
 	/// Instantiate a new empty prune list
 	pub fn new() -> PruneList {
-		PruneList{pruned_nodes: vec![]}
+		PruneList { pruned_nodes: vec![] }
 	}

 	/// Computes by how many positions a node at pos should be shifted given the

@@ -501,7 +544,7 @@ fn peaks(num: u64) -> Vec<u64> {

 	// detecting an invalid mountain range, when siblings exist but no parent
 	// exists
-	if bintree_postorder_height(num+1) > bintree_postorder_height(num) {
+	if bintree_postorder_height(num + 1) > bintree_postorder_height(num) {
 		return vec![];
 	}

@@ -616,7 +659,7 @@ pub fn family(pos: u64) -> (u64, u64) {
 	let parent: u64;

 	let pos_height = bintree_postorder_height(pos);
-	let next_height = bintree_postorder_height(pos+1);
+	let next_height = bintree_postorder_height(pos + 1);
 	if next_height > pos_height {
 		sibling = bintree_jump_left_sibling(pos);
 		parent = pos + 1;

@@ -710,15 +753,19 @@ mod test {
 	#[test]
 	#[allow(unused_variables)]
 	fn first_50_mmr_heights() {
-		let first_100_str =
-			"0 0 1 0 0 1 2 0 0 1 0 0 1 2 3 0 0 1 0 0 1 2 0 0 1 0 0 1 2 3 4 \
+		let first_100_str = "0 0 1 0 0 1 2 0 0 1 0 0 1 2 3 0 0 1 0 0 1 2 0 0 1 0 0 1 2 3 4 \
 			0 0 1 0 0 1 2 0 0 1 0 0 1 2 3 0 0 1 0 0 1 2 0 0 1 0 0 1 2 3 4 5 \
 			0 0 1 0 0 1 2 0 0 1 0 0 1 2 3 0 0 1 0 0 1 2 0 0 1 0 0 1 2 3 4 0 0 1 0 0";
 		let first_100 = first_100_str.split(' ').map(|n| n.parse::<u64>().unwrap());
 		let mut count = 1;
 		for n in first_100 {
-			assert_eq!(n, bintree_postorder_height(count), "expected {}, got {}",
-				n, bintree_postorder_height(count));
+			assert_eq!(
+				n,
+				bintree_postorder_height(count),
+				"expected {}, got {}",
+				n,
+				bintree_postorder_height(count)
+			);
 			count += 1;
 		}
 	}
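The height sequence asserted above can be computed with a small standalone function: keep jumping into the left sibling subtree until the position is the root of a full binary tree. This is one way to write it, not necessarily the implementation used in this file:

// 1-based postorder position -> node height
fn bintree_postorder_height(mut pos: u64) -> u64 {
	loop {
		let bits = 64 - pos.leading_zeros() as u64;
		let full = (1u64 << bits) - 1; // size of the smallest full tree that can hold pos
		if pos == full {
			return bits - 1; // pos is the root of a full tree
		}
		// otherwise jump into the left sibling subtree and retry
		pos -= (1u64 << (bits - 1)) - 1;
	}
}

fn main() {
	let heights: Vec<u64> = (1..=15).map(bintree_postorder_height).collect();
	// matches the start of the test string: 0 0 1 0 0 1 2 0 0 1 0 0 1 2 3
	assert_eq!(heights, vec![0, 0, 1, 0, 0, 1, 2, 0, 0, 1, 0, 0, 1, 2, 3]);
	println!("{:?}", heights);
}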
@@ -785,7 +832,13 @@ mod test {
 		let hash = Hashed::hash(&elems[0]);
 		let sum = elems[0].sum();
 		let node_hash = (1 as u64, &sum, hash).hash();
-		assert_eq!(pmmr.root(), HashSum{hash: node_hash, sum: sum});
+		assert_eq!(
+			pmmr.root(),
+			HashSum {
+				hash: node_hash,
+				sum: sum,
+			}
+		);
 		assert_eq!(pmmr.unpruned_size(), 1);

 		// two elements

@@ -802,7 +855,8 @@ mod test {

 		// four elements
 		pmmr.push(elems[3]).unwrap();
-		let sum4 = sum2 + (HashSum::from_summable(4, &elems[2]) + HashSum::from_summable(5, &elems[3]));
+		let sum4 = sum2 +
+			(HashSum::from_summable(4, &elems[2]) + HashSum::from_summable(5, &elems[3]));
 		assert_eq!(pmmr.root(), sum4);
 		assert_eq!(pmmr.unpruned_size(), 7);

@@ -814,7 +868,8 @@ mod test {

 		// six elements
 		pmmr.push(elems[5]).unwrap();
-		let sum6 = sum4.clone() + (HashSum::from_summable(8, &elems[4]) + HashSum::from_summable(9, &elems[5]));
+		let sum6 = sum4.clone() +
+			(HashSum::from_summable(8, &elems[4]) + HashSum::from_summable(9, &elems[5]));
 		assert_eq!(pmmr.root(), sum6.clone());
 		assert_eq!(pmmr.unpruned_size(), 10);

@@ -826,7 +881,9 @@ mod test {

 		// eight elements
 		pmmr.push(elems[7]).unwrap();
-		let sum8 = sum4 + ((HashSum::from_summable(8, &elems[4]) + HashSum::from_summable(9, &elems[5])) + (HashSum::from_summable(11, &elems[6]) + HashSum::from_summable(12, &elems[7])));
+		let sum8 = sum4 +
+			((HashSum::from_summable(8, &elems[4]) + HashSum::from_summable(9, &elems[5])) +
+				(HashSum::from_summable(11, &elems[6]) + HashSum::from_summable(12, &elems[7])));
 		assert_eq!(pmmr.root(), sum8);
 		assert_eq!(pmmr.unpruned_size(), 15);

@@ -59,8 +59,8 @@ impl Difficulty {
 	/// provided hash.
 	pub fn from_hash(h: &Hash) -> Difficulty {
 		let max_target = BigEndian::read_u64(&MAX_TARGET);
-		//Use the first 64 bits of the given hash
-		let mut in_vec=h.to_vec();
+		// Use the first 64 bits of the given hash
+		let mut in_vec = h.to_vec();
 		in_vec.truncate(8);
 		let num = BigEndian::read_u64(&in_vec);
 		Difficulty { num: max_target / num }
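The conversion above divides a fixed maximum target by the leading 64 bits of the hash, so smaller hash values map to larger difficulties. A standalone sketch, assuming the maximum target is the all-ones 64-bit value (the actual `MAX_TARGET` constant is defined elsewhere in this file, so treat that as an illustration only):

fn difficulty_from_hash(h: &[u8; 32]) -> u64 {
	let max_target = u64::MAX; // assumed all-ones max target
	// take the first 64 bits of the hash, big-endian
	let mut first = [0u8; 8];
	first.copy_from_slice(&h[..8]);
	let num = u64::from_be_bytes(first);
	max_target / num.max(1) // guard the all-zero prefix case
}

fn main() {
	let mut h = [0xffu8; 32];
	assert_eq!(difficulty_from_hash(&h), 1); // the largest possible hash value
	h[0] = 0x00; // a smaller leading byte means a smaller number, higher difficulty
	assert!(difficulty_from_hash(&h) > 1);
	println!("difficulty = {}", difficulty_from_hash(&h));
}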
@@ -121,7 +121,8 @@ impl Readable for Difficulty {

 impl Serialize for Difficulty {
 	fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
-	where S: Serializer
+	where
+		S: Serializer,
 	{
 		serializer.serialize_u64(self.num)
 	}

@@ -129,7 +130,8 @@ impl Serialize for Difficulty {

 impl<'de> Deserialize<'de> for Difficulty {
 	fn deserialize<D>(deserializer: D) -> Result<Difficulty, D::Error>
-	where D: Deserializer<'de>
+	where
+		D: Deserializer<'de>,
 	{
 		deserializer.deserialize_u64(DiffVisitor)
 	}

@@ -145,12 +147,16 @@ impl<'de> de::Visitor<'de> for DiffVisitor {
 	}

 	fn visit_str<E>(self, s: &str) -> Result<Self::Value, E>
-	where E: de::Error
+	where
+		E: de::Error,
 	{
 		let num_in = s.parse::<u64>();
-		if let Err(_)=num_in {
-			return Err(de::Error::invalid_value(de::Unexpected::Str(s), &"a value number"));
-		};
+		if let Err(_) = num_in {
+			return Err(de::Error::invalid_value(
+				de::Unexpected::Str(s),
+				&"a value number",
+			));
+		};
 		Ok(Difficulty { num: num_in.unwrap() })
 	}
 }

@@ -54,11 +54,13 @@ pub struct TxKernel {

 impl Writeable for TxKernel {
 	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
-		ser_multiwrite!(writer,
-			[write_u8, self.features.bits()],
-			[write_fixed_bytes, &self.excess],
-			[write_bytes, &self.excess_sig],
-			[write_u64, self.fee]);
+		ser_multiwrite!(
+			writer,
+			[write_u8, self.features.bits()],
+			[write_fixed_bytes, &self.excess],
+			[write_bytes, &self.excess_sig],
+			[write_u64, self.fee]
+		);
 		Ok(())
 	}
 }

@@ -66,8 +68,9 @@ impl Writeable for TxKernel {
 impl Readable for TxKernel {
 	fn read(reader: &mut Reader) -> Result<TxKernel, ser::Error> {
 		Ok(TxKernel {
-			features:
-				KernelFeatures::from_bits(reader.read_u8()?).ok_or(ser::Error::CorruptedData)?,
+			features: KernelFeatures::from_bits(reader.read_u8()?).ok_or(
+				ser::Error::CorruptedData,
+			)?,
 			excess: Commitment::read(reader)?,
 			excess_sig: reader.read_vec()?,
 			fee: reader.read_u64()?,
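The `Writeable`/`Readable` pair above is a symmetric fixed-layout encoding: write the fields in order, read them back in the same order. A simplified standalone sketch over `std::io`, with a hypothetical two-field kernel standing in for the real one:

use std::io::{self, Cursor, Read, Write};

struct Kernel { features: u8, fee: u64 }

fn write_kernel<W: Write>(w: &mut W, k: &Kernel) -> io::Result<()> {
	w.write_all(&[k.features])?;          // one features byte
	w.write_all(&k.fee.to_be_bytes())     // fee as fixed-width big-endian
}

fn read_kernel<R: Read>(r: &mut R) -> io::Result<Kernel> {
	let mut features = [0u8; 1];
	r.read_exact(&mut features)?;
	let mut fee = [0u8; 8];
	r.read_exact(&mut fee)?;
	Ok(Kernel { features: features[0], fee: u64::from_be_bytes(fee) })
}

fn main() -> io::Result<()> {
	let k = Kernel { features: 1, fee: 4 };
	let mut buf = Vec::new();
	write_kernel(&mut buf, &k)?;
	let k2 = read_kernel(&mut Cursor::new(buf))?;
	assert_eq!((k2.features, k2.fee), (k.features, k.fee));
	Ok(())
}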
@@ -104,11 +107,13 @@ pub struct Transaction {
 /// write the transaction as binary.
 impl Writeable for Transaction {
 	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
-		ser_multiwrite!(writer,
-			[write_u64, self.fee],
-			[write_bytes, &self.excess_sig],
-			[write_u64, self.inputs.len() as u64],
-			[write_u64, self.outputs.len() as u64]);
+		ser_multiwrite!(
+			writer,
+			[write_u64, self.fee],
+			[write_bytes, &self.excess_sig],
+			[write_u64, self.inputs.len() as u64],
+			[write_u64, self.outputs.len() as u64]
+		);
 		for inp in &self.inputs {
 			try!(inp.write(writer));
 		}

@@ -185,7 +190,10 @@ impl Transaction {
 	pub fn with_input(self, input: Input) -> Transaction {
 		let mut new_ins = self.inputs;
 		new_ins.push(input);
-		Transaction { inputs: new_ins, ..self }
+		Transaction {
+			inputs: new_ins,
+			..self
+		}
 	}

 	/// Builds a new transaction with the provided output added. Existing

@@ -193,7 +201,10 @@ impl Transaction {
 	pub fn with_output(self, output: Output) -> Transaction {
 		let mut new_outs = self.outputs;
 		new_outs.push(output);
-		Transaction { outputs: new_outs, ..self }
+		Transaction {
+			outputs: new_outs,
+			..self
+		}
 	}

 	/// Builds a new transaction with the provided fee.

@@ -304,9 +315,11 @@ pub struct Output {
 /// an Output as binary.
 impl Writeable for Output {
 	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
-		ser_multiwrite!(writer,
-			[write_u8, self.features.bits()],
-			[write_fixed_bytes, &self.commit]);
+		ser_multiwrite!(
+			writer,
+			[write_u8, self.features.bits()],
+			[write_fixed_bytes, &self.commit]
+		);
 		// The hash of an output doesn't include the range proof
 		if writer.serialization_mode() == ser::SerializationMode::Full {
 			writer.write_bytes(&self.proof)?

@@ -320,8 +333,9 @@ impl Writeable for Output {
 impl Readable for Output {
 	fn read(reader: &mut Reader) -> Result<Output, ser::Error> {
 		Ok(Output {
-			features:
-				OutputFeatures::from_bits(reader.read_u8()?).ok_or(ser::Error::CorruptedData)?,
+			features: OutputFeatures::from_bits(reader.read_u8()?).ok_or(
+				ser::Error::CorruptedData,
+			)?,
 			commit: Commitment::read(reader)?,
 			proof: RangeProof::read(reader)?,
 		})

@@ -341,8 +355,6 @@ impl Output {

 	/// Validates the range proof using the commitment
 	pub fn verify_proof(&self, secp: &Secp256k1) -> Result<(), secp::Error> {
-		/// secp.verify_range_proof returns range if and only if both min_value and max_value less than 2^64
-		/// since group order is much larger (~2^256) we can be sure overflow is not the case
 		secp.verify_range_proof(self.commit, self.proof).map(|_| ())
 	}
 }

@@ -392,7 +404,10 @@ impl ops::Add for SumCommit {
 	type Output = SumCommit;

 	fn add(self, other: SumCommit) -> SumCommit {
-		let sum = match self.secp.commit_sum(vec![self.commit.clone(), other.commit.clone()], vec![]) {
+		let sum = match self.secp.commit_sum(
+			vec![self.commit.clone(), other.commit.clone()],
+			vec![],
+		) {
 			Ok(s) => s,
 			Err(_) => Commitment::from_vec(vec![1; 33]),
 		};

@@ -21,7 +21,7 @@
 /// different sets of parameters for different purposes,
 /// e.g. CI, User testing, production values

-use std::sync::{RwLock};
+use std::sync::RwLock;
 use consensus::PROOFSIZE;
 use consensus::DEFAULT_SIZESHIFT;

@@ -29,16 +29,16 @@ use consensus::DEFAULT_SIZESHIFT;
 /// by users

 /// Automated testing sizeshift
-pub const AUTOMATED_TESTING_SIZESHIFT:u8 = 10;
+pub const AUTOMATED_TESTING_SIZESHIFT: u8 = 10;

 /// Automated testing proof size
-pub const AUTOMATED_TESTING_PROOF_SIZE:usize = 4;
+pub const AUTOMATED_TESTING_PROOF_SIZE: usize = 4;

 /// User testing sizeshift
-pub const USER_TESTING_SIZESHIFT:u8 = 16;
+pub const USER_TESTING_SIZESHIFT: u8 = 16;

 /// User testing proof size
-pub const USER_TESTING_PROOF_SIZE:usize = 42;
+pub const USER_TESTING_PROOF_SIZE: usize = 42;

 /// Mining parameter modes
 #[derive(Debug, Clone, Serialize, Deserialize)]

@@ -55,18 +55,19 @@ pub enum MiningParameterMode {

 lazy_static!{
 	/// The mining parameter mode
-	pub static ref MINING_PARAMETER_MODE: RwLock<MiningParameterMode> = RwLock::new(MiningParameterMode::Production);
+	pub static ref MINING_PARAMETER_MODE: RwLock<MiningParameterMode> =
+		RwLock::new(MiningParameterMode::Production);
 }

 /// Set the mining mode
-pub fn set_mining_mode(mode:MiningParameterMode){
-	let mut param_ref=MINING_PARAMETER_MODE.write().unwrap();
-	*param_ref=mode;
+pub fn set_mining_mode(mode: MiningParameterMode) {
+	let mut param_ref = MINING_PARAMETER_MODE.write().unwrap();
+	*param_ref = mode;
 }

 /// The sizeshift
 pub fn sizeshift() -> u8 {
-	let param_ref=MINING_PARAMETER_MODE.read().unwrap();
+	let param_ref = MINING_PARAMETER_MODE.read().unwrap();
 	match *param_ref {
 		MiningParameterMode::AutomatedTesting => AUTOMATED_TESTING_SIZESHIFT,
 		MiningParameterMode::UserTesting => USER_TESTING_SIZESHIFT,
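Usage is a global read/write lock around the mode: set it once at startup, read it wherever a parameter is needed. A standalone approximation (the original uses `lazy_static`; a `static` `RwLock` needs Rust 1.63+, and the production value 30 is assumed here in place of `DEFAULT_SIZESHIFT`):

use std::sync::RwLock;

#[derive(Clone, Copy)]
enum Mode { AutomatedTesting, UserTesting, Production }

static MODE: RwLock<Mode> = RwLock::new(Mode::Production);

fn set_mining_mode(mode: Mode) {
	*MODE.write().unwrap() = mode;
}

fn sizeshift() -> u8 {
	match *MODE.read().unwrap() {
		Mode::AutomatedTesting => 10,
		Mode::UserTesting => 16,
		Mode::Production => 30, // assumed DEFAULT_SIZESHIFT
	}
}

fn main() {
	set_mining_mode(Mode::UserTesting);
	assert_eq!(sizeshift(), 16);
	println!("sizeshift = {}", sizeshift());
}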
@@ -76,7 +77,7 @@ pub fn sizeshift() -> u8 {

 /// The proofsize
 pub fn proofsize() -> usize {
-	let param_ref=MINING_PARAMETER_MODE.read().unwrap();
+	let param_ref = MINING_PARAMETER_MODE.read().unwrap();
 	match *param_ref {
 		MiningParameterMode::AutomatedTesting => AUTOMATED_TESTING_PROOF_SIZE,
 		MiningParameterMode::UserTesting => USER_TESTING_PROOF_SIZE,

@@ -86,8 +87,8 @@ pub fn proofsize() -> usize {

 /// Are we in automated testing mode?
 pub fn is_automated_testing_mode() -> bool {
-	let param_ref=MINING_PARAMETER_MODE.read().unwrap();
-	if let MiningParameterMode::AutomatedTesting=*param_ref {
+	let param_ref = MINING_PARAMETER_MODE.read().unwrap();
+	if let MiningParameterMode::AutomatedTesting = *param_ref {
 		return true;
 	} else {
 		return false;

@@ -96,8 +97,8 @@ pub fn is_automated_testing_mode() -> bool {

 /// Are we in production mode?
 pub fn is_production_mode() -> bool {
-	let param_ref=MINING_PARAMETER_MODE.read().unwrap();
-	if let MiningParameterMode::Production=*param_ref {
+	let param_ref = MINING_PARAMETER_MODE.read().unwrap();
+	if let MiningParameterMode::Production = *param_ref {
 		return true;
 	} else {
 		return false;

@@ -105,30 +106,72 @@ pub fn is_production_mode() -> bool {
 }

-/// Helper function to get a nonce known to create a valid POW on
+/// Helper function to get a nonce known to create a valid POW on
 /// the genesis block, to prevent it taking ages. Should be fine for now
-/// as the genesis block POW solution turns out to be the same for every new block chain
+/// as the genesis block POW solution turns out to be the same for every new
+/// block chain
 /// at the moment

 pub fn get_genesis_nonce() -> u64 {
-	let param_ref=MINING_PARAMETER_MODE.read().unwrap();
+	let param_ref = MINING_PARAMETER_MODE.read().unwrap();
 	match *param_ref {
-		MiningParameterMode::AutomatedTesting => 0, //won't make a difference
-		MiningParameterMode::UserTesting => 22141, //Magic nonce for current genesis block at cuckoo16
-		MiningParameterMode::Production => 1429942738856787200, //Magic nonce for current genesis at cuckoo30
+		// won't make a difference
+		MiningParameterMode::AutomatedTesting => 0,
+		// Magic nonce for current genesis block at cuckoo16
+		MiningParameterMode::UserTesting => 22141,
+		// Magic nonce for current genesis at cuckoo30
+		MiningParameterMode::Production => 1429942738856787200,
 	}
 }

-/// Returns the genesis POW for production, because it takes far too long to mine at production values
+/// Returns the genesis POW for production, because it takes far too long to
+/// mine at production values
 /// using the internal miner

-pub fn get_genesis_pow() -> [u32;42]{
-	//TODO: This is diff 26, probably just want a 10: mine one
-	[7444824, 11926557, 28520390, 30594072, 50854023, 52797085, 57882033,
-	 59816511, 61404804, 84947619, 87779345, 115270337, 162618676,
-	 166860710, 178656003, 178971372, 200454733, 209197630, 221231015,
-	 228598741, 241012783, 245401183, 279080304, 295848517, 327300943,
-	 329741709, 366394532, 382493153, 389329248, 404353381, 406012911,
-	 418813499, 426573907, 452566575, 456930760, 463021458, 474340589,
-	 476248039, 478197093, 487576917, 495653489, 501862896]
+pub fn get_genesis_pow() -> [u32; 42] {
+	// TODO: This is diff 26, probably just want a 10: mine one
+	[
+		7444824,
+		11926557,
+		28520390,
+		30594072,
+		50854023,
+		52797085,
+		57882033,
+		59816511,
+		61404804,
+		84947619,
+		87779345,
+		115270337,
+		162618676,
+		166860710,
+		178656003,
+		178971372,
+		200454733,
+		209197630,
+		221231015,
+		228598741,
+		241012783,
+		245401183,
+		279080304,
+		295848517,
+		327300943,
+		329741709,
+		366394532,
+		382493153,
+		389329248,
+		404353381,
+		406012911,
+		418813499,
+		426573907,
+		452566575,
+		456930760,
+		463021458,
+		474340589,
+		476248039,
+		478197093,
+		487576917,
+		495653489,
+		501862896,
+	]
 }

@@ -55,9 +55,10 @@ impl fmt::Display for Error {
 	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
 		match *self {
 			Error::IOErr(ref e) => write!(f, "{}", e),
-			Error::UnexpectedData { expected: ref e, received: ref r } => {
-				write!(f, "expected {:?}, got {:?}", e, r)
-			}
+			Error::UnexpectedData {
+				expected: ref e,
+				received: ref r,
+			} => write!(f, "expected {:?}, got {:?}", e, r),
 			Error::CorruptedData => f.write_str("corrupted data"),
 			Error::TooLargeReadErr => f.write_str("too large read"),
 		}

@@ -75,7 +76,10 @@ impl error::Error for Error {
 	fn description(&self) -> &str {
 		match *self {
 			Error::IOErr(ref e) => error::Error::description(e),
-			Error::UnexpectedData { expected: _, received: _ } => "unexpected data",
+			Error::UnexpectedData {
+				expected: _,
+				received: _,
+			} => "unexpected data",
 			Error::CorruptedData => "corrupted data",
 			Error::TooLargeReadErr => "too large read",
 		}

@@ -180,7 +184,8 @@ pub trait Writeable {
 /// Reads directly to a Reader, a utility type thinly wrapping an
 /// underlying Read implementation.
 pub trait Readable
-	where Self: Sized
+where
+	Self: Sized,
 {
 	/// Reads the data necessary to this Readable from the provided reader
 	fn read(reader: &mut Reader) -> Result<Self, Error>;

@@ -245,7 +250,9 @@ impl<'a> Reader for BinReader<'a> {
 			return Err(Error::TooLargeReadErr);
 		}
 		let mut buf = vec![0; length];
-		self.source.read_exact(&mut buf).map(move |_| buf).map_err(Error::IOErr)
+		self.source.read_exact(&mut buf).map(move |_| buf).map_err(
+			Error::IOErr,
+		)
 	}

 	fn expect_u8(&mut self, val: u8) -> Result<u8, Error> {

@@ -338,14 +345,19 @@ impl_int!(u32, write_u32, read_u32);
 impl_int!(u64, write_u64, read_u64);
 impl_int!(i64, write_i64, read_i64);

-impl<T> Readable for Vec<T> where T: Readable {
+impl<T> Readable for Vec<T>
+where
+	T: Readable,
+{
 	fn read(reader: &mut Reader) -> Result<Vec<T>, Error> {
 		let mut buf = Vec::new();
 		loop {
 			let elem = T::read(reader);
 			match elem {
 				Ok(e) => buf.push(e),
-				Err(Error::IOErr(ref ioerr)) if ioerr.kind() == io::ErrorKind::UnexpectedEof => break,
+				Err(Error::IOErr(ref ioerr)) if ioerr.kind() == io::ErrorKind::UnexpectedEof => {
+					break
+				}
 				Err(e) => return Err(e),
 			}
 		}

@@ -353,7 +365,10 @@ impl<T> Readable for Vec<T> where T: Readable {
 	}
 }

-impl<T> Writeable for Vec<T> where T: Writeable {
+impl<T> Writeable for Vec<T>
+where
+	T: Writeable,
+{
 	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), Error> {
 		for elmt in self {
 			elmt.write(writer)?;

@@ -400,18 +415,22 @@ impl<A: Writeable, B: Writeable, C: Writeable, D: Writeable> Writeable for (A, B

 impl<A: Readable, B: Readable, C: Readable> Readable for (A, B, C) {
 	fn read(reader: &mut Reader) -> Result<(A, B, C), Error> {
-		Ok((try!(Readable::read(reader)),
-		    try!(Readable::read(reader)),
-		    try!(Readable::read(reader))))
+		Ok((
+			try!(Readable::read(reader)),
+			try!(Readable::read(reader)),
+			try!(Readable::read(reader)),
+		))
 	}
 }

 impl<A: Readable, B: Readable, C: Readable, D: Readable> Readable for (A, B, C, D) {
 	fn read(reader: &mut Reader) -> Result<(A, B, C, D), Error> {
-		Ok((try!(Readable::read(reader)),
-		    try!(Readable::read(reader)),
-		    try!(Readable::read(reader)),
-		    try!(Readable::read(reader))))
+		Ok((
+			try!(Readable::read(reader)),
+			try!(Readable::read(reader)),
+			try!(Readable::read(reader)),
+			try!(Readable::read(reader)),
+		))
 	}
 }

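The `Vec<T>` reader above loops until the underlying reader reports `UnexpectedEof`, treating that as end-of-stream. The same shape over `std::io`, reading big-endian `u16` records (note that this sketch, like the original, would also silently drop a truncated final record):

use std::io::{self, Cursor, Read};

fn read_all_u16<R: Read>(r: &mut R) -> io::Result<Vec<u16>> {
	let mut buf = Vec::new();
	loop {
		let mut b = [0u8; 2];
		match r.read_exact(&mut b) {
			Ok(()) => buf.push(u16::from_be_bytes(b)),
			// end of stream: stop reading, keep what we have
			Err(ref e) if e.kind() == io::ErrorKind::UnexpectedEof => break,
			Err(e) => return Err(e),
		}
	}
	Ok(buf)
}

fn main() -> io::Result<()> {
	let bytes = vec![0x00, 0x01, 0x00, 0x02, 0x00, 0x03];
	let v = read_all_u16(&mut Cursor::new(bytes))?;
	assert_eq!(v, vec![1, 2, 3]);
	Ok(())
}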
@@ -27,7 +27,7 @@ use secp::pedersen::Commitment;
 use util::OneTime;
 use store;
 use sync;
-use core::global::{MiningParameterMode,MINING_PARAMETER_MODE};
+use core::global::{MiningParameterMode, MINING_PARAMETER_MODE};

 /// Implementation of the NetAdapter for the blockchain. Gets notified when new
 /// blocks and transactions are received and forwards to the chain and pool

@@ -56,7 +56,7 @@ impl NetAdapter for NetToChainAdapter {
 	}

 	fn block_received(&self, b: core::Block) {
-		let bhash = b.hash();
+		let bhash = b.hash();
 		debug!("Received block {} from network, going to process.", bhash);

 		// pushing the new block through the chain pipeline

@@ -81,10 +81,12 @@ impl NetAdapter for NetToChainAdapter {
 					added_hs.push(bh.hash());
 				}
 				Err(chain::Error::Unfit(s)) => {
-					info!("Received unfit block header {} at {}: {}.",
-					      bh.hash(),
-					      bh.height,
-					      s);
+					info!(
+						"Received unfit block header {} at {}: {}.",
+						bh.hash(),
+						bh.height,
+						s
+					);
 				}
 				Err(chain::Error::StoreErr(e)) => {
 					error!("Store error processing block header {}: {:?}", bh.hash(), e);

@@ -150,7 +152,11 @@ impl NetAdapter for NetToChainAdapter {
 	/// Find good peers we know with the provided capability and return their
 	/// addresses.
 	fn find_peer_addrs(&self, capab: p2p::Capabilities) -> Vec<SocketAddr> {
-		let peers = self.peer_store.find_peers(State::Healthy, capab, p2p::MAX_PEER_ADDRS as usize);
+		let peers = self.peer_store.find_peers(
+			State::Healthy,
+			capab,
+			p2p::MAX_PEER_ADDRS as usize,
+		);
 		debug!("Got {} peer addrs to send.", peers.len());
 		map_vec!(peers, |p| p.addr)
 	}

@@ -192,10 +198,11 @@ impl NetAdapter for NetToChainAdapter {
 }

 impl NetToChainAdapter {
-	pub fn new(chain_ref: Arc<chain::Chain>,
-	           tx_pool: Arc<RwLock<pool::TransactionPool<PoolToChainAdapter>>>,
-	           peer_store: Arc<PeerStore>)
-	           -> NetToChainAdapter {
+	pub fn new(
+		chain_ref: Arc<chain::Chain>,
+		tx_pool: Arc<RwLock<pool::TransactionPool<PoolToChainAdapter>>>,
+		peer_store: Arc<PeerStore>,
+	) -> NetToChainAdapter {
 		NetToChainAdapter {
 			chain: chain_ref,
 			peer_store: peer_store,

@@ -209,13 +216,15 @@ impl NetToChainAdapter {
 	pub fn start_sync(&self, sync: sync::Syncer) {
 		let arc_sync = Arc::new(sync);
 		self.syncer.init(arc_sync.clone());
-		let spawn_result = thread::Builder::new().name("syncer".to_string()).spawn(move || {
-			let sync_run_result = arc_sync.run();
-			match sync_run_result {
-				Ok(_) => {}
-				Err(_) => {}
-			}
-		});
+		let spawn_result = thread::Builder::new().name("syncer".to_string()).spawn(
+			move || {
+				let sync_run_result = arc_sync.run();
+				match sync_run_result {
+					Ok(_) => {}
+					Err(_) => {}
+				}
+			},
+		);
 		match spawn_result {
 			Ok(_) => {}
 			Err(_) => {}

@@ -229,7 +238,7 @@ impl NetToChainAdapter {
 		} else {
 			chain::NONE
 		};
-		let param_ref=MINING_PARAMETER_MODE.read().unwrap();
+		let param_ref = MINING_PARAMETER_MODE.read().unwrap();
 		let opts = match *param_ref {
 			MiningParameterMode::AutomatedTesting => opts | chain::EASY_POW,
 			MiningParameterMode::UserTesting => opts | chain::EASY_POW,
@@ -251,9 +260,11 @@ impl ChainAdapter for ChainToPoolAndNetAdapter {
 	fn block_accepted(&self, b: &core::Block) {
 		{
 			if let Err(e) = self.tx_pool.write().unwrap().reconcile_block(b) {
-				error!("Pool could not update itself at block {}: {:?}",
-				       b.hash(),
-				       e);
+				error!(
+					"Pool could not update itself at block {}: {:?}",
+					b.hash(),
+					e
+				);
 			}
 		}
 		self.p2p.borrow().broadcast_block(b);

@@ -261,8 +272,9 @@ impl ChainAdapter for ChainToPoolAndNetAdapter {
 }

 impl ChainToPoolAndNetAdapter {
-	pub fn new(tx_pool: Arc<RwLock<pool::TransactionPool<PoolToChainAdapter>>>)
-	           -> ChainToPoolAndNetAdapter {
+	pub fn new(
+		tx_pool: Arc<RwLock<pool::TransactionPool<PoolToChainAdapter>>>,
+	) -> ChainToPoolAndNetAdapter {
 		ChainToPoolAndNetAdapter {
 			tx_pool: tx_pool,
 			p2p: OneTime::new(),

@@ -294,21 +306,28 @@ impl PoolToChainAdapter {

 impl pool::BlockChain for PoolToChainAdapter {
 	fn get_unspent(&self, output_ref: &Commitment) -> Result<Output, pool::PoolError> {
-		self.chain.borrow().get_unspent(output_ref)
-			.map_err(|e| match e {
+		self.chain.borrow().get_unspent(output_ref).map_err(
+			|e| match e {
 				chain::types::Error::OutputNotFound => pool::PoolError::OutputNotFound,
 				chain::types::Error::OutputSpent => pool::PoolError::OutputSpent,
 				_ => pool::PoolError::GenericPoolError,
-			})
+			},
+		)
 	}

-	fn get_block_header_by_output_commit(&self, commit: &Commitment) -> Result<BlockHeader, pool::PoolError> {
-		self.chain.borrow().get_block_header_by_output_commit(commit)
+	fn get_block_header_by_output_commit(
+		&self,
+		commit: &Commitment,
+	) -> Result<BlockHeader, pool::PoolError> {
+		self.chain
+			.borrow()
+			.get_block_header_by_output_commit(commit)
 			.map_err(|_| pool::PoolError::GenericPoolError)
 	}

 	fn head_header(&self) -> Result<BlockHeader, pool::PoolError> {
-		self.chain.borrow().head_header()
-			.map_err(|_| pool::PoolError::GenericPoolError)
+		self.chain.borrow().head_header().map_err(|_| {
+			pool::PoolError::GenericPoolError
+		})
 	}
 }

@@ -55,5 +55,5 @@ mod sync;
 mod types;
 mod miner;

-pub use server::{Server};
+pub use server::Server;
 pub use types::{ServerConfig, Seeding, ServerStats};

@@ -231,7 +231,7 @@ impl Miner {
 					next_stat_output = time::get_time().sec + stat_output_interval;
 				}
 			}
-			//avoid busy wait
+			// avoid busy wait
 			let sleep_dur = std::time::Duration::from_millis(100);
 			thread::sleep(sleep_dur);
 		}

@@ -540,7 +540,9 @@ impl Miner {
 		b.header.nonce = rng.gen();
 		b.header.difficulty = difficulty;
 		b.header.timestamp = time::at_utc(time::Timespec::new(now_sec, 0));
-		self.chain.set_sumtree_roots(&mut b).expect("Error setting sum tree roots");
+		self.chain.set_sumtree_roots(&mut b).expect(
+			"Error setting sum tree roots",
+		);
 		b
 	}

grin/src/seed.rs

@@ -44,10 +44,11 @@ pub struct Seeder {
 }

 impl Seeder {
-	pub fn new(capabilities: p2p::Capabilities,
-	           peer_store: Arc<p2p::PeerStore>,
-	           p2p: Arc<p2p::Server>)
-	           -> Seeder {
+	pub fn new(
+		capabilities: p2p::Capabilities,
+		peer_store: Arc<p2p::PeerStore>,
+		p2p: Arc<p2p::Server>,
+	) -> Seeder {
 		Seeder {
 			peer_store: peer_store,
 			p2p: p2p,

@@ -55,17 +56,20 @@ impl Seeder {
 		}
 	}

-	pub fn connect_and_monitor(&self,
-	                           h: reactor::Handle,
-	                           seed_list: Box<Future<Item = Vec<SocketAddr>, Error = String>>) {
+	pub fn connect_and_monitor(
+		&self,
+		h: reactor::Handle,
+		seed_list: Box<Future<Item = Vec<SocketAddr>, Error = String>>,
+	) {
 		// open a channel with a listener that connects every peer address sent below
 		// max peer count
 		let (tx, rx) = futures::sync::mpsc::unbounded();
 		h.spawn(self.listen_for_addrs(h.clone(), rx));

 		// check seeds and start monitoring connections
-		let seeder = self.connect_to_seeds(tx.clone(), seed_list)
-			.join(self.monitor_peers(tx.clone()));
+		let seeder = self.connect_to_seeds(tx.clone(), seed_list).join(
+			self.monitor_peers(tx.clone()),
+		);

 		h.spawn(seeder.map(|_| ()).map_err(|e| {
 			error!("Seeding or peer monitoring error: {}", e);
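The channel-plus-listener shape above (addresses flow in over an unbounded channel, connections are made while under the peer cap) can be sketched with std threads and channels instead of futures. The addresses and the `PEER_MAX_COUNT` value here are illustrative only:

use std::sync::mpsc;
use std::thread;

const PEER_MAX_COUNT: usize = 2; // stand-in for the real constant

fn main() {
	let (tx, rx) = mpsc::channel::<String>();
	// listener: connect to each address received while under the max peer count
	let listener = thread::spawn(move || {
		let mut connected = Vec::new();
		for addr in rx {
			if connected.len() < PEER_MAX_COUNT {
				println!("connecting to {}", addr);
				connected.push(addr);
			}
		}
		connected
	});

	for a in &["10.0.0.1:13414", "10.0.0.2:13414", "10.0.0.3:13414"] {
		tx.send(a.to_string()).unwrap();
	}
	drop(tx); // closing the sender ends the listener's loop

	assert_eq!(listener.join().unwrap().len(), PEER_MAX_COUNT);
}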
@@ -73,9 +77,10 @@ impl Seeder {
 		}));
 	}

-	fn monitor_peers(&self,
-	                 tx: mpsc::UnboundedSender<SocketAddr>)
-	                 -> Box<Future<Item = (), Error = String>> {
+	fn monitor_peers(
+		&self,
+		tx: mpsc::UnboundedSender<SocketAddr>,
+	) -> Box<Future<Item = (), Error = String>> {
 		let peer_store = self.peer_store.clone();
 		let p2p_server = self.p2p.clone();

@@ -91,8 +96,8 @@ impl Seeder {
 			for p in disconnected {
 				if p.is_banned() {
 					debug!("Marking peer {} as banned.", p.info.addr);
-					let update_result = peer_store.update_state(
-						p.info.addr, p2p::State::Banned);
+					let update_result =
+						peer_store.update_state(p.info.addr, p2p::State::Banned);
 					match update_result {
 						Ok(()) => {}
 						Err(_) => {}

@@ -102,9 +107,11 @@ impl Seeder {

 			// we don't have enough peers, getting more from db
 			if p2p_server.peer_count() < PEER_PREFERRED_COUNT {
-				let mut peers = peer_store.find_peers(p2p::State::Healthy,
-				                                      p2p::UNKNOWN,
-				                                      (2 * PEER_MAX_COUNT) as usize);
+				let mut peers = peer_store.find_peers(
+					p2p::State::Healthy,
+					p2p::UNKNOWN,
+					(2 * PEER_MAX_COUNT) as usize,
+				);
 				peers.retain(|p| !p2p_server.is_known(p.addr));
 				if peers.len() > 0 {
 					debug!("Got {} more peers from db, trying to connect.", peers.len());

@@ -124,20 +131,24 @@ impl Seeder {

 	// Check if we have any pre-existing peer in db. If so, start with those,
 	// otherwise use the seeds provided.
-	fn connect_to_seeds(&self,
-	                    tx: mpsc::UnboundedSender<SocketAddr>,
-	                    seed_list: Box<Future<Item = Vec<SocketAddr>, Error = String>>)
-	                    -> Box<Future<Item = (), Error = String>> {
+	fn connect_to_seeds(
+		&self,
+		tx: mpsc::UnboundedSender<SocketAddr>,
+		seed_list: Box<Future<Item = Vec<SocketAddr>, Error = String>>,
+	) -> Box<Future<Item = (), Error = String>> {
 		let peer_store = self.peer_store.clone();

 		// a thread pool is required so we don't block the event loop with a
 		// db query
 		let thread_pool = cpupool::CpuPool::new(1);
-		let seeder = thread_pool.spawn_fn(move || {
+		let seeder = thread_pool
+			.spawn_fn(move || {
 				// check if we have some peers in db
-				let peers = peer_store.find_peers(p2p::State::Healthy,
-				                                  p2p::FULL_HIST,
-				                                  (2 * PEER_MAX_COUNT) as usize);
+				let peers = peer_store.find_peers(
+					p2p::State::Healthy,
+					p2p::FULL_HIST,
+					(2 * PEER_MAX_COUNT) as usize,
+				);
 				Ok(peers)
 			})
 			.and_then(|mut peers| {

@@ -168,10 +179,11 @@ impl Seeder {
 	/// addresses to and initiate a connection if the max peer count isn't
 	/// exceeded. A request for more peers is also automatically sent after
 	/// connection.
-	fn listen_for_addrs(&self,
-	                    h: reactor::Handle,
-	                    rx: mpsc::UnboundedReceiver<SocketAddr>)
-	                    -> Box<Future<Item = (), Error = ()>> {
+	fn listen_for_addrs(
+		&self,
+		h: reactor::Handle,
+		rx: mpsc::UnboundedReceiver<SocketAddr>,
+	) -> Box<Future<Item = (), Error = ()>> {
 		let capab = self.capabilities;
 		let p2p_store = self.peer_store.clone();
 		let p2p_server = self.p2p.clone();

@@ -180,11 +192,13 @@ impl Seeder {
 			debug!("New peer address to connect to: {}.", peer_addr);
 			let inner_h = h.clone();
 			if p2p_server.peer_count() < PEER_MAX_COUNT {
-				connect_and_req(capab,
-				                p2p_store.clone(),
-				                p2p_server.clone(),
-				                inner_h,
-				                peer_addr)
+				connect_and_req(
+					capab,
+					p2p_store.clone(),
+					p2p_server.clone(),
+					inner_h,
+					peer_addr,
+				)
 			} else {
 				Box::new(future::ok(()))
 			}

@@ -201,7 +215,8 @@ pub fn web_seeds(h: reactor::Handle) -> Box<Future<Item = Vec<SocketAddr>, Error
 	let client = hyper::Client::new(&h);

 	// http get, filtering out non 200 results
-	client.get(url)
+	client
+		.get(url)
 		.map_err(|e| e.to_string())
 		.and_then(|res| {
 			if res.status() != hyper::Ok {

@@ -211,14 +226,17 @@ pub fn web_seeds(h: reactor::Handle) -> Box<Future<Item = Vec<SocketAddr>, Error
 		})
 		.and_then(|res| {
 			// collect all chunks and split around whitespace to get a list of SocketAddr
-			res.body().collect().map_err(|e| e.to_string()).and_then(|chunks| {
-				let res = chunks.iter().fold("".to_string(), |acc, ref chunk| {
-					acc + str::from_utf8(&chunk[..]).unwrap()
-				});
-				let addrs =
-					res.split_whitespace().map(|s| s.parse().unwrap()).collect::<Vec<_>>();
-				Ok(addrs)
-			})
+			res.body().collect().map_err(|e| e.to_string()).and_then(
+				|chunks| {
+					let res = chunks.iter().fold("".to_string(), |acc, ref chunk| {
+						acc + str::from_utf8(&chunk[..]).unwrap()
+					});
+					let addrs = res.split_whitespace()
+						.map(|s| s.parse().unwrap())
+						.collect::<Vec<_>>();
+					Ok(addrs)
+				},
+			)
 		})
 	});
 	Box::new(seeds)

@@ -226,40 +244,47 @@ pub fn web_seeds(h: reactor::Handle) -> Box<Future<Item = Vec<SocketAddr>, Error

 /// Convenience function when the seed list is immediately known. Mostly used
 /// for tests.
-pub fn predefined_seeds(addrs_str: Vec<String>)
|
||||
-> Box<Future<Item = Vec<SocketAddr>, Error = String>> {
|
||||
let seeds = future::ok(())
|
||||
.and_then(move |_| Ok(addrs_str.iter().map(|s| s.parse().unwrap()).collect::<Vec<_>>()));
|
||||
pub fn predefined_seeds(
|
||||
addrs_str: Vec<String>,
|
||||
) -> Box<Future<Item = Vec<SocketAddr>, Error = String>> {
|
||||
let seeds = future::ok(()).and_then(move |_| {
|
||||
Ok(
|
||||
addrs_str
|
||||
.iter()
|
||||
.map(|s| s.parse().unwrap())
|
||||
.collect::<Vec<_>>(),
|
||||
)
|
||||
});
|
||||
Box::new(seeds)
|
||||
}
|
||||
|
||||
fn connect_and_req(capab: p2p::Capabilities,
|
||||
peer_store: Arc<p2p::PeerStore>,
|
||||
p2p: Arc<p2p::Server>,
|
||||
h: reactor::Handle,
|
||||
addr: SocketAddr)
|
||||
-> Box<Future<Item = (), Error = ()>> {
|
||||
let fut = p2p.connect_peer(addr, h)
|
||||
.then(move |p| {
|
||||
match p {
|
||||
Ok(Some(p)) => {
|
||||
let peer_result = p.send_peer_request(capab);
|
||||
match peer_result {
|
||||
Ok(()) => {}
|
||||
Err(_) => {}
|
||||
}
|
||||
fn connect_and_req(
|
||||
capab: p2p::Capabilities,
|
||||
peer_store: Arc<p2p::PeerStore>,
|
||||
p2p: Arc<p2p::Server>,
|
||||
h: reactor::Handle,
|
||||
addr: SocketAddr,
|
||||
) -> Box<Future<Item = (), Error = ()>> {
|
||||
let fut = p2p.connect_peer(addr, h).then(move |p| {
|
||||
match p {
|
||||
Ok(Some(p)) => {
|
||||
let peer_result = p.send_peer_request(capab);
|
||||
match peer_result {
|
||||
Ok(()) => {}
|
||||
Err(_) => {}
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Peer request error: {:?}", e);
|
||||
let update_result = peer_store.update_state(addr, p2p::State::Defunct);
|
||||
match update_result {
|
||||
Ok(()) => {}
|
||||
Err(_) => {}
|
||||
}
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
Ok(())
|
||||
});
|
||||
Err(e) => {
|
||||
error!("Peer request error: {:?}", e);
|
||||
let update_result = peer_store.update_state(addr, p2p::State::Defunct);
|
||||
match update_result {
|
||||
Ok(()) => {}
|
||||
Err(_) => {}
|
||||
}
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
Ok(())
|
||||
});
|
||||
Box::new(fut)
|
||||
}
|
||||
|
|
|
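The seeding flow above is a small fan-in pipeline: `connect_and_monitor` opens an unbounded channel, spawns `listen_for_addrs` on the receiving end, and both the seed connector and the peer monitor push addresses into clones of the sending end. A minimal sketch of the same pattern using `std::sync::mpsc` instead of the futures channel (names are illustrative, not the crate API):

use std::sync::mpsc;
use std::thread;

fn main() {
	// One receiver plays the role of listen_for_addrs; any number of
	// producers (seed list, peer monitor) share clones of the sender.
	let (tx, rx) = mpsc::channel::<String>();

	let seed_tx = tx.clone();
	let seeder = thread::spawn(move || {
		seed_tx.send("192.168.0.1:13414".to_string()).unwrap();
	});
	let monitor = thread::spawn(move || {
		tx.send("192.168.0.2:13414".to_string()).unwrap();
	});

	seeder.join().unwrap();
	monitor.join().unwrap();

	// The listener connects to each address as it arrives.
	for addr in rx.try_iter() {
		println!("would connect to {}", addr);
	}
}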
grin/src/server.rs

@@ -79,35 +79,47 @@ impl Server {
 	pub fn future(mut config: ServerConfig, evt_handle: &reactor::Handle) -> Result<Server, Error> {
 
 		let pool_adapter = Arc::new(PoolToChainAdapter::new());
-		let tx_pool = Arc::new(RwLock::new(pool::TransactionPool::new(pool_adapter.clone())));
+		let tx_pool = Arc::new(RwLock::new(
+			pool::TransactionPool::new(pool_adapter.clone()),
+		));
 
 		let chain_adapter = Arc::new(ChainToPoolAndNetAdapter::new(tx_pool.clone()));
 
 		let mut genesis_block = None;
-		if !chain::Chain::chain_exists(config.db_root.clone()){
-			genesis_block=pow::mine_genesis_block(config.mining_config.clone());
+		if !chain::Chain::chain_exists(config.db_root.clone()) {
+			genesis_block = pow::mine_genesis_block(config.mining_config.clone());
 		}
 
-		let shared_chain = Arc::new(chain::Chain::init(config.db_root.clone(),
-		                                               chain_adapter.clone(),
-		                                               genesis_block,
-		                                               pow::verify_size)?);
-
+		let shared_chain = Arc::new(chain::Chain::init(
+			config.db_root.clone(),
+			chain_adapter.clone(),
+			genesis_block,
+			pow::verify_size,
+		)?);
 
 		pool_adapter.set_chain(shared_chain.clone());
 
 		let peer_store = Arc::new(p2p::PeerStore::new(config.db_root.clone())?);
-		let net_adapter = Arc::new(NetToChainAdapter::new(shared_chain.clone(),
-		                                                  tx_pool.clone(),
-		                                                  peer_store.clone()));
-		let p2p_server =
-			Arc::new(p2p::Server::new(config.capabilities, config.p2p_config.unwrap(), net_adapter.clone()));
+		let net_adapter = Arc::new(NetToChainAdapter::new(
+			shared_chain.clone(),
+			tx_pool.clone(),
+			peer_store.clone(),
+		));
+		let p2p_server = Arc::new(p2p::Server::new(
+			config.capabilities,
+			config.p2p_config.unwrap(),
+			net_adapter.clone(),
+		));
 		chain_adapter.init(p2p_server.clone());
 
 		let seed = seed::Seeder::new(config.capabilities, peer_store.clone(), p2p_server.clone());
 		match config.seeding_type.clone() {
 			Seeding::None => {}
 			Seeding::List => {
-				seed.connect_and_monitor(evt_handle.clone(), seed::predefined_seeds(config.seeds.as_mut().unwrap().clone()));
+				seed.connect_and_monitor(
+					evt_handle.clone(),
+					seed::predefined_seeds(config.seeds.as_mut().unwrap().clone()),
+				);
 			}
 			Seeding::WebStatic => {
 				seed.connect_and_monitor(evt_handle.clone(), seed::web_seeds(evt_handle.clone()));

@@ -121,9 +133,11 @@ impl Server {
 
 		info!("Starting rest apis at: {}", &config.api_http_addr);
 
-		api::start_rest_apis(config.api_http_addr.clone(),
-		                     shared_chain.clone(),
-		                     tx_pool.clone());
+		api::start_rest_apis(
+			config.api_http_addr.clone(),
+			shared_chain.clone(),
+			tx_pool.clone(),
+		);
 
 		warn!("Grin server started.");
 		Ok(Server {

@@ -138,7 +152,12 @@ impl Server {
 	/// Asks the server to connect to a peer at the provided network address.
 	pub fn connect_peer(&self, addr: SocketAddr) -> Result<(), Error> {
 		let handle = self.evt_handle.clone();
-		handle.spawn(self.p2p.connect_peer(addr, handle.clone()).map(|_| ()).map_err(|_| ()));
+		handle.spawn(
+			self.p2p
+				.connect_peer(addr, handle.clone())
+				.map(|_| ())
+				.map_err(|_| ()),
+		);
 		Ok(())
 	}
 

@@ -154,7 +173,7 @@ impl Server {
 		let proof_size = global::proofsize();
 
 		let mut miner = miner::Miner::new(config.clone(), self.chain.clone(), self.tx_pool.clone());
-		miner.set_debug_output_id(format!("Port {}",self.config.p2p_config.unwrap().port));
+		miner.set_debug_output_id(format!("Port {}", self.config.p2p_config.unwrap().port));
 		thread::spawn(move || {
 			miner.run_loop(config.clone(), cuckoo_size as u32, proof_size);
 		});

@@ -165,12 +184,14 @@ impl Server {
 		self.chain.head().unwrap()
 	}
 
-	/// Returns a set of stats about this server. This and the ServerStats structure
-	/// can be updated over time to include any information needed by tests or other
+	/// Returns a set of stats about this server. This and the ServerStats
+	/// structure
+	/// can be updated over time to include any information needed by tests or
+	/// other
 	/// consumers
-
-	pub fn get_server_stats(&self) -> Result<ServerStats, Error>{
-		Ok(ServerStats{
+	pub fn get_server_stats(&self) -> Result<ServerStats, Error> {
+		Ok(ServerStats {
 			peer_count: self.peer_count(),
 			head: self.head(),
 		})
grin/src/sync.rs

@@ -129,8 +129,10 @@ impl Syncer {
 			prev_h = header.previous;
 		}
 
-		debug!("Added {} full block hashes to download.",
-		       blocks_to_download.len());
+		debug!(
+			"Added {} full block hashes to download.",
+			blocks_to_download.len()
+		);
 		Ok(())
 	}
 

@@ -141,7 +143,8 @@ impl Syncer {
 		if blocks_downloading.len() > MAX_BODY_DOWNLOADS {
 			// clean up potentially dead downloads
 			let twenty_sec_ago = Instant::now() - Duration::from_secs(20);
-			blocks_downloading.iter()
+			blocks_downloading
+				.iter()
 				.position(|&h| h.1 < twenty_sec_ago)
 				.map(|n| blocks_downloading.remove(n));
 		} else {

@@ -158,8 +161,10 @@ impl Syncer {
 			}
 			blocks_downloading.push((h, Instant::now()));
 		}
-		debug!("Requesting more full block hashes to download, total: {}.",
-		       blocks_to_download.len());
+		debug!(
+			"Requesting more full block hashes to download, total: {}.",
+			blocks_to_download.len()
+		);
 	}
 }
 

@@ -181,10 +186,12 @@ impl Syncer {
 		let peer = self.p2p.most_work_peer();
 		let locator = self.get_locator(&tip)?;
 		if let Some(p) = peer {
-			debug!("Asking peer {} for more block headers starting from {} at {}.",
-			       p.info.addr,
-			       tip.last_block_h,
-			       tip.height);
+			debug!(
+				"Asking peer {} for more block headers starting from {} at {}.",
+				p.info.addr,
+				tip.last_block_h,
+				tip.height
+			);
 			p.send_header_request(locator)?;
 		} else {
 			warn!("Could not get most worked peer to request headers.");
grin/src/types.rs

@@ -119,12 +119,10 @@ impl Default for ServerConfig {
 ///
 ///
 ///
-
 #[derive(Clone)]
 pub struct ServerStats {
 	/// Number of peers
-	pub peer_count:u32,
+	pub peer_count: u32,
 	/// Chain head
 	pub head: chain::Tip,
 }
118	p2p/src/conn.rs
@@ -42,22 +42,26 @@ pub trait Handler: Sync + Send {
 	/// Handle function to implement to process incoming messages. A sender to
 	/// reply immediately as well as the message header and its unparsed body
 	/// are provided.
-	fn handle(&self,
-	          sender: UnboundedSender<Vec<u8>>,
-	          header: MsgHeader,
-	          body: Vec<u8>)
-	          -> Result<Option<Hash>, ser::Error>;
+	fn handle(
+		&self,
+		sender: UnboundedSender<Vec<u8>>,
+		header: MsgHeader,
+		body: Vec<u8>,
+	) -> Result<Option<Hash>, ser::Error>;
 }
 
 impl<F> Handler for F
-	where F: Fn(UnboundedSender<Vec<u8>>, MsgHeader, Vec<u8>) -> Result<Option<Hash>, ser::Error>,
-	      F: Sync + Send
+where
+	F: Fn(UnboundedSender<Vec<u8>>, MsgHeader, Vec<u8>)
+		-> Result<Option<Hash>, ser::Error>,
+	F: Sync + Send,
 {
-	fn handle(&self,
-	          sender: UnboundedSender<Vec<u8>>,
-	          header: MsgHeader,
-	          body: Vec<u8>)
-	          -> Result<Option<Hash>, ser::Error> {
+	fn handle(
+		&self,
+		sender: UnboundedSender<Vec<u8>>,
+		header: MsgHeader,
+		body: Vec<u8>,
+	) -> Result<Option<Hash>, ser::Error> {
 		self(sender, header, body)
 	}
 }

@@ -87,10 +91,12 @@ impl Connection {
 	/// Start listening on the provided connection and wraps it. Does not hang
 	/// the current thread, instead just returns a future and the Connection
 	/// itself.
-	pub fn listen<F>(conn: TcpStream,
-	                 handler: F)
-	                 -> (Connection, Box<Future<Item = (), Error = Error>>)
-		where F: Handler + 'static
+	pub fn listen<F>(
+		conn: TcpStream,
+		handler: F,
+	) -> (Connection, Box<Future<Item = (), Error = Error>>)
+	where
+		F: Handler + 'static,
 	{
 
 		let (reader, writer) = conn.split();

@@ -105,7 +111,9 @@ impl Connection {
 
 		// same for closing the connection
 		let (close_tx, close_rx) = futures::sync::mpsc::channel(1);
-		let close_conn = close_rx.for_each(|_| Ok(())).map_err(|_| Error::ConnectionClose);
+		let close_conn = close_rx.for_each(|_| Ok(())).map_err(
+			|_| Error::ConnectionClose,
+		);
 
 		let me = Connection {
 			outbound_chan: tx.clone(),

@@ -123,21 +131,25 @@ impl Connection {
 		let write_msg = me.write_msg(rx, writer).map(|_| ());
 
 		// select between our different futures and return them
-		let fut =
-			Box::new(close_conn.select(read_msg.select(write_msg).map(|_| ()).map_err(|(e, _)| e))
+		let fut = Box::new(
+			close_conn
+				.select(read_msg.select(write_msg).map(|_| ()).map_err(|(e, _)| e))
 				.map(|_| ())
-				.map_err(|(e, _)| e));
+				.map_err(|(e, _)| e),
+		);
 
 		(me, fut)
 	}
 
 	/// Prepares the future that gets message data produced by our system and
 	/// sends it to the peer connection
-	fn write_msg<W>(&self,
-	                rx: UnboundedReceiver<Vec<u8>>,
-	                writer: W)
-	                -> Box<Future<Item = W, Error = Error>>
-		where W: AsyncWrite + 'static
+	fn write_msg<W>(
+		&self,
+		rx: UnboundedReceiver<Vec<u8>>,
+		writer: W,
+	) -> Box<Future<Item = W, Error = Error>>
+	where
+		W: AsyncWrite + 'static,
 	{
 
 		let sent_bytes = self.sent_bytes.clone();

@@ -158,13 +170,15 @@ impl Connection {
 
 	/// Prepares the future reading from the peer connection, parsing each
 	/// message and forwarding them appropriately based on their type
-	fn read_msg<F, R>(&self,
-	                  sender: UnboundedSender<Vec<u8>>,
-	                  reader: R,
-	                  handler: F)
-	                  -> Box<Future<Item = R, Error = Error>>
-		where F: Handler + 'static,
-		      R: AsyncRead + 'static
+	fn read_msg<F, R>(
+		&self,
+		sender: UnboundedSender<Vec<u8>>,
+		reader: R,
+		handler: F,
+	) -> Box<Future<Item = R, Error = Error>>
+	where
+		F: Handler + 'static,
+		R: AsyncRead + 'static,
 	{
 
 		// infinite iterator stream so we repeat the message reading logic until the

@@ -218,10 +232,15 @@ impl Connection {
 		let mut body_data = vec![];
 		try!(ser::serialize(&mut body_data, body));
 		let mut data = vec![];
-		try!(ser::serialize(&mut data, &MsgHeader::new(t, body_data.len() as u64)));
+		try!(ser::serialize(
+			&mut data,
+			&MsgHeader::new(t, body_data.len() as u64),
+		));
 		data.append(&mut body_data);
 
-		self.outbound_chan.send(data).map_err(|_| Error::ConnectionClose)
+		self.outbound_chan.send(data).map_err(
+			|_| Error::ConnectionClose,
+		)
 	}
 
 	/// Bytes sent and received by this peer to the remote peer.

@@ -242,10 +261,12 @@ pub struct TimeoutConnection {
 
 impl TimeoutConnection {
 	/// Same as Connection
-	pub fn listen<F>(conn: TcpStream,
-	                 handler: F)
-	                 -> (TimeoutConnection, Box<Future<Item = (), Error = Error>>)
-		where F: Handler + 'static
+	pub fn listen<F>(
+		conn: TcpStream,
+		handler: F,
+	) -> (TimeoutConnection, Box<Future<Item = (), Error = Error>>)
+	where
+		F: Handler + 'static,
 	{
 
 		let expects = Arc::new(Mutex::new(vec![]));

@@ -258,7 +279,8 @@ impl TimeoutConnection {
 			let recv_h = try!(handler.handle(sender, header, data));
 
 			let mut expects = exp.lock().unwrap();
-			let filtered = expects.iter()
+			let filtered = expects
+				.iter()
 				.filter(|&&(typ, h, _): &&(Type, Option<Hash>, Instant)| {
 					msg_type != typ || h.is_some() && recv_h != h
 				})

@@ -288,17 +310,21 @@ impl TimeoutConnection {
 			underlying: conn,
 			expected_responses: expects,
 		};
-		(me, Box::new(fut.select(timer).map(|_| ()).map_err(|(e1, _)| e1)))
+		(
+			me,
+			Box::new(fut.select(timer).map(|_| ()).map_err(|(e1, _)| e1)),
+		)
 	}
 
 	/// Sends a request and registers a timer on the provided message type and
 	/// optionally the hash of the sent data.
-	pub fn send_request<W: ser::Writeable>(&self,
-	                                       t: Type,
-	                                       rt: Type,
-	                                       body: &W,
-	                                       expect_h: Option<(Hash)>)
-	                                       -> Result<(), Error> {
+	pub fn send_request<W: ser::Writeable>(
+		&self,
+		t: Type,
+		rt: Type,
+		body: &W,
+		expect_h: Option<(Hash)>,
+	) -> Result<(), Error> {
 		let _sent = try!(self.underlying.send_msg(t, body));
 
 		let mut expects = self.expected_responses.lock().unwrap();
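`send_msg` above always produces one contiguous buffer: the serialized `MsgHeader` (magic bytes, message type, body length) followed by the serialized body, so a reader can pull a fixed-size header first and then allocate exactly `msg_len` bytes for the rest. A self-contained sketch of that length-prefixed framing (simplified header layout, not Grin's exact wire format):

use std::convert::TryInto;

fn frame(msg_type: u8, body: &[u8]) -> Vec<u8> {
	// 2 magic bytes, 1 type byte, 8 big-endian length bytes, then the body.
	let mut data = vec![0x1e, 0xc5, msg_type];
	data.extend_from_slice(&(body.len() as u64).to_be_bytes());
	data.extend_from_slice(body);
	data
}

fn unframe(data: &[u8]) -> Option<(u8, &[u8])> {
	if data.len() < 11 || data[0] != 0x1e || data[1] != 0xc5 {
		return None; // too short or wrong magic
	}
	let len = u64::from_be_bytes(data[3..11].try_into().ok()?) as usize;
	data.get(11..11 + len).map(|body| (data[2], body))
}

fn main() {
	let framed = frame(1, b"ping payload");
	let (t, body) = unframe(&framed).unwrap();
	assert_eq!((t, body), (1, &b"ping payload"[..]));
}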
p2p/src/handshake.rs

@@ -47,12 +47,13 @@ impl Handshake {
 	}
 
 	/// Handles connecting to a new remote peer, starting the version handshake.
-	pub fn connect(&self,
-	               capab: Capabilities,
-	               total_difficulty: Difficulty,
-	               self_addr: SocketAddr,
-	               conn: TcpStream)
-	               -> Box<Future<Item = (TcpStream, ProtocolV1, PeerInfo), Error = Error>> {
+	pub fn connect(
+		&self,
+		capab: Capabilities,
+		total_difficulty: Difficulty,
+		self_addr: SocketAddr,
+		conn: TcpStream,
+	) -> Box<Future<Item = (TcpStream, ProtocolV1, PeerInfo), Error = Error>> {
 		// prepare the first part of the hanshake
 		let nonce = self.next_nonce();
 		let hand = Hand {

@@ -66,79 +67,84 @@ impl Handshake {
 		};
 
 		// write and read the handshake response
-		Box::new(write_msg(conn, hand, Type::Hand)
-			.and_then(|conn| read_msg::<Shake>(conn))
-			.and_then(|(conn, shake)| {
-				if shake.version != 1 {
-					Err(Error::Serialization(ser::Error::UnexpectedData {
-						expected: vec![PROTOCOL_VERSION as u8],
-						received: vec![shake.version as u8],
-					}))
-				} else {
-					let peer_info = PeerInfo {
-						capabilities: shake.capabilities,
-						user_agent: shake.user_agent,
-						addr: conn.peer_addr().unwrap(),
-						version: shake.version,
-						total_difficulty: shake.total_difficulty,
-					};
+		Box::new(
+			write_msg(conn, hand, Type::Hand)
+				.and_then(|conn| read_msg::<Shake>(conn))
+				.and_then(|(conn, shake)| {
+					if shake.version != 1 {
+						Err(Error::Serialization(ser::Error::UnexpectedData {
+							expected: vec![PROTOCOL_VERSION as u8],
+							received: vec![shake.version as u8],
+						}))
+					} else {
+						let peer_info = PeerInfo {
+							capabilities: shake.capabilities,
+							user_agent: shake.user_agent,
+							addr: conn.peer_addr().unwrap(),
+							version: shake.version,
+							total_difficulty: shake.total_difficulty,
+						};
 
-					info!("Connected to peer {:?}", peer_info);
-					// when more than one protocol version is supported, choosing should go here
-					Ok((conn, ProtocolV1::new(), peer_info))
-				}
-			}))
+						info!("Connected to peer {:?}", peer_info);
+						// when more than one protocol version is supported, choosing should go here
+						Ok((conn, ProtocolV1::new(), peer_info))
+					}
+				}),
+		)
 	}
 
 	/// Handles receiving a connection from a new remote peer that started the
 	/// version handshake.
-	pub fn handshake(&self,
-	                 capab: Capabilities,
-	                 total_difficulty: Difficulty,
-	                 conn: TcpStream)
-	                 -> Box<Future<Item = (TcpStream, ProtocolV1, PeerInfo), Error = Error>> {
+	pub fn handshake(
+		&self,
+		capab: Capabilities,
+		total_difficulty: Difficulty,
+		conn: TcpStream,
+	) -> Box<Future<Item = (TcpStream, ProtocolV1, PeerInfo), Error = Error>> {
 		let nonces = self.nonces.clone();
-		Box::new(read_msg::<Hand>(conn)
-			.and_then(move |(conn, hand)| {
-				if hand.version != 1 {
-					return Err(Error::Serialization(ser::Error::UnexpectedData {
-						expected: vec![PROTOCOL_VERSION as u8],
-						received: vec![hand.version as u8],
-					}));
-				}
-				{
-					// check the nonce to see if we could be trying to connect to ourselves
-					let nonces = nonces.read().unwrap();
-					if nonces.contains(&hand.nonce) {
-						return Err(Error::Serialization(ser::Error::UnexpectedData {
-							expected: vec![],
-							received: vec![],
-						}));
-					}
-				}
-				// all good, keep peer info
-				let peer_info = PeerInfo {
-					capabilities: hand.capabilities,
-					user_agent: hand.user_agent,
-					addr: hand.sender_addr.0,
-					version: hand.version,
-					total_difficulty: hand.total_difficulty,
-				};
-				// send our reply with our info
-				let shake = Shake {
-					version: PROTOCOL_VERSION,
-					capabilities: capab,
-					total_difficulty: total_difficulty,
-					user_agent: USER_AGENT.to_string(),
-				};
-				Ok((conn, shake, peer_info))
-			})
-			.and_then(|(conn, shake, peer_info)| {
-				debug!("Success handshake with {}.", peer_info.addr);
-				write_msg(conn, shake, Type::Shake)
-				// when more than one protocol version is supported, choosing should go here
-				.map(|conn| (conn, ProtocolV1::new(), peer_info))
-			}))
+		Box::new(
+			read_msg::<Hand>(conn)
+				.and_then(move |(conn, hand)| {
+					if hand.version != 1 {
+						return Err(Error::Serialization(ser::Error::UnexpectedData {
+							expected: vec![PROTOCOL_VERSION as u8],
+							received: vec![hand.version as u8],
+						}));
+					}
+					{
+						// check the nonce to see if we could be trying to connect to ourselves
+						let nonces = nonces.read().unwrap();
+						if nonces.contains(&hand.nonce) {
+							return Err(Error::Serialization(ser::Error::UnexpectedData {
+								expected: vec![],
+								received: vec![],
+							}));
+						}
+					}
+					// all good, keep peer info
+					let peer_info = PeerInfo {
+						capabilities: hand.capabilities,
+						user_agent: hand.user_agent,
+						addr: hand.sender_addr.0,
+						version: hand.version,
+						total_difficulty: hand.total_difficulty,
+					};
+					// send our reply with our info
+					let shake = Shake {
+						version: PROTOCOL_VERSION,
+						capabilities: capab,
+						total_difficulty: total_difficulty,
+						user_agent: USER_AGENT.to_string(),
+					};
+					Ok((conn, shake, peer_info))
+				})
+				.and_then(|(conn, shake, peer_info)| {
+					debug!("Success handshake with {}.", peer_info.addr);
+					write_msg(conn, shake, Type::Shake)
+					// when more than one protocol version is supported, choosing should go here
+					.map(|conn| (conn, ProtocolV1::new(), peer_info))
+				}),
+		)
 	}
 
 	/// Generate a new random nonce and store it in our ring buffer
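The nonce check in `handshake` above guards against a node dialling itself: every outgoing Hand carries a nonce that is also remembered locally, so an incoming Hand whose nonce sits in that ring buffer must be our own connection looping back. A minimal sketch of the idea (hypothetical capacity and names):

use std::collections::VecDeque;

const NONCES_CAP: usize = 100;

struct NonceRing {
	recent: VecDeque<u64>,
}

impl NonceRing {
	fn new() -> NonceRing {
		NonceRing { recent: VecDeque::with_capacity(NONCES_CAP) }
	}

	// Called when we initiate a connection: remember the nonce we sent.
	fn remember(&mut self, nonce: u64) -> u64 {
		if self.recent.len() == NONCES_CAP {
			self.recent.pop_front(); // drop the oldest entry
		}
		self.recent.push_back(nonce);
		nonce
	}

	// Called on an incoming handshake: a known nonce means we dialled ourselves.
	fn is_self_connect(&self, incoming_nonce: u64) -> bool {
		self.recent.contains(&incoming_nonce)
	}
}

fn main() {
	let mut ring = NonceRing::new();
	let sent = ring.remember(42);
	assert!(ring.is_self_connect(sent));
	assert!(!ring.is_self_connect(7));
}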
109	p2p/src/msg.rs
@@ -70,7 +70,8 @@ enum_from_primitive! {
 /// the header first, handles its validation and then reads the Readable body,
 /// allocating buffers of the right size.
 pub fn read_msg<T>(conn: TcpStream) -> Box<Future<Item = (TcpStream, T), Error = Error>>
-	where T: Readable + 'static
+where
+	T: Readable + 'static,
 {
 	let read_header = read_exact(conn, vec![0u8; HEADER_LEN as usize])
 		.from_err()

@@ -84,7 +85,8 @@ pub fn read_msg<T>(conn: TcpStream) -> Box<Future<Item = (TcpStream, T), Error = Error>>
 			Ok((reader, header))
 		});
 
-	let read_msg = read_header.and_then(|(reader, header)| {
+	let read_msg = read_header
+		.and_then(|(reader, header)| {
 			read_exact(reader, vec![0u8; header.msg_len as usize]).from_err()
 		})
 		.and_then(|(reader, buf)| {

@@ -97,11 +99,13 @@ pub fn read_msg<T>(conn: TcpStream) -> Box<Future<Item = (TcpStream, T), Error = Error>>
 /// Future combinator to write a full message from a Writeable payload.
 /// Serializes the payload first and then sends the message header and that
 /// payload.
-pub fn write_msg<T>(conn: TcpStream,
-                    msg: T,
-                    msg_type: Type)
-                    -> Box<Future<Item = TcpStream, Error = Error>>
-	where T: Writeable + 'static
+pub fn write_msg<T>(
+	conn: TcpStream,
+	msg: T,
+	msg_type: Type,
+) -> Box<Future<Item = TcpStream, Error = Error>>
+where
+	T: Writeable + 'static,
 {
 	let write_msg = ok((conn)).and_then(move |conn| {
 		// prepare the body first so we know its serialized length

@@ -149,11 +153,13 @@ impl MsgHeader {
 
 impl Writeable for MsgHeader {
 	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
-		ser_multiwrite!(writer,
-		                [write_u8, self.magic[0]],
-		                [write_u8, self.magic[1]],
-		                [write_u8, self.msg_type as u8],
-		                [write_u64, self.msg_len]);
+		ser_multiwrite!(
+			writer,
+			[write_u8, self.magic[0]],
+			[write_u8, self.magic[1]],
+			[write_u8, self.msg_type as u8],
+			[write_u64, self.msg_len]
+		);
 		Ok(())
 	}
 }

@@ -199,10 +205,12 @@ pub struct Hand {
 
 impl Writeable for Hand {
 	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
-		ser_multiwrite!(writer,
-		                [write_u32, self.version],
-		                [write_u32, self.capabilities.bits()],
-		                [write_u64, self.nonce]);
+		ser_multiwrite!(
+			writer,
+			[write_u32, self.version],
+			[write_u32, self.capabilities.bits()],
+			[write_u64, self.nonce]
+		);
 		self.total_difficulty.write(writer).unwrap();
 		self.sender_addr.write(writer).unwrap();
 		self.receiver_addr.write(writer).unwrap();

@@ -218,7 +226,9 @@ impl Readable for Hand {
 		let receiver_addr = try!(SockAddr::read(reader));
 		let ua = try!(reader.read_vec());
 		let user_agent = try!(String::from_utf8(ua).map_err(|_| ser::Error::CorruptedData));
-		let capabilities = try!(Capabilities::from_bits(capab).ok_or(ser::Error::CorruptedData));
+		let capabilities = try!(Capabilities::from_bits(capab).ok_or(
+			ser::Error::CorruptedData,
+		));
 		Ok(Hand {
 			version: version,
 			capabilities: capabilities,

@@ -248,9 +258,11 @@ pub struct Shake {
 
 impl Writeable for Shake {
 	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
-		ser_multiwrite!(writer,
-		                [write_u32, self.version],
-		                [write_u32, self.capabilities.bits()]);
+		ser_multiwrite!(
+			writer,
+			[write_u32, self.version],
+			[write_u32, self.capabilities.bits()]
+		);
 		self.total_difficulty.write(writer).unwrap();
 		writer.write_bytes(&self.user_agent).unwrap();
 		Ok(())

@@ -263,7 +275,9 @@ impl Readable for Shake {
 		let total_diff = try!(Difficulty::read(reader));
 		let ua = try!(reader.read_vec());
 		let user_agent = try!(String::from_utf8(ua).map_err(|_| ser::Error::CorruptedData));
-		let capabilities = try!(Capabilities::from_bits(capab).ok_or(ser::Error::CorruptedData));
+		let capabilities = try!(Capabilities::from_bits(capab).ok_or(
+			ser::Error::CorruptedData,
+		));
 		Ok(Shake {
 			version: version,
 			capabilities: capabilities,

@@ -288,7 +302,9 @@ impl Writeable for GetPeerAddrs {
 impl Readable for GetPeerAddrs {
 	fn read(reader: &mut Reader) -> Result<GetPeerAddrs, ser::Error> {
 		let capab = try!(reader.read_u32());
-		let capabilities = try!(Capabilities::from_bits(capab).ok_or(ser::Error::CorruptedData));
+		let capabilities = try!(Capabilities::from_bits(capab).ok_or(
+			ser::Error::CorruptedData,
+		));
 		Ok(GetPeerAddrs { capabilities: capabilities })
 	}
 }

@@ -345,7 +361,9 @@ impl Writeable for PeerError {
 impl Readable for PeerError {
 	fn read(reader: &mut Reader) -> Result<PeerError, ser::Error> {
 		let (code, msg) = ser_multiread!(reader, read_u32, read_vec);
-		let message = try!(String::from_utf8(msg).map_err(|_| ser::Error::CorruptedData));
+		let message = try!(String::from_utf8(msg).map_err(
+			|_| ser::Error::CorruptedData,
+		));
 		Ok(PeerError {
 			code: code,
 			message: message,

@@ -362,10 +380,12 @@ impl Writeable for SockAddr {
 	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
 		match self.0 {
 			SocketAddr::V4(sav4) => {
-				ser_multiwrite!(writer,
-				                [write_u8, 0],
-				                [write_fixed_bytes, &sav4.ip().octets().to_vec()],
-				                [write_u16, sav4.port()]);
+				ser_multiwrite!(
+					writer,
+					[write_u8, 0],
+					[write_fixed_bytes, &sav4.ip().octets().to_vec()],
+					[write_u16, sav4.port()]
+				);
 			}
 			SocketAddr::V6(sav6) => {
 				try!(writer.write_u8(1));

@@ -385,25 +405,28 @@ impl Readable for SockAddr {
 		if v4_or_v6 == 0 {
 			let ip = try!(reader.read_fixed_bytes(4));
 			let port = try!(reader.read_u16());
-			Ok(SockAddr(SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(ip[0],
-			                                                           ip[1],
-			                                                           ip[2],
-			                                                           ip[3]),
-			                                             port))))
+			Ok(SockAddr(SocketAddr::V4(SocketAddrV4::new(
+				Ipv4Addr::new(ip[0], ip[1], ip[2], ip[3]),
+				port,
+			))))
 		} else {
 			let ip = try_map_vec!([0..8], |_| reader.read_u16());
 			let port = try!(reader.read_u16());
-			Ok(SockAddr(SocketAddr::V6(SocketAddrV6::new(Ipv6Addr::new(ip[0],
-			                                                           ip[1],
-			                                                           ip[2],
-			                                                           ip[3],
-			                                                           ip[4],
-			                                                           ip[5],
-			                                                           ip[6],
-			                                                           ip[7]),
-			                                             port,
-			                                             0,
-			                                             0))))
+			Ok(SockAddr(SocketAddr::V6(SocketAddrV6::new(
+				Ipv6Addr::new(
+					ip[0],
+					ip[1],
+					ip[2],
+					ip[3],
+					ip[4],
+					ip[5],
+					ip[6],
+					ip[7],
+				),
+				port,
+				0,
+				0,
+			))))
 		}
 	}
 }
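The `SockAddr` codec above uses a one-byte tag to distinguish address families: 0 means IPv4 (four octets plus a u16 port), 1 means IPv6 (eight u16 segments plus port). A compact sketch of the same scheme over plain byte vectors (illustrative only; Grin serializes through its `ser` traits):

use std::net::{IpAddr, Ipv4Addr, SocketAddr};

fn encode(addr: SocketAddr) -> Vec<u8> {
	let mut out = vec![];
	match addr.ip() {
		IpAddr::V4(ip) => {
			out.push(0); // tag byte: 0 = IPv4
			out.extend_from_slice(&ip.octets());
		}
		IpAddr::V6(ip) => {
			out.push(1); // tag byte: 1 = IPv6
			out.extend_from_slice(&ip.octets());
		}
	}
	out.extend_from_slice(&addr.port().to_be_bytes());
	out
}

fn decode(buf: &[u8]) -> Option<SocketAddr> {
	match buf.first()? {
		0 => {
			if buf.len() < 7 {
				return None;
			}
			let ip = Ipv4Addr::new(buf[1], buf[2], buf[3], buf[4]);
			let port = u16::from_be_bytes([buf[5], buf[6]]);
			Some(SocketAddr::new(IpAddr::V4(ip), port))
		}
		_ => None, // IPv6 decoding elided for brevity
	}
}

fn main() {
	let addr: SocketAddr = "10.0.0.1:13414".parse().unwrap();
	assert_eq!(decode(&encode(addr)), Some(addr));
}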
p2p/src/peer.rs

@@ -42,48 +42,58 @@ unsafe impl Send for Peer {}
 
 impl Peer {
 	/// Initiates the handshake with another peer.
-	pub fn connect(conn: TcpStream,
-	               capab: Capabilities,
-	               total_difficulty: Difficulty,
-	               self_addr: SocketAddr,
-	               hs: &Handshake)
-	               -> Box<Future<Item = (TcpStream, Peer), Error = Error>> {
+	pub fn connect(
+		conn: TcpStream,
+		capab: Capabilities,
+		total_difficulty: Difficulty,
+		self_addr: SocketAddr,
+		hs: &Handshake,
+	) -> Box<Future<Item = (TcpStream, Peer), Error = Error>> {
 		let connect_peer = hs.connect(capab, total_difficulty, self_addr, conn)
 			.and_then(|(conn, proto, info)| {
-				Ok((conn,
-				    Peer {
-					info: info,
-					proto: Box::new(proto),
-					state: Arc::new(RwLock::new(State::Connected)),
-				}))
+				Ok((
+					conn,
+					Peer {
+						info: info,
+						proto: Box::new(proto),
+						state: Arc::new(RwLock::new(State::Connected)),
+					},
+				))
 			});
 		Box::new(connect_peer)
 	}
 
 	/// Accept a handshake initiated by another peer.
-	pub fn accept(conn: TcpStream,
-	              capab: Capabilities,
-	              total_difficulty: Difficulty,
-	              hs: &Handshake)
-	              -> Box<Future<Item = (TcpStream, Peer), Error = Error>> {
-		let hs_peer = hs.handshake(capab, total_difficulty, conn)
-			.and_then(|(conn, proto, info)| {
-				Ok((conn,
-				    Peer {
-					info: info,
-					proto: Box::new(proto),
-					state: Arc::new(RwLock::new(State::Connected)),
-				}))
-			});
+	pub fn accept(
+		conn: TcpStream,
+		capab: Capabilities,
+		total_difficulty: Difficulty,
+		hs: &Handshake,
+	) -> Box<Future<Item = (TcpStream, Peer), Error = Error>> {
+		let hs_peer = hs.handshake(capab, total_difficulty, conn).and_then(
+			|(conn,
+			  proto,
+			  info)| {
+				Ok((
+					conn,
+					Peer {
+						info: info,
+						proto: Box::new(proto),
+						state: Arc::new(RwLock::new(State::Connected)),
+					},
+				))
+			},
+		);
 		Box::new(hs_peer)
 	}
 
 	/// Main peer loop listening for messages and forwarding to the rest of the
 	/// system.
-	pub fn run(&self,
-	           conn: TcpStream,
-	           na: Arc<NetAdapter>)
-	           -> Box<Future<Item = (), Error = Error>> {
+	pub fn run(
+		&self,
+		conn: TcpStream,
+		na: Arc<NetAdapter>,
+	) -> Box<Future<Item = (), Error = Error>> {
 
 		let addr = self.info.addr;
 		let state = self.state.clone();
p2p/src/protocol.rs

@@ -44,10 +44,11 @@ impl ProtocolV1 {
 
 impl Protocol for ProtocolV1 {
 	/// Sets up the protocol reading, writing and closing logic.
-	fn handle(&self,
-	          conn: TcpStream,
-	          adapter: Arc<NetAdapter>)
-	          -> Box<Future<Item = (), Error = Error>> {
+	fn handle(
+		&self,
+		conn: TcpStream,
+		adapter: Arc<NetAdapter>,
+	) -> Box<Future<Item = (), Error = Error>> {
 
 		let (conn, listener) = TimeoutConnection::listen(conn, move |sender, header, data| {
 			let adapt = adapter.as_ref();

@@ -81,10 +82,12 @@ impl Protocol for ProtocolV1 {
 	}
 
 	fn send_header_request(&self, locator: Vec<Hash>) -> Result<(), Error> {
-		self.send_request(Type::GetHeaders,
-		                  Type::Headers,
-		                  &Locator { hashes: locator },
-		                  None)
+		self.send_request(
+			Type::GetHeaders,
+			Type::Headers,
+			&Locator { hashes: locator },
+			None,
+		)
 	}
 
 	fn send_block_request(&self, h: Hash) -> Result<(), Error> {

@@ -92,10 +95,12 @@ impl Protocol for ProtocolV1 {
 	}
 
 	fn send_peer_request(&self, capab: Capabilities) -> Result<(), Error> {
-		self.send_request(Type::GetPeerAddrs,
-		                  Type::PeerAddrs,
-		                  &GetPeerAddrs { capabilities: capab },
-		                  None)
+		self.send_request(
+			Type::GetPeerAddrs,
+			Type::PeerAddrs,
+			&GetPeerAddrs { capabilities: capab },
+			None,
+		)
 	}
 
 	/// Close the connection to the remote peer

@@ -109,21 +114,23 @@ impl ProtocolV1 {
 		self.conn.borrow().send_msg(t, body)
 	}
 
-	fn send_request<W: ser::Writeable>(&self,
-	                                   t: Type,
-	                                   rt: Type,
-	                                   body: &W,
-	                                   expect_resp: Option<Hash>)
-	                                   -> Result<(), Error> {
+	fn send_request<W: ser::Writeable>(
+		&self,
+		t: Type,
+		rt: Type,
+		body: &W,
+		expect_resp: Option<Hash>,
+	) -> Result<(), Error> {
 		self.conn.borrow().send_request(t, rt, body, expect_resp)
 	}
 }
 
-fn handle_payload(adapter: &NetAdapter,
-                  sender: UnboundedSender<Vec<u8>>,
-                  header: MsgHeader,
-                  buf: Vec<u8>)
-                  -> Result<Option<Hash>, ser::Error> {
+fn handle_payload(
+	adapter: &NetAdapter,
+	sender: UnboundedSender<Vec<u8>>,
+	header: MsgHeader,
+	buf: Vec<u8>,
+) -> Result<Option<Hash>, ser::Error> {
 	match header.msg_type {
 		Type::Ping => {
 			let data = ser::ser_vec(&MsgHeader::new(Type::Pong, 0))?;

@@ -144,8 +151,10 @@ fn handle_payload(
 			let mut body_data = vec![];
 			try!(ser::serialize(&mut body_data, &b));
 			let mut data = vec![];
-			try!(ser::serialize(&mut data,
-			                    &MsgHeader::new(Type::Block, body_data.len() as u64)));
+			try!(ser::serialize(
+				&mut data,
+				&MsgHeader::new(Type::Block, body_data.len() as u64),
+			));
 			data.append(&mut body_data);
 			sender.send(data).unwrap();
 		}

@@ -164,10 +173,15 @@ fn handle_payload(
 
 			// serialize and send all the headers over
 			let mut body_data = vec![];
-			try!(ser::serialize(&mut body_data, &Headers { headers: headers }));
+			try!(ser::serialize(
+				&mut body_data,
+				&Headers { headers: headers },
+			));
 			let mut data = vec![];
-			try!(ser::serialize(&mut data,
-			                    &MsgHeader::new(Type::Headers, body_data.len() as u64)));
+			try!(ser::serialize(
+				&mut data,
+				&MsgHeader::new(Type::Headers, body_data.len() as u64),
+			));
 			data.append(&mut body_data);
 			sender.send(data).unwrap();
 

@@ -184,13 +198,17 @@ fn handle_payload(
 
 			// serialize and send all the headers over
 			let mut body_data = vec![];
-			try!(ser::serialize(&mut body_data,
-			                    &PeerAddrs {
-				                    peers: peer_addrs.iter().map(|sa| SockAddr(*sa)).collect(),
-			                    }));
+			try!(ser::serialize(
+				&mut body_data,
+				&PeerAddrs {
+					peers: peer_addrs.iter().map(|sa| SockAddr(*sa)).collect(),
+				},
+			));
 			let mut data = vec![];
-			try!(ser::serialize(&mut data,
-			                    &MsgHeader::new(Type::PeerAddrs, body_data.len() as u64)));
+			try!(ser::serialize(
+				&mut data,
+				&MsgHeader::new(Type::PeerAddrs, body_data.len() as u64),
+			));
 			data.append(&mut body_data);
 			sender.send(data).unwrap();
 
p2p/src/rate_limit.rs

@@ -77,11 +77,18 @@ impl<R: AsyncRead> io::Read for ThrottledReader<R> {
 
 		// Check if Allowed
 		if self.allowed < 1 {
-			return Err(io::Error::new(io::ErrorKind::WouldBlock, "Reached Allowed Read Limit"))
+			return Err(io::Error::new(
+				io::ErrorKind::WouldBlock,
+				"Reached Allowed Read Limit",
+			));
 		}
 
 		// Read Max Allowed
-		let buf = if buf.len() > self.allowed { &mut buf[0..self.allowed]} else { buf };
+		let buf = if buf.len() > self.allowed {
+			&mut buf[0..self.allowed]
+		} else {
+			buf
+		};
 		let res = self.reader.read(buf);
 
 		// Decrement Allowed amount written

@@ -92,7 +99,7 @@ impl<R: AsyncRead> io::Read for ThrottledReader<R> {
 	}
 }
 
-impl<R: AsyncRead> AsyncRead for ThrottledReader<R> { }
+impl<R: AsyncRead> AsyncRead for ThrottledReader<R> {}
 
 /// A Rate Limited Writer
 #[derive(Debug)]

@@ -151,11 +158,18 @@ impl<W: AsyncWrite> io::Write for ThrottledWriter<W> {
 
 		// Check if Allowed
		if self.allowed < 1 {
-			return Err(io::Error::new(io::ErrorKind::WouldBlock, "Reached Allowed Write Limit"))
+			return Err(io::Error::new(
+				io::ErrorKind::WouldBlock,
+				"Reached Allowed Write Limit",
+			));
 		}
 
 		// Write max allowed
-		let buf = if buf.len() > self.allowed { &buf[0..self.allowed]} else { buf };
+		let buf = if buf.len() > self.allowed {
+			&buf[0..self.allowed]
+		} else {
+			buf
+		};
 		let res = self.writer.write(buf);
 
 		// Decrement Allowed amount written
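`ThrottledReader` enforces its rate limit by clamping every read to the remaining allowance and returning `WouldBlock` once the allowance hits zero; a timer elsewhere tops the allowance back up. A synchronous sketch of that clamping logic over any `io::Read` (simplified: no timer, allowance refilled manually):

use std::io::{self, Read};

struct Throttled<R> {
	inner: R,
	allowed: usize, // bytes still permitted in the current window
}

impl<R: Read> Read for Throttled<R> {
	fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
		if self.allowed < 1 {
			return Err(io::Error::new(io::ErrorKind::WouldBlock, "read limit reached"));
		}
		// Never hand the inner reader more room than the allowance.
		let n = buf.len().min(self.allowed);
		let read = self.inner.read(&mut buf[..n])?;
		self.allowed -= read;
		Ok(read)
	}
}

fn main() {
	let data: &[u8] = b"0123456789";
	let mut t = Throttled { inner: data, allowed: 4 };
	let mut buf = [0u8; 8];
	assert_eq!(t.read(&mut buf).unwrap(), 4); // clamped to the allowance
	assert!(t.read(&mut buf).is_err()); // allowance exhausted
}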
p2p/src/server.rs

@@ -132,17 +132,22 @@ impl Server {
 			let mut stop_mut = self.stop.borrow_mut();
 			*stop_mut = Some(stop);
 		}
-		Box::new(server.select(stop_rx.map_err(|_| Error::ConnectionClose)).then(|res| match res {
-			Ok((_, _)) => Ok(()),
-			Err((e, _)) => Err(e),
-		}))
+		Box::new(
+			server
+				.select(stop_rx.map_err(|_| Error::ConnectionClose))
+				.then(|res| match res {
+					Ok((_, _)) => Ok(()),
+					Err((e, _)) => Err(e),
+				}),
+		)
 	}
 
 	/// Asks the server to connect to a new peer.
-	pub fn connect_peer(&self,
-	                    addr: SocketAddr,
-	                    h: reactor::Handle)
-	                    -> Box<Future<Item = Option<Arc<Peer>>, Error = Error>> {
+	pub fn connect_peer(
+		&self,
+		addr: SocketAddr,
+		h: reactor::Handle,
+	) -> Box<Future<Item = Option<Arc<Peer>>, Error = Error>> {
 		if let Some(p) = self.get_peer(addr) {
 			// if we're already connected to the addr, just return the peer
 			return Box::new(future::ok(Some(p)));

@@ -163,7 +168,8 @@ impl Server {
 
 		let socket = TcpStream::connect(&addr, &h).map_err(|e| Error::Connection(e));
 		let h2 = h.clone();
-		let request = socket.and_then(move |socket| {
+		let request = socket
+			.and_then(move |socket| {
 			let peers = peers.clone();
 			let total_diff = adapter1.clone().total_difficulty();
 

@@ -280,11 +286,13 @@ impl Server {
 }
 
 // Adds the peer built by the provided future in the peers map
-fn add_to_peers<A>(peers: Arc<RwLock<Vec<Arc<Peer>>>>,
-                   adapter: Arc<NetAdapter>,
-                   peer_fut: A)
-                   -> Box<Future<Item = Result<(TcpStream, Arc<Peer>), ()>, Error = Error>>
-	where A: IntoFuture<Item = (TcpStream, Peer), Error = Error> + 'static
+fn add_to_peers<A>(
+	peers: Arc<RwLock<Vec<Arc<Peer>>>>,
+	adapter: Arc<NetAdapter>,
+	peer_fut: A,
+) -> Box<Future<Item = Result<(TcpStream, Arc<Peer>), ()>, Error = Error>>
+where
+	A: IntoFuture<Item = (TcpStream, Peer), Error = Error> + 'static,
 {
 	let peer_add = peer_fut.into_future().map(move |(conn, peer)| {
 		adapter.peer_connected(&peer.info);

@@ -297,15 +305,17 @@ fn add_to_peers<A>(
 }
 
 // Adds a timeout to a future
-fn with_timeout<T: 'static>(fut: Box<Future<Item = Result<T, ()>, Error = Error>>,
-                            h: &reactor::Handle)
-                            -> Box<Future<Item = T, Error = Error>> {
+fn with_timeout<T: 'static>(
+	fut: Box<Future<Item = Result<T, ()>, Error = Error>>,
+	h: &reactor::Handle,
+) -> Box<Future<Item = T, Error = Error>> {
 	let timeout = reactor::Timeout::new(Duration::new(5, 0), h).unwrap();
-	let timed = fut.select(timeout.map(Err).from_err())
-		.then(|res| match res {
+	let timed = fut.select(timeout.map(Err).from_err()).then(
+		|res| match res {
 			Ok((Ok(inner), _timeout)) => Ok(inner),
 			Ok((_, _accept)) => Err(Error::Timeout),
 			Err((e, _other)) => Err(e),
-		});
+		},
+	);
 	Box::new(timed)
 }
p2p/src/store.rs

@@ -53,10 +53,12 @@ pub struct PeerData {
 impl Writeable for PeerData {
 	fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
 		SockAddr(self.addr).write(writer)?;
-		ser_multiwrite!(writer,
-		                [write_u32, self.capabilities.bits()],
-		                [write_bytes, &self.user_agent],
-		                [write_u8, self.flags as u8]);
+		ser_multiwrite!(
+			writer,
+			[write_u32, self.capabilities.bits()],
+			[write_bytes, &self.user_agent],
+			[write_u8, self.flags as u8]
+		);
 		Ok(())
 	}
 }

@@ -66,7 +68,9 @@ impl Readable for PeerData {
 		let addr = SockAddr::read(reader)?;
 		let (capab, ua, fl) = ser_multiread!(reader, read_u32, read_vec, read_u8);
 		let user_agent = String::from_utf8(ua).map_err(|_| ser::Error::CorruptedData)?;
-		let capabilities = Capabilities::from_bits(capab).ok_or(ser::Error::CorruptedData)?;
+		let capabilities = Capabilities::from_bits(capab).ok_or(
+			ser::Error::CorruptedData,
+		)?;
 		match State::from_u8(fl) {
 			Some(flags) => {
 				Ok(PeerData {

@@ -94,8 +98,10 @@ impl PeerStore {
 	}
 
 	pub fn save_peer(&self, p: &PeerData) -> Result<(), Error> {
-		self.db.put_ser(&to_key(PEER_PREFIX, &mut format!("{}", p.addr).into_bytes())[..],
-		                p)
+		self.db.put_ser(
+			&to_key(PEER_PREFIX, &mut format!("{}", p.addr).into_bytes())[..],
+			p,
+		)
 	}
 
 	fn get_peer(&self, peer_addr: SocketAddr) -> Result<PeerData, Error> {

@@ -103,16 +109,22 @@ impl PeerStore {
 	}
 
 	pub fn exists_peer(&self, peer_addr: SocketAddr) -> Result<bool, Error> {
-		self.db.exists(&to_key(PEER_PREFIX, &mut format!("{}", peer_addr).into_bytes())[..])
+		self.db.exists(
+			&to_key(PEER_PREFIX, &mut format!("{}", peer_addr).into_bytes())[..],
+		)
 	}
 
 	pub fn delete_peer(&self, peer_addr: SocketAddr) -> Result<(), Error> {
-		self.db.delete(&to_key(PEER_PREFIX, &mut format!("{}", peer_addr).into_bytes())[..])
+		self.db.delete(
+			&to_key(PEER_PREFIX, &mut format!("{}", peer_addr).into_bytes())[..],
+		)
 	}
 
 	pub fn find_peers(&self, state: State, cap: Capabilities, count: usize) -> Vec<PeerData> {
-		let peers_iter = self.db
-			.iter::<PeerData>(&to_key(PEER_PREFIX, &mut "".to_string().into_bytes()));
+		let peers_iter = self.db.iter::<PeerData>(&to_key(
+			PEER_PREFIX,
+			&mut "".to_string().into_bytes(),
+		));
 		let mut peers = Vec::with_capacity(count);
 		for p in peers_iter {
 			if p.flags == state && p.capabilities.contains(cap) {
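`PeerStore` keys every record by prefixing the peer's printable address with a single `PEER_PREFIX` byte, which keeps all peers in one contiguous key range and makes `find_peers` a simple prefix iteration. A sketch of the same convention over an ordered map standing in for the key-value store (`to_key` here is a hypothetical reimplementation, not the grin_store one):

use std::collections::BTreeMap;

const PEER_PREFIX: u8 = b'p';

fn to_key(prefix: u8, id: &str) -> Vec<u8> {
	let mut key = vec![prefix];
	key.extend_from_slice(id.as_bytes());
	key
}

fn main() {
	let mut db: BTreeMap<Vec<u8>, String> = BTreeMap::new();
	db.insert(to_key(PEER_PREFIX, "10.0.0.1:13414"), "healthy".to_string());
	db.insert(to_key(PEER_PREFIX, "10.0.0.2:13414"), "banned".to_string());
	db.insert(to_key(b'x', "unrelated"), "other namespace".to_string());

	// Iterating from the bare prefix visits exactly the peer records.
	let peers: Vec<_> = db
		.range(to_key(PEER_PREFIX, "")..to_key(PEER_PREFIX + 1, ""))
		.collect();
	assert_eq!(peers.len(), 2);
}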
p2p/src/types.rs

@@ -117,10 +117,8 @@ pub trait Protocol {
 	/// be known already, usually passed during construction. Will typically
 	/// block so needs to be called withing a coroutine. Should also be called
 	/// only once.
-	fn handle(&self,
-	          conn: TcpStream,
-	          na: Arc<NetAdapter>)
-	          -> Box<Future<Item = (), Error = Error>>;
+	fn handle(&self, conn: TcpStream, na: Arc<NetAdapter>)
+		-> Box<Future<Item = (), Error = Error>>;
 
 	/// Sends a ping message to the remote peer.
 	fn send_ping(&self) -> Result<(), Error>;
p2p/tests/peer_handshake.rs

@@ -47,40 +47,47 @@ fn peer_handshake() {
 	let rhandle = handle.clone();
 	let timeout = reactor::Timeout::new(time::Duration::new(1, 0), &handle).unwrap();
 	let timeout_send = reactor::Timeout::new(time::Duration::new(2, 0), &handle).unwrap();
-	handle.spawn(timeout.from_err()
-		.and_then(move |_| {
-			let p2p_conf = p2p::P2PConfig::default();
-			let addr = SocketAddr::new(p2p_conf.host, p2p_conf.port);
-			let socket = TcpStream::connect(&addr, &phandle).map_err(|e| p2p::Error::Connection(e));
-			socket.and_then(move |socket| {
-				Peer::connect(socket,
-				              p2p::UNKNOWN,
-				              Difficulty::one(),
-				              my_addr,
-				              &p2p::handshake::Handshake::new())
-			})
-			.and_then(move |(socket, peer)| {
-				rhandle.spawn(peer.run(socket, net_adapter.clone()).map_err(|e| {
-					panic!("Client run failed: {:?}", e);
-				}));
-				peer.send_ping().unwrap();
-				timeout_send.from_err().map(|_| peer)
-			})
-			.and_then(|peer| {
-				let (sent, recv) = peer.transmitted_bytes();
-				assert!(sent > 0);
-				assert!(recv > 0);
-				Ok(())
-			})
-			.and_then(|_| {
-				assert!(server.peer_count() > 0);
-				server.stop();
-				Ok(())
-			})
-		})
-		.map_err(|e| {
-			panic!("Client connection failed: {:?}", e);
-		}));
+	handle.spawn(
+		timeout
+			.from_err()
+			.and_then(move |_| {
+				let p2p_conf = p2p::P2PConfig::default();
+				let addr = SocketAddr::new(p2p_conf.host, p2p_conf.port);
+				let socket =
+					TcpStream::connect(&addr, &phandle).map_err(|e| p2p::Error::Connection(e));
+				socket
+					.and_then(move |socket| {
+						Peer::connect(
+							socket,
+							p2p::UNKNOWN,
+							Difficulty::one(),
+							my_addr,
+							&p2p::handshake::Handshake::new(),
+						)
+					})
+					.and_then(move |(socket, peer)| {
+						rhandle.spawn(peer.run(socket, net_adapter.clone()).map_err(|e| {
+							panic!("Client run failed: {:?}", e);
+						}));
+						peer.send_ping().unwrap();
+						timeout_send.from_err().map(|_| peer)
+					})
+					.and_then(|peer| {
+						let (sent, recv) = peer.transmitted_bytes();
+						assert!(sent > 0);
+						assert!(recv > 0);
+						Ok(())
+					})
+					.and_then(|_| {
+						assert!(server.peer_count() > 0);
+						server.stop();
+						Ok(())
+					})
+			})
+			.map_err(|e| {
+				panic!("Client connection failed: {:?}", e);
+			}),
+	);
 
 	evtlp.run(run_server).unwrap();
 
@ -22,142 +22,162 @@ use types::{BlockChain, PoolError};
|
|||
|
||||
#[derive(Debug)]
|
||||
pub struct DummyBlockHeaderIndex {
|
||||
block_headers: HashMap<Commitment, block::BlockHeader>
|
||||
block_headers: HashMap<Commitment, block::BlockHeader>,
|
||||
}
|
||||
|
||||
impl DummyBlockHeaderIndex {
|
||||
pub fn insert(&mut self, commit: Commitment, block_header: block::BlockHeader) {
|
||||
self.block_headers.insert(commit, block_header);
|
||||
}
|
||||
pub fn insert(&mut self, commit: Commitment, block_header: block::BlockHeader) {
|
||||
self.block_headers.insert(commit, block_header);
|
||||
}
|
||||
|
||||
pub fn get_block_header_by_output_commit(&self, commit: Commitment) -> Result<&block::BlockHeader, PoolError> {
|
||||
match self.block_headers.get(&commit) {
|
||||
Some(h) => Ok(h),
|
||||
None => Err(PoolError::GenericPoolError)
|
||||
}
|
||||
}
|
||||
pub fn get_block_header_by_output_commit(
|
||||
&self,
|
||||
commit: Commitment,
|
||||
) -> Result<&block::BlockHeader, PoolError> {
|
||||
match self.block_headers.get(&commit) {
|
||||
Some(h) => Ok(h),
|
||||
None => Err(PoolError::GenericPoolError),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A DummyUtxoSet for mocking up the chain
|
||||
pub struct DummyUtxoSet {
|
||||
outputs : HashMap<Commitment, transaction::Output>
|
||||
outputs: HashMap<Commitment, transaction::Output>,
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
impl DummyUtxoSet {
|
||||
pub fn empty() -> DummyUtxoSet{
|
||||
DummyUtxoSet{outputs: HashMap::new()}
|
||||
}
|
||||
pub fn root(&self) -> hash::Hash {
|
||||
hash::ZERO_HASH
|
||||
}
|
||||
pub fn apply(&self, b: &block::Block) -> DummyUtxoSet {
|
||||
let mut new_hashmap = self.outputs.clone();
|
||||
for input in &b.inputs {
|
||||
new_hashmap.remove(&input.commitment());
|
||||
}
|
||||
for output in &b.outputs {
|
||||
new_hashmap.insert(output.commitment(), output.clone());
|
||||
}
|
||||
DummyUtxoSet{outputs: new_hashmap}
|
||||
}
|
||||
pub fn with_block(&mut self, b: &block::Block) {
|
||||
for input in &b.inputs {
|
||||
self.outputs.remove(&input.commitment());
|
||||
}
|
||||
for output in &b.outputs {
|
||||
self.outputs.insert(output.commitment(), output.clone());
|
||||
}
|
||||
}
|
||||
pub fn rewind(&self, _: &block::Block) -> DummyUtxoSet {
|
||||
DummyUtxoSet{outputs: HashMap::new()}
|
||||
}
|
||||
pub fn get_output(&self, output_ref: &Commitment) -> Option<&transaction::Output> {
|
||||
self.outputs.get(output_ref)
|
||||
}
|
||||
pub fn empty() -> DummyUtxoSet {
|
||||
DummyUtxoSet { outputs: HashMap::new() }
|
||||
}
|
||||
pub fn root(&self) -> hash::Hash {
|
||||
hash::ZERO_HASH
|
||||
}
|
||||
pub fn apply(&self, b: &block::Block) -> DummyUtxoSet {
|
||||
let mut new_hashmap = self.outputs.clone();
|
||||
for input in &b.inputs {
|
||||
new_hashmap.remove(&input.commitment());
|
||||
}
|
||||
for output in &b.outputs {
|
||||
new_hashmap.insert(output.commitment(), output.clone());
|
||||
}
|
||||
DummyUtxoSet { outputs: new_hashmap }
|
||||
}
|
||||
pub fn with_block(&mut self, b: &block::Block) {
|
||||
for input in &b.inputs {
|
||||
self.outputs.remove(&input.commitment());
|
||||
}
|
||||
for output in &b.outputs {
|
||||
self.outputs.insert(output.commitment(), output.clone());
|
||||
}
|
||||
}
|
||||
pub fn rewind(&self, _: &block::Block) -> DummyUtxoSet {
|
||||
DummyUtxoSet { outputs: HashMap::new() }
|
||||
}
|
||||
pub fn get_output(&self, output_ref: &Commitment) -> Option<&transaction::Output> {
|
||||
self.outputs.get(output_ref)
|
||||
}
|
||||
|
||||
fn clone(&self) -> DummyUtxoSet {
|
||||
DummyUtxoSet{outputs: self.outputs.clone()}
|
||||
}
|
||||
fn clone(&self) -> DummyUtxoSet {
|
||||
DummyUtxoSet { outputs: self.outputs.clone() }
|
||||
}
|
||||
|
||||
// only for testing: add an output to the map
|
||||
pub fn add_output(&mut self, output: transaction::Output) {
|
||||
self.outputs.insert(output.commitment(), output);
|
||||
}
|
||||
// like above, but doesn't modify in-place so no mut ref needed
|
||||
pub fn with_output(&self, output: transaction::Output) -> DummyUtxoSet {
|
||||
let mut new_map = self.outputs.clone();
|
||||
new_map.insert(output.commitment(), output);
|
||||
DummyUtxoSet{outputs: new_map}
|
||||
}
|
||||
// only for testing: add an output to the map
|
||||
pub fn add_output(&mut self, output: transaction::Output) {
|
||||
self.outputs.insert(output.commitment(), output);
|
||||
}
|
||||
// like above, but doesn't modify in-place so no mut ref needed
|
||||
pub fn with_output(&self, output: transaction::Output) -> DummyUtxoSet {
|
||||
let mut new_map = self.outputs.clone();
|
||||
new_map.insert(output.commitment(), output);
|
||||
DummyUtxoSet { outputs: new_map }
|
||||
}
|
||||
}
|
||||
|
||||
/// A DummyChain is the mocked chain for playing with what methods we would
|
||||
/// need
|
||||
#[allow(dead_code)]
|
||||
pub struct DummyChainImpl {
|
||||
utxo: RwLock<DummyUtxoSet>,
|
||||
block_headers: RwLock<DummyBlockHeaderIndex>,
|
||||
head_header: RwLock<Vec<block::BlockHeader>>,
|
||||
utxo: RwLock<DummyUtxoSet>,
|
||||
block_headers: RwLock<DummyBlockHeaderIndex>,
|
||||
head_header: RwLock<Vec<block::BlockHeader>>,
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
impl DummyChainImpl {
|
||||
pub fn new() -> DummyChainImpl {
|
||||
DummyChainImpl{
|
||||
utxo: RwLock::new(DummyUtxoSet{outputs: HashMap::new()}),
|
||||
block_headers: RwLock::new(DummyBlockHeaderIndex{block_headers: HashMap::new()}),
|
||||
head_header: RwLock::new(vec![]),
|
||||
}
|
||||
}
|
||||
pub fn new() -> DummyChainImpl {
|
||||
DummyChainImpl {
|
||||
utxo: RwLock::new(DummyUtxoSet { outputs: HashMap::new() }),
|
||||
block_headers: RwLock::new(DummyBlockHeaderIndex { block_headers: HashMap::new() }),
|
||||
head_header: RwLock::new(vec![]),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl BlockChain for DummyChainImpl {
	fn get_unspent(&self, commitment: &Commitment) -> Result<transaction::Output, PoolError> {
		let output = self.utxo.read().unwrap().get_output(commitment).cloned();
		match output {
			Some(o) => Ok(o),
			None => Err(PoolError::GenericPoolError),
		}
	}

	fn get_block_header_by_output_commit(
		&self,
		commit: &Commitment,
	) -> Result<block::BlockHeader, PoolError> {
		match self.block_headers
			.read()
			.unwrap()
			.get_block_header_by_output_commit(*commit) {
			Ok(h) => Ok(h.clone()),
			Err(e) => Err(e),
		}
	}

	fn head_header(&self) -> Result<block::BlockHeader, PoolError> {
		let headers = self.head_header.read().unwrap();
		if headers.len() > 0 {
			Ok(headers[0].clone())
		} else {
			Err(PoolError::GenericPoolError)
		}
	}
}

impl DummyChain for DummyChainImpl {
	fn update_utxo_set(&mut self, new_utxo: DummyUtxoSet) {
		self.utxo = RwLock::new(new_utxo);
	}
	fn apply_block(&self, b: &block::Block) {
		self.utxo.write().unwrap().with_block(b);
	}
	fn store_header_by_output_commitment(
		&self,
		commitment: Commitment,
		block_header: &block::BlockHeader,
	) {
		self.block_headers.write().unwrap().insert(
			commitment,
			block_header.clone(),
		);
	}
	fn store_head_header(&self, block_header: &block::BlockHeader) {
		let mut h = self.head_header.write().unwrap();
		h.clear();
		h.insert(0, block_header.clone());
	}
}

pub trait DummyChain: BlockChain {
	fn update_utxo_set(&mut self, new_utxo: DummyUtxoSet);
	fn apply_block(&self, b: &block::Block);
	fn store_header_by_output_commitment(
		&self,
		commitment: Commitment,
		block_header: &block::BlockHeader,
	);
	fn store_head_header(&self, block_header: &block::BlockHeader);
}
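
A minimal sketch of how a test might seed this mock chain before exercising pool code. The `seed_chain` helper is hypothetical (not part of this commit) and assumes it lives in the same module, so the private `outputs` field of DummyUtxoSet is visible:

	// Hypothetical test helper: give the dummy chain a head header and one
	// spendable output so get_unspent() can resolve it afterwards.
	fn seed_chain(
		chain: &mut DummyChainImpl,
		header: block::BlockHeader,
		out: transaction::Output,
	) {
		let mut utxo = DummyUtxoSet { outputs: HashMap::new() };
		utxo.add_output(out.clone());
		chain.update_utxo_set(utxo);
		chain.store_header_by_output_commitment(out.commitment(), &header);
		chain.store_head_header(&header);
	}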

@@ -30,184 +30,210 @@ use core::core::hash::Hashed;

/// An entry in the transaction pool.
/// These are the vertices of both of the graph structures
pub struct PoolEntry {
	// Core data
	/// Unique identifier of this pool entry and the corresponding transaction
	pub transaction_hash: core::hash::Hash,

	// Metadata
	/// Size estimate
	pub size_estimate: u64,
	/// Receive timestamp
	pub receive_ts: time::Tm,
}

impl PoolEntry {
	/// Create new transaction pool entry
	pub fn new(tx: &core::transaction::Transaction) -> PoolEntry {
		PoolEntry {
			transaction_hash: transaction_identifier(tx),
			size_estimate: estimate_transaction_size(tx),
			receive_ts: time::now_utc(),
		}
	}
}

/// TODO guessing this needs implementing
fn estimate_transaction_size(_tx: &core::transaction::Transaction) -> u64 {
	0
}

/// An edge connecting graph vertices.
/// For various use cases, one of either the source or destination may be
/// unpopulated
pub struct Edge {
	// Source and Destination are the vertex id's, the transaction (kernel)
	// hash.
	source: Option<core::hash::Hash>,
	destination: Option<core::hash::Hash>,

	// Output is the output hash which this input/output pairing corresponds
	// to.
	output: Commitment,
}

impl Edge {
	/// Create new edge
	pub fn new(
		source: Option<core::hash::Hash>,
		destination: Option<core::hash::Hash>,
		output: Commitment,
	) -> Edge {
		Edge {
			source: source,
			destination: destination,
			output: output,
		}
	}

	/// Create new edge with a source
	pub fn with_source(&self, src: Option<core::hash::Hash>) -> Edge {
		Edge {
			source: src,
			destination: self.destination,
			output: self.output,
		}
	}

	/// Create new edge with destination
	pub fn with_destination(&self, dst: Option<core::hash::Hash>) -> Edge {
		Edge {
			source: self.source,
			destination: dst,
			output: self.output,
		}
	}

	/// The output commitment of the edge
	pub fn output_commitment(&self) -> Commitment {
		self.output
	}

	/// The destination hash of the edge
	pub fn destination_hash(&self) -> Option<core::hash::Hash> {
		self.destination
	}

	/// The source hash of the edge
	pub fn source_hash(&self) -> Option<core::hash::Hash> {
		self.source
	}
}

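For intuition, a hedged sketch of the intended edge lifecycle (the helper is hypothetical, not part of this commit): an output enters the pool as a dangling edge with no destination, and is completed once some transaction spends it.

	// Hypothetical helper: complete a dangling edge once a spender appears.
	fn connect_edge(unspent: &Edge, spender: core::hash::Hash) -> Edge {
		// keeps the source and output commitment, fills in the consuming tx
		unspent.with_destination(Some(spender))
	}
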
impl fmt::Debug for Edge {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		write!(
			f,
			"Edge {{source: {:?}, destination: {:?}, commitment: {:?}}}",
			self.source,
			self.destination,
			self.output
		)
	}
}

/// The generic graph container. Both graphs, the pool and orphans, embed this
/// structure and add additional capability on top of it.
pub struct DirectedGraph {
	edges: HashMap<Commitment, Edge>,
	vertices: Vec<PoolEntry>,

	// A small optimization: keeping roots (vertices with in-degree 0) in a
	// separate list makes topological sort a bit faster. (This is true for
	// Kahn's, not sure about other implementations)
	roots: Vec<PoolEntry>,
}

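The comment above alludes to Kahn's algorithm. As a standalone illustration (not this crate's code; plain `u32` vertex ids stand in for pool entries), keeping roots aside lets the sort seed its queue directly instead of first scanning every vertex for in-degree zero:

	use std::collections::{HashMap, VecDeque};

	// Standalone sketch of Kahn's topological sort.
	fn kahn_order(
		roots: Vec<u32>,
		mut in_deg: HashMap<u32, usize>,
		adj: HashMap<u32, Vec<u32>>,
	) -> Vec<u32> {
		// roots already have in-degree 0, so they seed the queue for free
		let mut queue: VecDeque<u32> = roots.into();
		let mut order = Vec::new();
		while let Some(v) = queue.pop_front() {
			order.push(v);
			if let Some(nexts) = adj.get(&v) {
				for &next in nexts {
					let d = in_deg.get_mut(&next).unwrap();
					*d -= 1;
					if *d == 0 {
						queue.push_back(next);
					}
				}
			}
		}
		order
	}
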
impl DirectedGraph {
	/// Create an empty directed graph
	pub fn empty() -> DirectedGraph {
		DirectedGraph {
			edges: HashMap::new(),
			vertices: Vec::new(),
			roots: Vec::new(),
		}
	}

	/// Get an edge by its commitment
	pub fn get_edge_by_commitment(&self, output_commitment: &Commitment) -> Option<&Edge> {
		self.edges.get(output_commitment)
	}

	/// Remove an edge by its commitment
	pub fn remove_edge_by_commitment(&mut self, output_commitment: &Commitment) -> Option<Edge> {
		self.edges.remove(output_commitment)
	}

	/// Remove a vertex by its hash
	pub fn remove_vertex(&mut self, tx_hash: core::hash::Hash) -> Option<PoolEntry> {
		match self.roots.iter().position(
			|x| x.transaction_hash == tx_hash,
		) {
			Some(i) => Some(self.roots.swap_remove(i)),
			None => {
				match self.vertices.iter().position(
					|x| x.transaction_hash == tx_hash,
				) {
					Some(i) => Some(self.vertices.swap_remove(i)),
					None => None,
				}
			}
		}
	}

	/// Adds a vertex and a set of incoming edges to the graph.
	///
	/// The PoolEntry at vertex is added to the graph; depending on the
	/// number of incoming edges, the vertex is either added to the vertices
	/// or to the roots.
	///
	/// Outgoing edges must not be included in edges; this method is designed
	/// for adding vertices one at a time and only accepts incoming edges as
	/// internal edges.
	pub fn add_entry(&mut self, vertex: PoolEntry, mut edges: Vec<Edge>) {
		if edges.len() == 0 {
			self.roots.push(vertex);
		} else {
			self.vertices.push(vertex);
			for edge in edges.drain(..) {
				self.edges.insert(edge.output_commitment(), edge);
			}
		}
	}

	/// add_vertex_only adds a vertex, meant to be complemented by add_edge_only
	/// in cases where delivering a vector of edges is not feasible or efficient
	pub fn add_vertex_only(&mut self, vertex: PoolEntry, is_root: bool) {
		if is_root {
			self.roots.push(vertex);
		} else {
			self.vertices.push(vertex);
		}
	}

	/// add_edge_only adds an edge
	pub fn add_edge_only(&mut self, edge: Edge) {
		self.edges.insert(edge.output_commitment(), edge);
	}

	/// Number of vertices (root + internal)
	pub fn len_vertices(&self) -> usize {
		self.vertices.len() + self.roots.len()
	}

	/// Number of root vertices only
	pub fn len_roots(&self) -> usize {
		self.roots.len()
	}

	/// Number of edges
	pub fn len_edges(&self) -> usize {
		self.edges.len()
	}

	/// Get the current list of roots
	pub fn get_roots(&self) -> Vec<core::hash::Hash> {
		self.roots.iter().map(|x| x.transaction_hash).collect()
	}
}

/// Using transaction merkle_inputs_outputs to calculate a deterministic hash;
@@ -215,50 +241,57 @@ impl DirectedGraph {
/// proofs and any extra data the kernel may cover, but it is used initially
/// for testing purposes.
pub fn transaction_identifier(tx: &core::transaction::Transaction) -> core::hash::Hash {
	// core::transaction::merkle_inputs_outputs(&tx.inputs, &tx.outputs)
	tx.hash()
}

#[cfg(test)]
mod tests {
	use super::*;
	use secp::{Secp256k1, ContextFlag};
	use secp::key;

	#[test]
	fn test_add_entry() {
		let ec = Secp256k1::with_caps(ContextFlag::Commit);

		let output_commit = ec.commit_value(70).unwrap();
		let inputs = vec![
			core::transaction::Input(ec.commit_value(50).unwrap()),
			core::transaction::Input(ec.commit_value(25).unwrap()),
		];
		let outputs = vec![
			core::transaction::Output {
				features: core::transaction::DEFAULT_OUTPUT,
				commit: output_commit,
				proof: ec.range_proof(0, 100, key::ZERO_KEY, output_commit, ec.nonce()),
			},
		];
		let test_transaction = core::transaction::Transaction::new(inputs, outputs, 5);

		let test_pool_entry = PoolEntry::new(&test_transaction);

		let incoming_edge_1 = Edge::new(
			Some(random_hash()),
			Some(core::hash::ZERO_HASH),
			output_commit,
		);

		let mut test_graph = DirectedGraph::empty();

		test_graph.add_entry(test_pool_entry, vec![incoming_edge_1]);

		assert_eq!(test_graph.vertices.len(), 1);
		assert_eq!(test_graph.roots.len(), 0);
		assert_eq!(test_graph.edges.len(), 1);
	}
}

/// For testing/debugging: a random tx hash
pub fn random_hash() -> core::hash::Hash {
	let hash_bytes: [u8; 32] = rand::random();
	core::hash::Hash(hash_bytes)
}

pool/src/pool.rs: diff suppressed because it is too large (1710 lines changed)

@@ -37,90 +37,93 @@ use core::core::hash;
/// Most likely this will evolve to contain some sort of network identifier,
/// once we get a better sense of what transaction building might look like.
pub struct TxSource {
	/// Human-readable name used for logging and errors.
	pub debug_name: String,
	/// Unique identifier used to distinguish this peer from others.
	pub identifier: String,
}

/// This enum describes the parent for a given input of a transaction.
#[derive(Clone)]
pub enum Parent {
	Unknown,
	BlockTransaction { output: transaction::Output },
	PoolTransaction { tx_ref: hash::Hash },
	AlreadySpent { other_tx: hash::Hash },
}

impl fmt::Debug for Parent {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		match self {
			&Parent::Unknown => write!(f, "Parent: Unknown"),
			&Parent::BlockTransaction { output: _ } => write!(f, "Parent: Block Transaction"),
			&Parent::PoolTransaction { tx_ref: x } => {
				write!(f, "Parent: Pool Transaction ({:?})", x)
			}
			&Parent::AlreadySpent { other_tx: x } => write!(f, "Parent: Already Spent By {:?}", x),
		}
	}
}

// TODO document this enum more accurately
/// Enum of errors
#[derive(Debug)]
pub enum PoolError {
	/// An invalid pool entry
	Invalid,
	/// An entry already in the pool
	AlreadyInPool,
	/// A duplicate output
	DuplicateOutput {
		/// The other transaction
		other_tx: Option<hash::Hash>,
		/// Is in chain?
		in_chain: bool,
		/// The output
		output: Commitment,
	},
	/// A double spend
	DoubleSpend {
		/// The other transaction
		other_tx: hash::Hash,
		/// The spent output
		spent_output: Commitment,
	},
	/// Attempt to spend a coinbase output before it matures (1000 blocks?)
	ImmatureCoinbase {
		/// The block header of the block containing the coinbase output
		header: block::BlockHeader,
		/// The unspent coinbase output
		output: Commitment,
	},
	/// An orphan successfully added to the orphans set
	OrphanTransaction,
	/// TODO - wip, just getting imports working, remove this and use more
	/// specific errors
	GenericPoolError,
	/// TODO - is this the right level of abstraction for pool errors?
	OutputNotFound,
	/// TODO - is this the right level of abstraction for pool errors?
	OutputSpent,
}
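
A hedged sketch of how a caller might surface these variants. The function is hypothetical; `res` would come from some pool entry point returning `Result<(), PoolError>`:

	// Hypothetical caller-side handling; relies only on the variants above.
	fn describe(res: Result<(), PoolError>) -> String {
		match res {
			Ok(_) => "accepted".to_string(),
			Err(PoolError::AlreadyInPool) => "duplicate transaction".to_string(),
			Err(PoolError::DoubleSpend { other_tx, spent_output }) => {
				format!("output {:?} already spent by {:?}", spent_output, other_tx)
			}
			Err(e) => format!("rejected: {:?}", e),
		}
	}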

/// Interface that the pool requires from a blockchain implementation.
pub trait BlockChain {
	/// Get an unspent output by its commitment. Will return None if the output
	/// is spent or if it doesn't exist. The blockchain is expected to produce
	/// a result with its current view of the most worked chain, ignoring
	/// orphans, etc.
	fn get_unspent(&self, output_ref: &Commitment) -> Result<transaction::Output, PoolError>;

	/// Get the block header by output commitment (needed for spending coinbase
	/// after n blocks)
	fn get_block_header_by_output_commit(
		&self,
		commit: &Commitment,
	) -> Result<block::BlockHeader, PoolError>;

	/// Get the block header at the head
	fn head_header(&self) -> Result<block::BlockHeader, PoolError>;
}

/// Pool contains the elements of the graph that are connected, in full, to
@@ -135,230 +138,270 @@ pub trait BlockChain {
/// connections are in the pool edge set, while unspent (dangling) references
/// exist in the available_outputs set.
pub struct Pool {
	graph: graph::DirectedGraph,

	// available_outputs are unspent outputs of the current pool set,
	// maintained as edges with empty destinations, keyed by the
	// output's hash.
	available_outputs: HashMap<Commitment, graph::Edge>,

	// Consumed blockchain utxo's are kept in a separate map.
	consumed_blockchain_outputs: HashMap<Commitment, graph::Edge>,
}

impl Pool {
	pub fn empty() -> Pool {
		Pool {
			graph: graph::DirectedGraph::empty(),
			available_outputs: HashMap::new(),
			consumed_blockchain_outputs: HashMap::new(),
		}
	}

	/// Given an output, check if a spending reference (input -> output)
	/// already exists in the pool.
	/// Returns the transaction (kernel) hash corresponding to the conflicting
	/// transaction
	pub fn check_double_spend(&self, o: &transaction::Output) -> Option<hash::Hash> {
		self.graph
			.get_edge_by_commitment(&o.commitment())
			.or(self.consumed_blockchain_outputs.get(&o.commitment()))
			.map(|x| x.destination_hash().unwrap())
	}

	pub fn get_blockchain_spent(&self, c: &Commitment) -> Option<&graph::Edge> {
		self.consumed_blockchain_outputs.get(c)
	}

	pub fn add_pool_transaction(
		&mut self,
		pool_entry: graph::PoolEntry,
		mut blockchain_refs: Vec<graph::Edge>,
		pool_refs: Vec<graph::Edge>,
		mut new_unspents: Vec<graph::Edge>,
	) {
		// Removing consumed available_outputs
		for new_edge in &pool_refs {
			// All of these should correspond to an existing unspent
			assert!(
				self.available_outputs
					.remove(&new_edge.output_commitment())
					.is_some()
			);
		}

		// Accounting for consumed blockchain outputs
		for new_blockchain_edge in blockchain_refs.drain(..) {
			self.consumed_blockchain_outputs.insert(
				new_blockchain_edge.output_commitment(),
				new_blockchain_edge,
			);
		}

		// Adding the transaction to the vertices list along with internal
		// pool edges
		self.graph.add_entry(pool_entry, pool_refs);

		// Adding the new unspents to the unspent map
		for unspent_output in new_unspents.drain(..) {
			self.available_outputs.insert(
				unspent_output.output_commitment(),
				unspent_output,
			);
		}
	}

	pub fn remove_pool_transaction(
		&mut self,
		tx: &transaction::Transaction,
		marked_txs: &HashMap<hash::Hash, ()>,
	) {
		self.graph.remove_vertex(graph::transaction_identifier(tx));

		for input in tx.inputs.iter().map(|x| x.commitment()) {
			match self.graph.remove_edge_by_commitment(&input) {
				Some(x) => {
					if !marked_txs.contains_key(&x.source_hash().unwrap()) {
						self.available_outputs.insert(
							x.output_commitment(),
							x.with_destination(None),
						);
					}
				}
				None => {
					self.consumed_blockchain_outputs.remove(&input);
				}
			};
		}

		for output in tx.outputs.iter().map(|x| x.commitment()) {
			match self.graph.remove_edge_by_commitment(&output) {
				Some(x) => {
					if !marked_txs.contains_key(&x.destination_hash().unwrap()) {
						self.consumed_blockchain_outputs.insert(
							x.output_commitment(),
							x.with_source(None),
						);
					}
				}
				None => {
					self.available_outputs.remove(&output);
				}
			};
		}
	}

	/// Simplest possible implementation: just return the roots
	pub fn get_mineable_transactions(&self, num_to_fetch: u32) -> Vec<hash::Hash> {
		let mut roots = self.graph.get_roots();
		roots.truncate(num_to_fetch as usize);
		roots
	}
}
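
A small usage sketch (hypothetical caller, arbitrary cap of 10): block production only needs the roots, since those have no unresolved in-pool dependencies and are safe to mine without topologically sorting the rest of the graph.

	// Hypothetical block-building hook over the method above.
	fn select_for_block(pool: &Pool) -> Vec<hash::Hash> {
		pool.get_mineable_transactions(10)
	}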

impl TransactionGraphContainer for Pool {
	fn get_graph(&self) -> &graph::DirectedGraph {
		&self.graph
	}
	fn get_available_output(&self, output: &Commitment) -> Option<&graph::Edge> {
		self.available_outputs.get(output)
	}
	fn get_external_spent_output(&self, output: &Commitment) -> Option<&graph::Edge> {
		self.consumed_blockchain_outputs.get(output)
	}
	fn get_internal_spent_output(&self, output: &Commitment) -> Option<&graph::Edge> {
		self.graph.get_edge_by_commitment(output)
	}
}

/// Orphans contains the elements of the transaction graph that have not been
/// connected in full to the blockchain.
pub struct Orphans {
	graph: graph::DirectedGraph,

	// available_outputs are unspent outputs of the current orphan set,
	// maintained as edges with empty destinations.
	available_outputs: HashMap<Commitment, graph::Edge>,

	// missing_outputs are spending references (inputs) with missing
	// corresponding outputs, maintained as edges with empty sources.
	missing_outputs: HashMap<Commitment, graph::Edge>,

	// pool_connections are bidirectional edges which connect to the pool
	// graph. They should map one-to-one to pool graph available_outputs.
	// pool_connections should not be viewed authoritatively, they are
	// merely informational until the transaction is officially connected to
	// the pool.
	pool_connections: HashMap<Commitment, graph::Edge>,
}

impl Orphans {
	pub fn empty() -> Orphans {
		Orphans {
			graph: graph::DirectedGraph::empty(),
			available_outputs: HashMap::new(),
			missing_outputs: HashMap::new(),
			pool_connections: HashMap::new(),
		}
	}

	/// Checks for a double spent output, given the hash of the output,
	/// ONLY in the data maintained by the orphans set. This includes links
	/// to the pool as well as links internal to orphan transactions.
	/// Returns the transaction hash corresponding to the conflicting
	/// transaction.
	pub fn check_double_spend(&self, o: transaction::Output) -> Option<hash::Hash> {
		self.graph
			.get_edge_by_commitment(&o.commitment())
			.or(self.pool_connections.get(&o.commitment()))
			.map(|x| x.destination_hash().unwrap())
	}

	pub fn get_unknown_output(&self, output: &Commitment) -> Option<&graph::Edge> {
		self.missing_outputs.get(output)
	}

	/// Add an orphan transaction to the orphans set.
	///
	/// This method adds a given transaction (represented by the PoolEntry at
	/// orphan_entry) to the orphans set.
	///
	/// This method has no failure modes. All checks should be passed before
	/// entry.
	///
	/// Expects a HashMap at is_missing describing the indices of orphan_refs
	/// which correspond to missing (vs orphan-to-orphan) links.
	pub fn add_orphan_transaction(
		&mut self,
		orphan_entry: graph::PoolEntry,
		mut pool_refs: Vec<graph::Edge>,
		mut orphan_refs: Vec<graph::Edge>,
		is_missing: HashMap<usize, ()>,
		mut new_unspents: Vec<graph::Edge>,
	) {
		// Removing consumed available_outputs
		for (i, new_edge) in orphan_refs.drain(..).enumerate() {
			if is_missing.contains_key(&i) {
				self.missing_outputs.insert(
					new_edge.output_commitment(),
					new_edge,
				);
			} else {
				assert!(
					self.available_outputs
						.remove(&new_edge.output_commitment())
						.is_some()
				);
				self.graph.add_edge_only(new_edge);
			}
		}

		// Accounting for consumed blockchain and pool outputs
		for external_edge in pool_refs.drain(..) {
			self.pool_connections.insert(
				external_edge.output_commitment(),
				external_edge,
			);
		}

		// if missing_refs is the same length as orphan_refs, we have
		// no orphan-orphan links for this transaction and it is a
		// root transaction of the orphans set
		self.graph.add_vertex_only(
			orphan_entry,
			is_missing.len() == orphan_refs.len(),
		);

		// Adding the new unspents to the unspent map
		for unspent_output in new_unspents.drain(..) {
			self.available_outputs.insert(
				unspent_output.output_commitment(),
				unspent_output,
			);
		}
	}
}

impl TransactionGraphContainer for Orphans {
	fn get_graph(&self) -> &graph::DirectedGraph {
		&self.graph
	}
	fn get_available_output(&self, output: &Commitment) -> Option<&graph::Edge> {
		self.available_outputs.get(output)
	}
	fn get_external_spent_output(&self, output: &Commitment) -> Option<&graph::Edge> {
		self.pool_connections.get(output)
	}
	fn get_internal_spent_output(&self, output: &Commitment) -> Option<&graph::Edge> {
		self.graph.get_edge_by_commitment(output)
	}
}

/// Trait for types that embed a graph and connect to external state.
@@ -382,44 +425,43 @@ impl TransactionGraphContainer for Orphans {
/// in the child. This ensures that no descendent set must modify state in a
/// set of higher priority.
pub trait TransactionGraphContainer {
	/// Accessor for graph object
	fn get_graph(&self) -> &graph::DirectedGraph;
	/// Accessor for internal spents
	fn get_internal_spent_output(&self, output: &Commitment) -> Option<&graph::Edge>;
	/// Accessor for external unspents
	fn get_available_output(&self, output: &Commitment) -> Option<&graph::Edge>;
	/// Accessor for external spents
	fn get_external_spent_output(&self, output: &Commitment) -> Option<&graph::Edge>;

	/// Checks if the available_output set has the output at the given
	/// commitment
	fn has_available_output(&self, c: &Commitment) -> bool {
		self.get_available_output(c).is_some()
	}

	/// Checks if the pool has anything by this output already, between
	/// available outputs and internal ones.
	fn find_output(&self, c: &Commitment) -> Option<hash::Hash> {
		self.get_available_output(c)
			.or(self.get_internal_spent_output(c))
			.map(|x| x.source_hash().unwrap())
	}

	/// Search for a spent reference internal to the graph
	fn get_internal_spent(&self, c: &Commitment) -> Option<&graph::Edge> {
		self.get_internal_spent_output(c)
	}

	fn num_root_transactions(&self) -> usize {
		self.get_graph().len_roots()
	}

	fn num_transactions(&self) -> usize {
		self.get_graph().len_vertices()
	}

	fn num_output_edges(&self) -> usize {
		self.get_graph().len_edges()
	}
}
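
Since both Pool and Orphans implement this trait, a caller can classify an output uniformly. A hedged sketch; the function and the status wording are mine, not part of this commit:

	// Hypothetical generic query over either container.
	fn output_status<T: TransactionGraphContainer>(container: &T, c: &Commitment) -> &'static str {
		if container.has_available_output(c) {
			"unspent in this container"
		} else if container.get_internal_spent(c).is_some() {
			"spent by an internal edge"
		} else if container.get_external_spent_output(c).is_some() {
			"consumed from external state"
		} else {
			"unknown"
		}
	}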

@@ -57,8 +57,8 @@ impl Cuckoo {
	/// serialized block header.
	pub fn new(header: &[u8], sizeshift: u32) -> Cuckoo {
		let size = 1 << sizeshift;
		let hashed = blake2::blake2b::blake2b(32, &[], header);
		let hashed = hashed.as_bytes();

		let k0 = u8_to_u64(hashed, 0);
		let k1 = u8_to_u64(hashed, 8);
@@ -157,11 +157,8 @@ pub struct Miner {
}

impl MiningWorker for Miner {
	/// Creates a new miner
	fn new(ease: u32, sizeshift: u32, proof_size: usize) -> Miner {
		let size = 1 << sizeshift;
		let graph = vec![0; size + 1];
		let easiness = (ease as u64) * (size as u64) / 100;
@@ -173,11 +170,11 @@ impl MiningWorker for Miner {
			proof_size: proof_size,
		}
	}

	fn mine(&mut self, header: &[u8]) -> Result<Proof, Error> {
		let size = 1 << self.sizeshift;
		self.graph = vec![0; size + 1];
		self.cuckoo = Some(Cuckoo::new(header, self.sizeshift));
		self.mine_impl()
	}
}
@@ -193,8 +190,6 @@ enum CycleSol {
}

impl Miner {
	/// Searches for a solution
	pub fn mine_impl(&mut self) -> Result<Proof, Error> {
		let mut us = [0; MAXPATHLEN];
@@ -214,7 +209,7 @@ impl Miner {
			match sol {
				CycleSol::ValidProof(res) => {
					return Ok(Proof::new(res.to_vec()));
				}
				CycleSol::InvalidCycle(_) => continue,
				CycleSol::NoCycle => {
					self.update_graph(nu, &us, nv, &vs);

@@ -317,10 +312,10 @@ impl Miner {

/// Utility to transform 8 bytes of a byte array into a u64.
fn u8_to_u64(p: &[u8], i: usize) -> u64 {
	(p[i] as u64) | (p[i + 1] as u64) << 8 | (p[i + 2] as u64) << 16 |
		(p[i + 3] as u64) << 24 | (p[i + 4] as u64) << 32 | (p[i + 5] as u64) << 40 |
		(p[i + 6] as u64) << 48 | (p[i + 7] as u64) << 56
}

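A quick standalone check of the little-endian packing above (this test is mine, not part of the commit, e.g. dropped into the test module that follows); it should agree with `u64::from_le_bytes` on modern Rust:

	#[test]
	fn u8_to_u64_little_endian() {
		// byte 0 becomes the least significant byte of the result
		let bytes = [1u8, 0, 0, 0, 0, 0, 0, 0, 2];
		assert_eq!(u8_to_u64(&bytes, 0), 1);
		assert_eq!(u8_to_u64(&bytes, 1), 2u64 << 56);
	}
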
#[cfg(test)]
@@ -329,31 +324,183 @@ mod test {
	use core::core::Proof;

	static V1: [u32; 42] = [0x1fe9, 0x2050, 0x4581, 0x6322, 0x65ab, 0xb3c1, 0xc1a4,
		0xe257, 0x106ae, 0x17b11, 0x202d4, 0x2705d, 0x2deb2, 0x2f80e,
		0x32298, 0x34782, 0x35c5a, 0x37458, 0x38f28, 0x406b2, 0x40e34,
		0x40fc6, 0x42220, 0x42d13, 0x46c0f, 0x4fd47, 0x55ad2, 0x598f7,
		0x5aa8f, 0x62aa3, 0x65725, 0x65dcb, 0x671c7, 0x6eb20, 0x752fe,
		0x7594f, 0x79b9c, 0x7f775, 0x81635, 0x8401c, 0x844e5, 0x89fa8];
	static V2: [u32; 42] = [0x2a37, 0x7557, 0xa3c3, 0xfce6, 0x1248e, 0x15837, 0x1827f,
		0x18a93, 0x1a7dd, 0x1b56b, 0x1ceb4, 0x1f962, 0x1fe2a, 0x29cb9,
		0x2f30e, 0x2f771, 0x336bf, 0x34355, 0x391d7, 0x39495, 0x3be0c,
		0x463be, 0x4d0c2, 0x4eead, 0x50214, 0x520de, 0x52a86, 0x53818,
		0x53b3b, 0x54c0b, 0x572fa, 0x5d79c, 0x5e3c2, 0x6769e, 0x6a0fe,
		0x6d835, 0x6fc7c, 0x70f03, 0x79d4a, 0x7b03e, 0x81e09, 0x9bd44];
	static V3: [u32; 42] = [0x8158, 0x9f18, 0xc4ba, 0x108c7, 0x11caa, 0x13b82, 0x1618f,
		0x1c83b, 0x1ec89, 0x24354, 0x28864, 0x2a0fb, 0x2ce50, 0x2e8fa,
		0x32b36, 0x343e6, 0x34dc9, 0x36881, 0x3ffca, 0x40f79, 0x42721,
		0x43b8c, 0x44b9d, 0x47ed3, 0x4cd34, 0x5278a, 0x5ab64, 0x5b4d4,
		0x5d842, 0x5fa33, 0x6464e, 0x676ee, 0x685d6, 0x69df0, 0x6a5fd,
		0x6bda3, 0x72544, 0x77974, 0x7908c, 0x80e67, 0x81ef4, 0x8d882];
	// cuckoo28 at 50% edges of letter 'u'
	static V4: [u32; 42] = [0x1CBBFD, 0x2C5452, 0x520338, 0x6740C5, 0x8C6997, 0xC77150, 0xFD4972,
		0x1060FA7, 0x11BFEA0, 0x1343E8D, 0x14CE02A, 0x1533515, 0x1715E61, 0x1996D9B,
		0x1CB296B, 0x1FCA180, 0x209A367, 0x20AD02E, 0x23CD2E4, 0x2A3B360, 0x2DD1C0C,
		0x333A200, 0x33D77BC, 0x3620C78, 0x3DD7FB8, 0x3FBFA49, 0x41BDED2, 0x4A86FD9,
		0x570DE24, 0x57CAB86, 0x594B886, 0x5C74C94, 0x5DE7572, 0x60ADD6F, 0x635918B,
		0x6C9E120, 0x6EFA583, 0x7394ACA, 0x7556A23, 0x77F70AA, 0x7CF750A, 0x7F60790];

	/// Find a 42-cycle on Cuckoo20 at 75% easiness and verify against a few
	/// known cycle proofs
@@ -372,29 +519,52 @@ mod test {

	#[test]
	fn validate20_vectors() {
		assert!(Cuckoo::new(&[49], 20).verify(
			Proof::new(V1.to_vec().clone()),
			75,
		));
		assert!(Cuckoo::new(&[50], 20).verify(
			Proof::new(V2.to_vec().clone()),
			70,
		));
		assert!(Cuckoo::new(&[51], 20).verify(
			Proof::new(V3.to_vec().clone()),
			70,
		));
	}

	#[test]
	fn validate28_vectors() {
		let mut test_header = [0; 32];
		test_header[0] = 24;
		assert!(Cuckoo::new(&test_header, 28).verify(
			Proof::new(V4.to_vec().clone()),
			50,
		));
	}

	#[test]
	fn validate_fail() {
		// edge checks
		assert!(!Cuckoo::new(&[49], 20).verify(Proof::new(vec![0; 42]), 75));
		assert!(!Cuckoo::new(&[49], 20).verify(
			Proof::new(vec![0xffff; 42]),
			75,
		));
		// wrong data for proof
		assert!(!Cuckoo::new(&[50], 20).verify(
			Proof::new(V1.to_vec().clone()),
			75,
		));
		let mut test_header = [0; 32];
		test_header[0] = 24;
		assert!(!Cuckoo::new(&test_header, 20).verify(
			Proof::new(
				V4.to_vec().clone(),
			),
			50,
		));
	}

	#[test]

@@ -62,14 +62,14 @@ use cuckoo::{Cuckoo, Error};
///

pub trait MiningWorker {
	/// This only sets parameters and does initialisation work now
	fn new(ease: u32, sizeshift: u32, proof_size: usize) -> Self
	where
		Self: Sized;

	/// Actually perform a mining attempt on the given input and
	/// return a proof if found
	fn mine(&mut self, header: &[u8]) -> Result<Proof, Error>;
}
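
A sketch of the trait's intended use: any worker can be constructed and driven generically. The helper is hypothetical; `header_bytes` stands in for a serialized block header:

	// Hypothetical generic driver over any MiningWorker implementation.
	fn try_mine<W: MiningWorker>(
		ease: u32,
		sizeshift: u32,
		proof_size: usize,
		header_bytes: &[u8],
	) -> Result<Proof, Error> {
		let mut worker = W::new(ease, sizeshift, proof_size);
		worker.mine(header_bytes)
	}
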
/// Validates the proof of work of a given header, and that the proof of work
|
||||
|
@ -85,43 +85,54 @@ pub fn verify_size(bh: &BlockHeader, cuckoo_sz: u32) -> bool {
|
|||
|
||||
/// Uses the much easier Cuckoo20 (mostly for
|
||||
/// tests).
|
||||
pub fn pow20<T: MiningWorker>(miner:&mut T, bh: &mut BlockHeader, diff: Difficulty) -> Result<(), Error> {
|
||||
pub fn pow20<T: MiningWorker>(
|
||||
miner: &mut T,
|
||||
bh: &mut BlockHeader,
|
||||
diff: Difficulty,
|
||||
) -> Result<(), Error> {
|
||||
pow_size(miner, bh, diff, 20)
|
||||
}

/// Mines a genesis block, using the config specified miner if specified. Otherwise,
/// Mines a genesis block, using the config specified miner if specified.
/// Otherwise,
/// uses the internal miner
///

pub fn mine_genesis_block(miner_config:Option<types::MinerConfig>)->Option<core::core::Block> {
pub fn mine_genesis_block(miner_config: Option<types::MinerConfig>) -> Option<core::core::Block> {
	info!("Starting miner loop for Genesis Block");
	let mut gen = genesis::genesis();
	let diff = gen.header.difficulty.clone();

	let sz = global::sizeshift() as u32;
	let proof_size = global::proofsize();

	let mut miner:Box<MiningWorker> = match miner_config {
	let mut miner: Box<MiningWorker> = match miner_config {
		Some(c) => {
			if c.use_cuckoo_miner {
			if c.use_cuckoo_miner {
				let mut p = plugin::PluginMiner::new(consensus::EASINESS, sz, proof_size);
				p.init(c.clone());
				Box::new(p)

			} else {
				Box::new(cuckoo::Miner::new(consensus::EASINESS, sz, proof_size))
			}
		},
		}
		}
		None => Box::new(cuckoo::Miner::new(consensus::EASINESS, sz, proof_size)),
	};
	pow_size(&mut *miner, &mut gen.header, diff, sz as u32).unwrap();
	Some(gen)
}
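
// Illustrative call (not in the diff): with no MinerConfig the internal cuckoo
// miner is used, the same path the genesis_pow test below exercises.
//
//   global::set_mining_mode(MiningParameterMode::AutomatedTesting);
//   let gen = mine_genesis_block(None).expect("genesis block mining failed");
//   assert!(verify_size(&gen.header, global::sizeshift() as u32));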

/// Runs a proof of work computation over the provided block using the provided Mining Worker,
/// until the required difficulty target is reached. May take a while for a low target...
pub fn pow_size<T: MiningWorker + ?Sized>(miner:&mut T, bh: &mut BlockHeader,
	diff: Difficulty, _: u32) -> Result<(), Error> {
/// Runs a proof of work computation over the provided block using the provided
/// Mining Worker,
/// until the required difficulty target is reached. May take a while for a low
/// target...
pub fn pow_size<T: MiningWorker + ?Sized>(
	miner: &mut T,
	bh: &mut BlockHeader,
	diff: Difficulty,
	_: u32,
) -> Result<(), Error> {
	let start_nonce = bh.nonce;

	// if we're in production mode, try the pre-mined solution first

@@ -166,17 +177,26 @@ mod test {
	use global;
	use core::core::target::Difficulty;
	use core::genesis;
	use core::consensus::MINIMUM_DIFFICULTY;
	use core::consensus::MINIMUM_DIFFICULTY;
	use core::global::MiningParameterMode;

	#[test]
	fn genesis_pow() {
		global::set_mining_mode(MiningParameterMode::AutomatedTesting);
		global::set_mining_mode(MiningParameterMode::AutomatedTesting);
		let mut b = genesis::genesis();
		b.header.nonce = 310;
		let mut internal_miner = cuckoo::Miner::new(consensus::EASINESS, global::sizeshift() as u32, global::proofsize());
		pow_size(&mut internal_miner, &mut b.header, Difficulty::from_num(MINIMUM_DIFFICULTY), global::sizeshift() as u32).unwrap();
		let mut internal_miner = cuckoo::Miner::new(
			consensus::EASINESS,
			global::sizeshift() as u32,
			global::proofsize(),
		);
		pow_size(
			&mut internal_miner,
			&mut b.header,
			Difficulty::from_num(MINIMUM_DIFFICULTY),
			global::sizeshift() as u32,
		).unwrap();
		assert!(b.header.nonce != 310);
		assert!(b.header.pow.clone().to_difficulty() >= Difficulty::from_num(MINIMUM_DIFFICULTY));
		assert!(verify_size(&b.header, global::sizeshift() as u32));

@@ -30,7 +30,7 @@ use types::MinerConfig;
use std::sync::Mutex;

use cuckoo_miner::{CuckooMiner, CuckooPluginManager, CuckooMinerConfig, CuckooMinerSolution,
	CuckooMinerDeviceStats, CuckooMinerError};
	CuckooMinerDeviceStats, CuckooMinerError};

// For now, we're just going to keep a static reference around to the loaded
// config

@@ -112,7 +112,7 @@ impl PluginMiner {
		let sz = global::sizeshift();

		let mut cuckoo_configs = Vec::new();
		let mut index=0;
		let mut index = 0;
		for f in plugin_vec_filters {
			// So this is built dynamically based on the plugin implementation
			// type and the consensus sizeshift

@@ -126,12 +126,12 @@ impl PluginMiner {
			info!("Mining plugin {} - {}", index, caps[0].full_path.clone());
			config.plugin_full_path = caps[0].full_path.clone();
			if let Some(l) = miner_config.clone().cuckoo_miner_plugin_config {
				if let Some(lp) = l[index].parameter_list.clone(){
				if let Some(lp) = l[index].parameter_list.clone() {
					config.parameter_list = lp.clone();
				}
			}
			cuckoo_configs.push(config);
			index+=1;
			index += 1;
		}
		// Store this config now, because we just want one instance
		// of the plugin lib per invocation now

@@ -141,7 +141,7 @@ impl PluginMiner {
		let result = CuckooMiner::new(cuckoo_configs.clone());
		if let Err(e) = result {
			error!("Error initializing mining plugin: {:?}", e);
			//error!("Accepted values are: {:?}", caps[0].parameters);
			// error!("Accepted values are: {:?}", caps[0].parameters);
			panic!("Unable to init mining plugin.");
		}

@@ -167,8 +167,8 @@ impl PluginMiner {
	}

	/// Get stats
	pub fn get_stats(&self, index:usize) -> Result<Vec<CuckooMinerDeviceStats>, CuckooMinerError> {
		self.miner.as_ref().unwrap().get_stats(index)
	pub fn get_stats(&self, index: usize) -> Result<Vec<CuckooMinerDeviceStats>, CuckooMinerError> {
		self.miner.as_ref().unwrap().get_stats(index)
	}
}

@@ -185,7 +185,7 @@ impl MiningWorker for PluginMiner {
	/// And simply calls the mine function of the loaded plugin
	/// returning whether a solution was found and the solution itself

	fn mine(&mut self, header: &[u8]) -> Result<Proof, cuckoo::Error>{
	fn mine(&mut self, header: &[u8]) -> Result<Proof, cuckoo::Error> {
		let result = self.miner
			.as_mut()
			.unwrap()

@@ -19,18 +19,18 @@ use std::collections::HashMap;
/// CuckooMinerPlugin configuration
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CuckooMinerPluginConfig {
	///The type of plugin to load (i.e. filters on filename)
	pub type_filter : String,
	/// The type of plugin to load (i.e. filters on filename)
	pub type_filter: String,

	///Parameters for this plugin
	pub parameter_list : Option<HashMap<String, u32>>,
	/// Parameters for this plugin
	pub parameter_list: Option<HashMap<String, u32>>,
}

impl Default for CuckooMinerPluginConfig {
	fn default() -> CuckooMinerPluginConfig {
		CuckooMinerPluginConfig {
			type_filter : String::new(),
			parameter_list : None,
			type_filter: String::new(),
			parameter_list: None,
		}
	}
}
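
// A hedged example (not in the diff) of filling in a plugin config by hand;
// the filter string and parameter name are hypothetical, plugin-dependent
// values.
//
//   let mut params = HashMap::new();
//   params.insert("NUM_THREADS".to_string(), 4);
//   let config = CuckooMinerPluginConfig {
//       type_filter: "cuckoo_simple".to_string(),
//       parameter_list: Some(params),
//   };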

@@ -374,10 +374,10 @@ fn wallet_command(wallet_args: &ArgMatches) {
				dest = d;
			}
			wallet::issue_send_tx(&wallet_config, &key, amount, dest.to_string()).unwrap();
		},
		}
		("info", Some(_)) => {
			wallet::show_info(&wallet_config, &key);
		},
		}
		_ => panic!("Unknown wallet command, use 'grin help wallet' for details"),
	}
}

@@ -119,7 +119,7 @@ impl AppendOnlyFile {
		} as u64;

		// write the buffer, except if we prune offsets in the current span,
		// in which case we skip
		// in which case we skip
		let mut buf_start = 0;
		while prune_offs[prune_pos] >= read && prune_offs[prune_pos] < read + len {
			let prune_at = prune_offs[prune_pos] as usize;

@@ -188,7 +188,11 @@ impl RemoveLog {
		if last_offs == 0 {
			self.removed = vec![];
		} else {
			self.removed = self.removed.iter().filter(|&&(_, idx)| { idx < last_offs }).map(|x| *x).collect();
			self.removed = self.removed
				.iter()
				.filter(|&&(_, idx)| idx < last_offs)
				.map(|x| *x)
				.collect();
		}
		Ok(())
	}

@@ -230,8 +234,7 @@ impl RemoveLog {

	/// Whether the remove log currently includes the provided position.
	fn includes(&self, elmt: u64) -> bool {
		include_tuple(&self.removed, elmt) ||
			include_tuple(&self.removed_tmp, elmt)
		include_tuple(&self.removed, elmt) || include_tuple(&self.removed_tmp, elmt)
	}

	/// Number of positions stored in the remove log.

@@ -305,7 +308,7 @@ where
		// Third, check if it's in the pruned list or its offset
		let shift = self.pruned_nodes.get_shift(position);
		if let None = shift {
			return None
			return None;
		}

		// The MMR starts at 1, our binary backend starts at 0

@@ -329,7 +332,9 @@ where

	fn rewind(&mut self, position: u64, index: u32) -> Result<(), String> {
		assert!(self.buffer.len() == 0, "Rewind on non empty buffer.");
		self.remove_log.truncate(index).map_err(|e| format!("Could not truncate remove log: {}", e))?;
		self.remove_log.truncate(index).map_err(|e| {
			format!("Could not truncate remove log: {}", e)
		})?;
		self.rewind = Some((position, index, self.buffer_index));
		self.buffer_index = position as usize;
		Ok(())

@@ -340,7 +345,9 @@ where
		if self.buffer.used_size() > 0 {
			for position in &positions {
				let pos_sz = *position as usize;
				if pos_sz > self.buffer_index && pos_sz - 1 < self.buffer_index + self.buffer.len() {
				if pos_sz > self.buffer_index &&
					pos_sz - 1 < self.buffer_index + self.buffer.len()
				{
					self.buffer.remove(vec![*position], index).unwrap();
				}
			}

@@ -370,7 +377,7 @@ where
			remove_log: rm_log,
			buffer: VecBackend::new(),
			buffer_index: (sz as usize) / record_len,
			pruned_nodes: pmmr::PruneList{pruned_nodes: prune_list},
			pruned_nodes: pmmr::PruneList { pruned_nodes: prune_list },
			rewind: None,
		})
	}

@@ -398,7 +405,10 @@ where
			if let Err(e) = self.hashsum_file.append(&ser::ser_vec(&hs).unwrap()[..]) {
				return Err(io::Error::new(
					io::ErrorKind::Interrupted,
					format!("Could not write to log storage, disk full? {:?}", e)
					format!(
						"Could not write to log storage, disk full? {:?}",
						e
					),
				));
			}
		}

@@ -407,7 +417,7 @@ where
		self.buffer_index = self.buffer_index + self.buffer.len();
		self.buffer.clear();
		self.remove_log.flush()?;
		self.hashsum_file.sync()?;
		self.hashsum_file.sync()?;
		self.rewind = None;
		Ok(())
	}

@@ -431,12 +441,14 @@ where
	/// to decide whether the remove log has reached its maximum length,
	/// otherwise the RM_LOG_MAX_NODES default value is used.
	///
	/// TODO whatever is calling this should also clean up the commit to position
	/// TODO whatever is calling this should also clean up the commit to
	/// position
	/// index in db
	pub fn check_compact(&mut self, max_len: usize) -> io::Result<()> {
		if !(max_len > 0 && self.remove_log.len() > max_len ||
			max_len == 0 && self.remove_log.len() > RM_LOG_MAX_NODES) {
			return Ok(())
			max_len == 0 && self.remove_log.len() > RM_LOG_MAX_NODES)
		{
			return Ok(());
		}

		// 0. validate none of the nodes in the rm log are in the prune list (to

@@ -444,8 +456,10 @@ where
		for pos in &self.remove_log.removed[..] {
			if let None = self.pruned_nodes.pruned_pos(pos.0) {
				// TODO we likely can recover from this by directly jumping to 3
				error!("The remove log contains nodes that are already in the pruned \
					list, a previous compaction likely failed.");
				error!(
					"The remove log contains nodes that are already in the pruned \
					list, a previous compaction likely failed."
				);
				return Ok(());
			}
		}

@@ -454,20 +468,34 @@ where
		// remove list
		let tmp_prune_file = format!("{}/{}.prune", self.data_dir, PMMR_DATA_FILE);
		let record_len = (32 + T::sum_len()) as u64;
		let to_rm = self.remove_log.removed.iter().map(|&(pos, _)| {
			let shift = self.pruned_nodes.get_shift(pos);
			(pos - 1 - shift.unwrap()) * record_len
		}).collect();
		self.hashsum_file.save_prune(tmp_prune_file.clone(), to_rm, record_len)?;
		let to_rm = self.remove_log
			.removed
			.iter()
			.map(|&(pos, _)| {
				let shift = self.pruned_nodes.get_shift(pos);
				(pos - 1 - shift.unwrap()) * record_len
			})
			.collect();
		self.hashsum_file.save_prune(
			tmp_prune_file.clone(),
			to_rm,
			record_len,
		)?;

		// 2. update the prune list and save it in place
		for &(rm_pos, _) in &self.remove_log.removed[..] {
			self.pruned_nodes.add(rm_pos);
		}
		write_vec(format!("{}/{}", self.data_dir, PMMR_PRUNED_FILE), &self.pruned_nodes.pruned_nodes)?;
		write_vec(
			format!("{}/{}", self.data_dir, PMMR_PRUNED_FILE),
			&self.pruned_nodes.pruned_nodes,
		)?;

		// 3. move the compact copy to the hashsum file and re-open it
		fs::rename(tmp_prune_file.clone(), format!("{}/{}", self.data_dir, PMMR_DATA_FILE))?;
		fs::rename(
			tmp_prune_file.clone(),
			format!("{}/{}", self.data_dir, PMMR_DATA_FILE),
		)?;
		self.hashsum_file = AppendOnlyFile::open(format!("{}/{}", self.data_dir, PMMR_DATA_FILE))?;
		self.hashsum_file.sync()?;

@@ -481,7 +509,9 @@ where

// Read an ordered vector of scalars from a file.
fn read_ordered_vec<T>(path: String) -> io::Result<Vec<T>>
	where T: ser::Readable + cmp::Ord {
where
	T: ser::Readable + cmp::Ord,
{

	let file_path = Path::new(&path);
	let mut ovec = Vec::with_capacity(1000);

@@ -506,7 +536,10 @@ fn read_ordered_vec<T>(path: String) -> io::Result<Vec<T>>
			Err(_) => {
				return Err(io::Error::new(
					io::ErrorKind::InvalidData,
					format!("Corrupted storage, could not read file at {}", path),
					format!(
						"Corrupted storage, could not read file at {}",
						path
					),
				));
			}
		}

@@ -519,13 +552,16 @@ fn read_ordered_vec<T>(path: String) -> io::Result<Vec<T>>
}

fn write_vec<T>(path: String, v: &Vec<T>) -> io::Result<()>
	where T: ser::Writeable {

where
	T: ser::Writeable,
{

	let mut file_path = File::create(&path)?;
	ser::serialize(&mut file_path, v).map_err(|_| {
		io::Error::new(
			io::ErrorKind::InvalidInput,
			format!("Failed to serialize data when writing to {}", path))
			format!("Failed to serialize data when writing to {}", path),
		)
	})?;
	Ok(())
}
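
// Usage sketch (not in the diff): write_vec and read_ordered_vec round-trip a
// vector through grin's ser traits; u64 qualifies since the prune list saved
// above is a Vec<u64>. The file path is hypothetical.
//
//   write_vec("/tmp/prune_list.bin".to_string(), &vec![1u64, 4, 11])?;
//   let positions: Vec<u64> = read_ordered_vec("/tmp/prune_list.bin".to_string())?;
//   assert_eq!(positions, vec![1, 4, 11]);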

@@ -69,7 +69,7 @@ fn sumtree_prune_compact() {
	let mut backend = store::sumtree::PMMRBackend::new(data_dir).unwrap();
	let mmr_size = load(0, &elems[..], &mut backend);
	backend.sync().unwrap();

	// save the root
	let root: HashSum<TestElem>;
	{

@@ -113,7 +113,7 @@ fn sumtree_reload() {
		let mut backend = store::sumtree::PMMRBackend::new(data_dir.clone()).unwrap();
		mmr_size = load(0, &elems[..], &mut backend);
		backend.sync().unwrap();

		// save the root and prune some nodes so we have prune data
		{
			let mut pmmr = PMMR::at(&mut backend, mmr_size);

@@ -164,8 +164,7 @@ fn setup() -> (String, Vec<TestElem>) {
	(data_dir, elems)
}

fn load(pos: u64, elems: &[TestElem],
	backend: &mut store::sumtree::PMMRBackend<TestElem>) -> u64 {
fn load(pos: u64, elems: &[TestElem], backend: &mut store::sumtree::PMMRBackend<TestElem>) -> u64 {

	let mut pmmr = PMMR::at(backend, pos);
	for elem in elems {

@@ -22,27 +22,30 @@ use std::num;

/// Encode the provided bytes into a hex string
pub fn to_hex(bytes: Vec<u8>) -> String {
    let mut s = String::new();
    for byte in bytes {
        write!(&mut s, "{:02x}", byte).expect("Unable to write");
    }
    s
	let mut s = String::new();
	for byte in bytes {
		write!(&mut s, "{:02x}", byte).expect("Unable to write");
	}
	s
}

/// Decode a hex string into bytes.
pub fn from_hex(hex_str: String) -> Result<Vec<u8>, num::ParseIntError> {
    let hex_trim = if &hex_str[..2] == "0x" {
        hex_str[2..].to_owned()
    } else {
        hex_str.clone()
    };
    split_n(&hex_trim.trim()[..], 2).iter()
        .map(|b| u8::from_str_radix(b, 16))
        .collect::<Result<Vec<u8>, _>>()
	let hex_trim = if &hex_str[..2] == "0x" {
		hex_str[2..].to_owned()
	} else {
		hex_str.clone()
	};
	split_n(&hex_trim.trim()[..], 2)
		.iter()
		.map(|b| u8::from_str_radix(b, 16))
		.collect::<Result<Vec<u8>, _>>()
}

fn split_n(s: &str, n: usize) -> Vec<&str> {
	(0 .. (s.len() - n + 1)/2 + 1).map(|i| &s[2*i .. 2*i + n]).collect()
	(0..(s.len() - n + 1) / 2 + 1)
		.map(|i| &s[2 * i..2 * i + n])
		.collect()
}

#[cfg(test)]

@@ -59,7 +62,13 @@ mod test {
	#[test]
	fn test_from_hex() {
		assert_eq!(from_hex("00000000".to_string()).unwrap(), vec![0, 0, 0, 0]);
		assert_eq!(from_hex("0a0b0c0d".to_string()).unwrap(), vec![10, 11, 12, 13]);
		assert_eq!(from_hex("000000ff".to_string()).unwrap(), vec![0, 0, 0, 255]);
		assert_eq!(
			from_hex("0a0b0c0d".to_string()).unwrap(),
			vec![10, 11, 12, 13]
		);
		assert_eq!(
			from_hex("000000ff".to_string()).unwrap(),
			vec![0, 0, 0, 255]
		);
	}
}
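
// Round-trip sketch (not part of the diff): to_hex and from_hex invert each
// other for well-formed input, and from_hex also tolerates a "0x" prefix:
//
//   assert_eq!(to_hex(vec![0, 0, 0, 255]), "000000ff");
//   assert_eq!(from_hex("0x000000ff".to_string()).unwrap(), vec![0, 0, 0, 255]);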

@@ -28,26 +28,26 @@ pub use hex::*;
// (borrowed).
#[derive(Clone)]
pub struct OneTime<T> {
    inner: RefCell<Option<T>>,
	inner: RefCell<Option<T>>,
}

unsafe impl<T> Sync for OneTime<T> {}
unsafe impl<T> Send for OneTime<T> {}

impl<T> OneTime<T> {
    /// Builds a new uninitialized OneTime.
    pub fn new() -> OneTime<T> {
        OneTime { inner: RefCell::new(None) }
    }
	/// Builds a new uninitialized OneTime.
	pub fn new() -> OneTime<T> {
		OneTime { inner: RefCell::new(None) }
	}

    /// Initializes the OneTime, should only be called once after construction.
    pub fn init(&self, value: T) {
        let mut inner_mut = self.inner.borrow_mut();
        *inner_mut = Some(value);
    }
	/// Initializes the OneTime, should only be called once after construction.
	pub fn init(&self, value: T) {
		let mut inner_mut = self.inner.borrow_mut();
		*inner_mut = Some(value);
	}

    /// Borrows the OneTime, should only be called after initialization.
    pub fn borrow(&self) -> Ref<T> {
        Ref::map(self.inner.borrow(), |o| o.as_ref().unwrap())
    }
	/// Borrows the OneTime, should only be called after initialization.
	pub fn borrow(&self) -> Ref<T> {
		Ref::map(self.inner.borrow(), |o| o.as_ref().unwrap())
	}
}
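
// Usage sketch (illustrative): construct empty, init exactly once, then borrow
// freely; a borrow before init panics on the inner unwrap, which is why the
// doc comments insist on the call order.
//
//   let port: OneTime<u16> = OneTime::new();
//   port.init(13414);
//   assert_eq!(*port.borrow(), 13414);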

@@ -22,11 +22,7 @@ use types::*;
use util;

fn refresh_output(
	out: &mut OutputData,
	api_out: Option<api::Output>,
	tip: &api::Tip,
) {
fn refresh_output(out: &mut OutputData, api_out: Option<api::Output>, tip: &api::Tip) {
	if let Some(api_out) = api_out {
		out.height = api_out.height;
		out.lock_height = api_out.lock_height;

@@ -38,25 +34,23 @@ fn refresh_output(
		} else {
			out.status = OutputStatus::Unspent;
		}
	} else if vec![
		OutputStatus::Unspent,
		OutputStatus::Locked
	].contains(&out.status) {
	} else if vec![OutputStatus::Unspent, OutputStatus::Locked].contains(&out.status) {
		out.status = OutputStatus::Spent;
	}
}

/// Goes through the list of outputs that haven't been spent yet and check
/// with a node whether their status has changed.
pub fn refresh_outputs(config: &WalletConfig, ext_key: &ExtendedKey) -> Result<(), Error>{
pub fn refresh_outputs(config: &WalletConfig, ext_key: &ExtendedKey) -> Result<(), Error> {
	let secp = secp::Secp256k1::with_caps(secp::ContextFlag::Commit);
	let tip = get_tip(config)?;

	WalletData::with_wallet(&config.data_file_dir, |wallet_data| {
		// check each output that's not spent
		for mut out in wallet_data.outputs
			.iter_mut()
			.filter(|out| out.status != OutputStatus::Spent) {
		for mut out in wallet_data.outputs.iter_mut().filter(|out| {
			out.status != OutputStatus::Spent
		})
		{

			// figure out the commitment
			// TODO check the pool for unconfirmed

@@ -66,8 +60,9 @@ pub fn refresh_outputs(config: &WalletConfig, ext_key: &ExtendedKey) -> Result<(
			match get_output_by_commitment(config, commitment) {
				Ok(api_out) => refresh_output(&mut out, api_out, &tip),
				Err(_) => {
					//TODO find error with connection and return
					//error!("Error contacting server node at {}. Is it running?", config.check_node_api_http_addr);
					// TODO find error with connection and return
					// error!("Error contacting server node at {}. Is it running?",
					// config.check_node_api_http_addr);
				}
			}
		}

@@ -76,14 +71,14 @@ pub fn refresh_outputs(config: &WalletConfig, ext_key: &ExtendedKey) -> Result<(

fn get_tip(config: &WalletConfig) -> Result<api::Tip, Error> {
	let url = format!("{}/v1/chain/1", config.check_node_api_http_addr);
	api::client::get::<api::Tip>(url.as_str())
		.map_err(|e| Error::Node(e))
	api::client::get::<api::Tip>(url.as_str()).map_err(|e| Error::Node(e))
}

// queries a reachable node for a given output, checking whether it's been confirmed
// queries a reachable node for a given output, checking whether it's been
// confirmed
fn get_output_by_commitment(
	config: &WalletConfig,
	commit: pedersen::Commitment
	commit: pedersen::Commitment,
) -> Result<Option<api::Output>, Error> {
	let url = format!(
		"{}/v1/chain/utxo/{}",
@@ -207,8 +207,9 @@ impl ExtendedKey {

		let mut secret_key = SecretKey::from_slice(&secp, &derived.as_bytes()[0..32])
			.expect("Error deriving key");
		secret_key.add_assign(secp, &self.key)
			.expect("Error deriving key");
		secret_key.add_assign(secp, &self.key).expect(
			"Error deriving key",
		);
		// TODO check if key != 0 ?

		let mut chain_code: [u8; 32] = [0; 32];

@@ -241,18 +242,26 @@ mod test {
		let s = Secp256k1::new();
		let seed = from_hex("000102030405060708090a0b0c0d0e0f");
		let extk = ExtendedKey::from_seed(&s, &seed.as_slice()).unwrap();
		let sec =
			from_hex("c3f5ae520f474b390a637de4669c84d0ed9bbc21742577fac930834d3c3083dd");
		let sec = from_hex(
			"c3f5ae520f474b390a637de4669c84d0ed9bbc21742577fac930834d3c3083dd",
		);
		let secret_key = SecretKey::from_slice(&s, sec.as_slice()).unwrap();
		let chaincode =
			from_hex("e7298e68452b0c6d54837670896e1aee76b118075150d90d4ee416ece106ae72");
		let chaincode = from_hex(
			"e7298e68452b0c6d54837670896e1aee76b118075150d90d4ee416ece106ae72",
		);
		let identifier = from_hex("942b6c0bd43bdcb24f3edfe7fadbc77054ecc4f2");
		let fingerprint = from_hex("942b6c0b");
		let depth = 0;
		let n_child = 0;
		assert_eq!(extk.key, secret_key);
		assert_eq!(extk.identifier(), Identifier::from_bytes(identifier.as_slice()));
		assert_eq!(extk.fingerprint, Fingerprint::from_bytes(fingerprint.as_slice()));
		assert_eq!(
			extk.identifier(),
			Identifier::from_bytes(identifier.as_slice())
		);
		assert_eq!(
			extk.fingerprint,
			Fingerprint::from_bytes(fingerprint.as_slice())
		);
		assert_eq!(
			extk.identifier().fingerprint(),
			Fingerprint::from_bytes(fingerprint.as_slice())

@@ -269,19 +278,27 @@ mod test {
		let seed = from_hex("000102030405060708090a0b0c0d0e0f");
		let extk = ExtendedKey::from_seed(&s, &seed.as_slice()).unwrap();
		let derived = extk.derive(&s, 0).unwrap();
		let sec =
			from_hex("d75f70beb2bd3b56f9b064087934bdedee98e4b5aae6280c58b4eff38847888f");
		let sec = from_hex(
			"d75f70beb2bd3b56f9b064087934bdedee98e4b5aae6280c58b4eff38847888f",
		);
		let secret_key = SecretKey::from_slice(&s, sec.as_slice()).unwrap();
		let chaincode =
			from_hex("243cb881e1549e714db31d23af45540b13ad07941f64a786bbf3313b4de1df52");
		let chaincode = from_hex(
			"243cb881e1549e714db31d23af45540b13ad07941f64a786bbf3313b4de1df52",
		);
		let fingerprint = from_hex("942b6c0b");
		let identifier = from_hex("8b011f14345f3f0071e85f6eec116de1e575ea10");
		let identifier_fingerprint = from_hex("8b011f14");
		let depth = 1;
		let n_child = 0;
		assert_eq!(derived.key, secret_key);
		assert_eq!(derived.identifier(), Identifier::from_bytes(identifier.as_slice()));
		assert_eq!(derived.fingerprint, Fingerprint::from_bytes(fingerprint.as_slice()));
		assert_eq!(
			derived.identifier(),
			Identifier::from_bytes(identifier.as_slice())
		);
		assert_eq!(
			derived.fingerprint,
			Fingerprint::from_bytes(fingerprint.as_slice())
		);
		assert_eq!(
			derived.identifier().fingerprint(),
			Fingerprint::from_bytes(identifier_fingerprint.as_slice())
@@ -27,9 +27,10 @@ pub fn show_info(config: &WalletConfig, ext_key: &ExtendedKey) {
		println!("Outputs - ");
		println!("fingerprint, n_child, height, lock_height, status, value");
		println!("----------------------------------");
		for out in &mut wallet_data.outputs
			.iter()
			.filter(|o| o.fingerprint == ext_key.fingerprint ) {
		for out in &mut wallet_data.outputs.iter().filter(|o| {
			o.fingerprint == ext_key.fingerprint
		})
		{
			let key = ext_key.derive(&secp, out.n_child).unwrap();

			println!(

@@ -50,7 +50,7 @@
//! So we may as well have it in place already.

use std::convert::From;
use secp::{self};
use secp;
use secp::key::SecretKey;

use core::core::{Block, Transaction, TxKernel, Output, build};
@@ -72,16 +72,15 @@ struct TxWrapper {
pub fn receive_json_tx(
	config: &WalletConfig,
	ext_key: &ExtendedKey,
	partial_tx_str: &str
	partial_tx_str: &str,
) -> Result<(), Error> {
	let (amount, blinding, partial_tx) = partial_tx_from_json(partial_tx_str)?;
	let final_tx = receive_transaction(&config, ext_key, amount, blinding, partial_tx)?;
	let tx_hex = util::to_hex(ser::ser_vec(&final_tx).unwrap());

	let url = format!("{}/v1/pool/push", config.check_node_api_http_addr.as_str());
	let _: () = api::client::post(url.as_str(), &TxWrapper { tx_hex: tx_hex }).map_err(|e| {
		Error::Node(e)
	})?;
	let _: () = api::client::post(url.as_str(), &TxWrapper { tx_hex: tx_hex })
		.map_err(|e| Error::Node(e))?;
	Ok(())
}

@@ -102,7 +101,7 @@ impl ApiEndpoint for WalletReceiver {
	fn operations(&self) -> Vec<Operation> {
		vec![
			Operation::Custom("coinbase".to_string()),
			Operation::Custom("receive_json_tx".to_string())
			Operation::Custom("receive_json_tx".to_string()),
		]
	}

@@ -115,41 +114,50 @@ impl ApiEndpoint for WalletReceiver {
					if cb_amount.amount == 0 {
						return Err(api::Error::Argument(format!("Zero amount not allowed.")));
					}
					let (out, kern) =
						receive_coinbase(&self.config, &self.key, cb_amount.amount).map_err(|e| {
							api::Error::Internal(format!("Error building coinbase: {:?}", e))
						})?;
					let out_bin =
						ser::ser_vec(&out).map_err(|e| {
							api::Error::Internal(format!("Error serializing output: {:?}", e))
						})?;
					let kern_bin =
						ser::ser_vec(&kern).map_err(|e| {
							api::Error::Internal(format!("Error serializing kernel: {:?}", e))
						})?;
					let (out, kern) = receive_coinbase(
						&self.config,
						&self.key,
						cb_amount.amount,
					).map_err(|e| {
						api::Error::Internal(format!("Error building coinbase: {:?}", e))
					})?;
					let out_bin = ser::ser_vec(&out).map_err(|e| {
						api::Error::Internal(format!("Error serializing output: {:?}", e))
					})?;
					let kern_bin = ser::ser_vec(&kern).map_err(|e| {
						api::Error::Internal(format!("Error serializing kernel: {:?}", e))
					})?;
					Ok(CbData {
						output: util::to_hex(out_bin),
						kernel: util::to_hex(kern_bin),
					})
				}
				_ => Err(api::Error::Argument(format!("Incorrect request data: {}", op))),
				_ => Err(api::Error::Argument(
					format!("Incorrect request data: {}", op),
				)),
			}
		}
		"receive_json_tx" => {
			match input {
				WalletReceiveRequest::PartialTransaction(partial_tx_str) => {
					debug!("Operation {} with transaction {}", op, &partial_tx_str);
					receive_json_tx(&self.config, &self.key, &partial_tx_str).map_err(|e| {
						api::Error::Internal(format!("Error processing partial transaction: {:?}", e))
					}).unwrap();
					receive_json_tx(&self.config, &self.key, &partial_tx_str)
						.map_err(|e| {
							api::Error::Internal(
								format!("Error processing partial transaction: {:?}", e),
							)
						})
						.unwrap();

					//TODO: Return emptiness for now, should be a proper enum return type
					// TODO: Return emptiness for now, should be a proper enum return type
					Ok(CbData {
						output: String::from(""),
						kernel: String::from(""),
					})
				}
				_ => Err(api::Error::Argument(format!("Incorrect request data: {}", op))),
				_ => Err(api::Error::Argument(
					format!("Incorrect request data: {}", op),
				)),
			}
		}
		_ => Err(api::Error::Argument(format!("Unknown operation: {}", op))),
@@ -158,7 +166,11 @@ impl ApiEndpoint for WalletReceiver {
}

/// Build a coinbase output and the corresponding kernel
fn receive_coinbase(config: &WalletConfig, ext_key: &ExtendedKey, amount: u64) -> Result<(Output, TxKernel), Error> {
fn receive_coinbase(
	config: &WalletConfig,
	ext_key: &ExtendedKey,
	amount: u64,
) -> Result<(Output, TxKernel), Error> {
	let secp = secp::Secp256k1::with_caps(secp::ContextFlag::Commit);

	// operate within a lock on wallet data

@@ -177,20 +189,23 @@ fn receive_coinbase(config: &WalletConfig, ext_key: &ExtendedKey, amount: u64) -
			height: 0,
			lock_height: 0,
		});
		debug!("Using child {} for a new coinbase output.",
			coinbase_key.n_child);
		debug!(
			"Using child {} for a new coinbase output.",
			coinbase_key.n_child
		);

		Block::reward_output(coinbase_key.key, &secp).map_err(&From::from)
	})?
}

/// Builds a full transaction from the partial one sent to us for transfer
fn receive_transaction(config: &WalletConfig,
	ext_key: &ExtendedKey,
	amount: u64,
	blinding: SecretKey,
	partial: Transaction)
	-> Result<Transaction, Error> {
fn receive_transaction(
	config: &WalletConfig,
	ext_key: &ExtendedKey,
	amount: u64,
	blinding: SecretKey,
	partial: Transaction,
) -> Result<Transaction, Error> {

	let secp = secp::Secp256k1::with_caps(secp::ContextFlag::Commit);

@@ -200,9 +215,11 @@ fn receive_transaction(config: &WalletConfig,
		let next_child = wallet_data.next_child(&ext_key.fingerprint);
		let out_key = ext_key.derive(&secp, next_child).map_err(|e| Error::Key(e))?;

		let (tx_final, _) = build::transaction(vec![build::initial_tx(partial),
			build::with_excess(blinding),
			build::output(amount, out_key.key)])?;
		let (tx_final, _) = build::transaction(vec![
			build::initial_tx(partial),
			build::with_excess(blinding),
			build::output(amount, out_key.key),
		])?;

		// make sure the resulting transaction is valid (could have been lied to
		// on excess)

@@ -218,8 +235,10 @@ fn receive_transaction(config: &WalletConfig,
			lock_height: 0,
		});

		debug!("Using child {} for a new transaction output.",
			out_key.n_child);
		debug!(
			"Using child {} for a new transaction output.",
			out_key.n_child
		);

		Ok(tx_final)
	})?
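
// A hedged sketch (not part of the commit) of the same build DSL from the
// sender's side: parts combine into a transaction plus the sum of blinding
// factors, mirroring the build::input usage in build_send_tx below.
//
//   let (tx, blind_sum) = build::transaction(vec![
//       build::input(coin.value, in_key.key),   // spend an existing output
//       build::output(amount, out_key.key),     // create the recipient output
//       build::with_excess(blinding),           // fold in a blinding factor
//   ])?;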

@@ -13,7 +13,7 @@
// limitations under the License.

use std::convert::From;
use secp::{self};
use secp;
use secp::key::SecretKey;

use checker;

@@ -27,7 +27,12 @@ use api;
/// wallet
/// UTXOs. The destination can be "stdout" (for command line) or a URL to the
/// recipients wallet receiver (to be implemented).
pub fn issue_send_tx(config: &WalletConfig, ext_key: &ExtendedKey, amount: u64, dest: String) -> Result<(), Error> {
pub fn issue_send_tx(
	config: &WalletConfig,
	ext_key: &ExtendedKey,
	amount: u64,
	dest: String,
) -> Result<(), Error> {
	let _ = checker::refresh_outputs(&config, ext_key);

	let (tx, blind_sum) = build_send_tx(config, ext_key, amount)?;

@@ -39,8 +44,10 @@ pub fn issue_send_tx(config: &WalletConfig, ext_key: &ExtendedKey, amount: u64,
		let url = format!("{}/v1/receive/receive_json_tx", &dest);
		debug!("Posting partial transaction to {}", url);
		let request = WalletReceiveRequest::PartialTransaction(json_tx);
		let _: CbData = api::client::post(url.as_str(), &request)
			.expect(&format!("Wallet receiver at {} unreachable, could not send transaction. Is it running?", url));
		let _: CbData = api::client::post(url.as_str(), &request).expect(&format!(
			"Wallet receiver at {} unreachable, could not send transaction. Is it running?",
			url
		));
	}
	Ok(())
}

@@ -48,7 +55,11 @@ pub fn issue_send_tx(config: &WalletConfig, ext_key: &ExtendedKey, amount: u64,
/// Builds a transaction to send to someone from the HD seed associated with the
/// wallet and the amount to send. Handles reading through the wallet data file,
/// selecting outputs to spend and building the change.
fn build_send_tx(config: &WalletConfig, ext_key: &ExtendedKey, amount: u64) -> Result<(Transaction, SecretKey), Error> {
fn build_send_tx(
	config: &WalletConfig,
	ext_key: &ExtendedKey,
	amount: u64,
) -> Result<(Transaction, SecretKey), Error> {
	// first, rebuild the private key from the seed
	let secp = secp::Secp256k1::with_caps(secp::ContextFlag::Commit);

@@ -66,7 +77,9 @@ fn build_send_tx(config: &WalletConfig, ext_key: &ExtendedKey, amount: u64) -> R
	// third, build inputs using the appropriate key
	let mut parts = vec![];
	for coin in &coins {
		let in_key = ext_key.derive(&secp, coin.n_child).map_err(|e| Error::Key(e))?;
		let in_key = ext_key.derive(&secp, coin.n_child).map_err(
			|e| Error::Key(e),
		)?;
		parts.push(build::input(coin.value, in_key.key));
	}

@@ -79,14 +79,14 @@ impl From<api::Error> for Error {

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct WalletConfig {
	//Whether to run a wallet
	// Whether to run a wallet
	pub enable_wallet: bool,
	//The api address that this api server (i.e. this wallet) will run
	// The api address that this api server (i.e. this wallet) will run
	pub api_http_addr: String,
	//The api address of a running server node, against which transaction inputs will be checked
	//during send
	// The api address of a running server node, against which transaction inputs will be checked
	// during send
	pub check_node_api_http_addr: String,
	//The directory in which wallet files are stored
	// The directory in which wallet files are stored
	pub data_file_dir: String,
}
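
// Illustrative construction (field values are placeholders, not defaults
// shipped by this commit):
//
//   let config = WalletConfig {
//       enable_wallet: true,
//       api_http_addr: "127.0.0.1:13416".to_string(),
//       check_node_api_http_addr: "http://127.0.0.1:13413".to_string(),
//       data_file_dir: ".".to_string(),
//   };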

@@ -171,10 +171,11 @@ impl WalletData {
	/// Note that due to the impossibility to do an actual file lock easily
	/// across operating systems, this just creates a lock file with a "should
	/// not exist" option.
	pub fn with_wallet<T, F>(data_file_dir:&str, f: F) -> Result<T, Error>
		where F: FnOnce(&mut WalletData) -> T
	pub fn with_wallet<T, F>(data_file_dir: &str, f: F) -> Result<T, Error>
	where
		F: FnOnce(&mut WalletData) -> T,
	{
		//create directory if it doesn't exist
		// create directory if it doesn't exist
		fs::create_dir_all(data_file_dir).unwrap_or_else(|why| {
			info!("! {:?}", why.kind());
		});

@@ -191,16 +192,23 @@ impl WalletData {
				.create_new(true)
				.open(lock_file_path)
				.map_err(|_| {
					Error::WalletData(format!("Could not create wallet lock file. Either \
						some other process is using the wallet or there's a write access issue."))
					Error::WalletData(format!(
						"Could not create wallet lock file. Either \
						some other process is using the wallet or there's a write access issue."
					))
				});
			match result {
				Ok(_) => { break; },
				Ok(_) => {
					break;
				}
				Err(e) => {
					if retries >= 3 {
						return Err(e);
					}
					debug!("failed to obtain wallet.lock, retries - {}, sleeping", retries);
					debug!(
						"failed to obtain wallet.lock, retries - {}, sleeping",
						retries
					);
					retries += 1;
					thread::sleep(time::Duration::from_millis(500));
				}
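
// The locking idea above in miniature (a sketch, not the commit's code):
// OpenOptions::create_new errors out if the file already exists, which serves
// as a portable cross-process mutex.
//
//   use std::fs::OpenOptions;
//   let lock = OpenOptions::new()
//       .write(true)
//       .create_new(true)
//       .open("wallet.lock"); // Err(..) means another process holds the lock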

@@ -215,16 +223,16 @@ impl WalletData {

		// delete the lock file
		fs::remove_file(lock_file_path).map_err(|_| {
			Error::WalletData(
				format!("Could not remove wallet lock file. Maybe insufficient rights?")
			)
			Error::WalletData(format!(
				"Could not remove wallet lock file. Maybe insufficient rights?"
			))
		})?;

		Ok(res)
	}

	/// Read the wallet data or created a brand new one if it doesn't exist yet
	fn read_or_create(data_file_path:&str) -> Result<WalletData, Error> {
	fn read_or_create(data_file_path: &str) -> Result<WalletData, Error> {
		if Path::new(data_file_path).exists() {
			WalletData::read(data_file_path)
		} else {

@@ -234,7 +242,7 @@ impl WalletData {
	}

	/// Read the wallet data from disk.
	fn read(data_file_path:&str) -> Result<WalletData, Error> {
	fn read(data_file_path: &str) -> Result<WalletData, Error> {
		let data_file = File::open(data_file_path).map_err(|e| {
			Error::WalletData(format!("Could not open {}: {}", data_file_path, e))
		})?;

@@ -244,7 +252,7 @@ impl WalletData {
	}

	/// Write the wallet data to disk.
	fn write(&self, data_file_path:&str) -> Result<(), Error> {
	fn write(&self, data_file_path: &str) -> Result<(), Error> {
		let mut data_file = File::create(data_file_path).map_err(|e| {
			Error::WalletData(format!("Could not create {}: {}", data_file_path, e))
		})?;

@@ -262,11 +270,12 @@ impl WalletData {
	}

	pub fn lock_output(&mut self, out: &OutputData) {
		if let Some(out_to_lock) = self.outputs.iter_mut().find(|out_to_lock| {
			out_to_lock.n_child == out.n_child &&
				out_to_lock.fingerprint == out.fingerprint &&
				out_to_lock.value == out.value
		}) {
		if let Some(out_to_lock) =
			self.outputs.iter_mut().find(|out_to_lock| {
				out_to_lock.n_child == out.n_child && out_to_lock.fingerprint == out.fingerprint &&
					out_to_lock.value == out.value
			})
		{
			out_to_lock.lock();
		}
	}

@@ -333,7 +342,9 @@ pub fn partial_tx_from_json(json_str: &str) -> Result<(u64, SecretKey, Transacti
	let blinding = SecretKey::from_slice(&secp, &blind_bin[..])?;
	let tx_bin = util::from_hex(partial_tx.tx)?;
	let tx = ser::deserialize(&mut &tx_bin[..]).map_err(|_| {
		Error::Format("Could not deserialize transaction, invalid format.".to_string())
		Error::Format(
			"Could not deserialize transaction, invalid format.".to_string(),
		)
	})?;

	Ok((partial_tx.amount, blinding, tx))