rustfmt all the things

Ignotus Peverell 2018-03-04 00:19:54 +00:00
parent 19565aea3d
commit be8d9633e4
GPG key ID: 99CD25F39F8F8211 (no known key found for this signature in database)
83 changed files with 2981 additions and 2607 deletions
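
Context for the hunks below: a sweep like this is normally produced mechanically by running rustfmt across the whole workspace rather than by hand-editing 83 files. A minimal sketch of that invocation, with an illustrative rustfmt.toml; the config values are assumptions chosen to match the style visible in the diffs (hard tabs, wrapping near 100 columns) and are not the project's actual settings, which this page does not show.

    # format every crate in the workspace (requires rustfmt to be installed)
    cargo fmt --all

    # rustfmt.toml -- illustrative values only, not the project's actual config
    max_width = 100     # wrap long lines near 100 columns
    hard_tabs = true    # indent with tabs, as in the code below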

View file

@@ -32,9 +32,8 @@ where
 {
 	let client = hyper::Client::new();
 	let res = check_error(client.get(url).send())?;
-	serde_json::from_reader(res).map_err(|e| {
-		Error::Internal(format!("Server returned invalid JSON: {}", e))
-	})
+	serde_json::from_reader(res)
+		.map_err(|e| Error::Internal(format!("Server returned invalid JSON: {}", e)))
 }

 /// Helper function to easily issue a HTTP POST request with the provided JSON
@@ -45,9 +44,8 @@ pub fn post<'a, IN>(url: &'a str, input: &IN) -> Result<(), Error>
 where
 	IN: Serialize,
 {
-	let in_json = serde_json::to_string(input).map_err(|e| {
-		Error::Internal(format!("Could not serialize data to JSON: {}", e))
-	})?;
+	let in_json = serde_json::to_string(input)
+		.map_err(|e| Error::Internal(format!("Could not serialize data to JSON: {}", e)))?;
 	let client = hyper::Client::new();
 	let _res = check_error(client.post(url).body(&mut in_json.as_bytes()).send())?;
 	Ok(())
@@ -61,13 +59,17 @@ fn check_error(res: hyper::Result<Response>) -> Result<Response, Error> {
 	let mut response = res.unwrap();
 	match response.status.class() {
 		StatusClass::Success => Ok(response),
-		StatusClass::ServerError => {
-			Err(Error::Internal(format!("Server error: {}", err_msg(&mut response))))
-		}
+		StatusClass::ServerError => Err(Error::Internal(format!(
+			"Server error: {}",
+			err_msg(&mut response)
+		))),
 		StatusClass::ClientError => if response.status == StatusCode::NotFound {
 			Err(Error::NotFound)
 		} else {
-			Err(Error::Argument(format!("Argument error: {}", err_msg(&mut response))))
+			Err(Error::Argument(format!(
+				"Argument error: {}",
+				err_msg(&mut response)
+			)))
 		},
 		_ => Err(Error::Internal(format!("Unrecognized error."))),
 	}

View file

@@ -13,7 +13,7 @@
 // limitations under the License.

 use std::io::Read;
-use std::sync::{Arc, Weak, RwLock};
+use std::sync::{Arc, RwLock, Weak};
 use std::thread;

 use iron::prelude::*;
@@ -24,7 +24,7 @@ use serde::Serialize;
 use serde_json;

 use chain;
-use core::core::{OutputIdentifier, Transaction, OutputFeatures};
+use core::core::{OutputFeatures, OutputIdentifier, Transaction};
 use core::core::hash::{Hash, Hashed};
 use core::ser;
 use pool;
@@ -43,7 +43,6 @@ fn w<T>(weak: &Weak<T>) -> Arc<T> {
 	weak.upgrade().unwrap()
 }

-
 // RESTful index of available api endpoints
 // GET /v1/
 struct IndexHandler {
@@ -74,15 +73,16 @@ impl UtxoHandler {
 		// We need the features here to be able to generate the necessary hash
 		// to compare against the hash in the output MMR.
-		// For now we can just try both (but this probably needs to be part of the api params)
+		// For now we can just try both (but this probably needs to be part of the api
+		// params)
 		let outputs = [
 			OutputIdentifier::new(OutputFeatures::DEFAULT_OUTPUT, &commit),
-			OutputIdentifier::new(OutputFeatures::COINBASE_OUTPUT, &commit)
+			OutputIdentifier::new(OutputFeatures::COINBASE_OUTPUT, &commit),
 		];

 		for x in outputs.iter() {
 			if let Ok(_) = w(&self.chain).is_unspent(&x) {
-				return Ok(Utxo::new(&commit))
+				return Ok(Utxo::new(&commit));
 			}
 		}
 		Err(Error::NotFound)
@@ -117,16 +117,12 @@ impl UtxoHandler {
 		commitments: Vec<Commitment>,
 		include_proof: bool,
 	) -> BlockOutputs {
-		let header = w(&self.chain)
-			.get_header_by_height(block_height)
-			.unwrap();
+		let header = w(&self.chain).get_header_by_height(block_height).unwrap();
 		let block = w(&self.chain).get_block(&header.hash()).unwrap();
 		let outputs = block
 			.outputs
 			.iter()
-			.filter(|output| {
-				commitments.is_empty() || commitments.contains(&output.commit)
-			})
+			.filter(|output| commitments.is_empty() || commitments.contains(&output.commit))
 			.map(|output| {
 				OutputPrintable::from_output(output, w(&self.chain), &block, include_proof)
 			})
@@ -406,11 +402,7 @@ pub struct BlockHandler {
 impl BlockHandler {
 	fn get_block(&self, h: &Hash) -> Result<BlockPrintable, Error> {
 		let block = w(&self.chain).get_block(h).map_err(|_| Error::NotFound)?;
-		Ok(BlockPrintable::from_block(
-			&block,
-			w(&self.chain),
-			false,
-		))
+		Ok(BlockPrintable::from_block(&block, w(&self.chain), false))
 	}

 	fn get_compact_block(&self, h: &Hash) -> Result<CompactBlockPrintable, Error> {

View file

@@ -14,15 +14,15 @@

 extern crate grin_chain as chain;
 extern crate grin_core as core;
-extern crate grin_pool as pool;
 extern crate grin_p2p as p2p;
+extern crate grin_pool as pool;
 extern crate grin_store as store;
 extern crate grin_util as util;

 extern crate hyper;
+extern crate iron;
 #[macro_use]
 extern crate lazy_static;
-extern crate iron;
 extern crate mount;
 extern crate regex;
 #[macro_use]

View file

@@ -163,7 +163,9 @@ pub struct Utxo {

 impl Utxo {
 	pub fn new(commit: &pedersen::Commitment) -> Utxo {
-		Utxo { commit: PrintableCommitment(commit.clone()) }
+		Utxo {
+			commit: PrintableCommitment(commit.clone()),
+		}
 	}
 }
@@ -182,15 +184,19 @@ impl PrintableCommitment {
 	}
 }

 impl serde::ser::Serialize for PrintableCommitment {
-	fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where
-		S: serde::ser::Serializer {
+	fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+	where
+		S: serde::ser::Serializer,
+	{
 		serializer.serialize_str(&util::to_hex(self.to_vec()))
 	}
 }

 impl<'de> serde::de::Deserialize<'de> for PrintableCommitment {
-	fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where
-		D: serde::de::Deserializer<'de> {
+	fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+	where
+		D: serde::de::Deserializer<'de>,
+	{
 		deserializer.deserialize_str(PrintableCommitmentVisitor)
 	}
 }
@@ -204,9 +210,13 @@ impl<'de> serde::de::Visitor<'de> for PrintableCommitmentVisitor {
 		formatter.write_str("a Pedersen commitment")
 	}

-	fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> where
-		E: serde::de::Error, {
-		Ok(PrintableCommitment(pedersen::Commitment::from_vec(util::from_hex(String::from(v)).unwrap())))
+	fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
+	where
+		E: serde::de::Error,
+	{
+		Ok(PrintableCommitment(pedersen::Commitment::from_vec(
+			util::from_hex(String::from(v)).unwrap(),
+		)))
 	}
 }
@@ -237,12 +247,14 @@ impl OutputPrintable {
 		block: &core::Block,
 		include_proof: bool,
 	) -> OutputPrintable {
-		let output_type =
-			if output.features.contains(core::transaction::OutputFeatures::COINBASE_OUTPUT) {
-				OutputType::Coinbase
-			} else {
-				OutputType::Transaction
-			};
+		let output_type = if output
+			.features
+			.contains(core::transaction::OutputFeatures::COINBASE_OUTPUT)
+		{
+			OutputType::Coinbase
+		} else {
+			OutputType::Transaction
+		};

 		let out_id = core::OutputIdentifier::from_output(&output);
 		let spent = chain.is_unspent(&out_id).is_err();
@@ -253,13 +265,14 @@ impl OutputPrintable {
 			None
 		};

-		// Get the Merkle proof for all unspent coinbase outputs (to verify maturity on spend).
-		// We obtain the Merkle proof by rewinding the PMMR.
-		// We require the rewind() to be stable even after the PMMR is pruned and compacted
-		// so we can still recreate the necessary proof.
+		// Get the Merkle proof for all unspent coinbase outputs (to verify maturity on
+		// spend). We obtain the Merkle proof by rewinding the PMMR.
+		// We require the rewind() to be stable even after the PMMR is pruned and
+		// compacted so we can still recreate the necessary proof.
 		let mut merkle_proof = None;
-		if output.features.contains(core::transaction::OutputFeatures::COINBASE_OUTPUT)
-			&& !spent
+		if output
+			.features
+			.contains(core::transaction::OutputFeatures::COINBASE_OUTPUT) && !spent
 		{
 			merkle_proof = chain.get_merkle_proof(&out_id, &block).ok()
 		};
@@ -285,13 +298,17 @@ impl OutputPrintable {
 	}

 	pub fn range_proof(&self) -> Result<pedersen::RangeProof, ser::Error> {
-		self.proof.clone().ok_or_else(|| ser::Error::HexError(format!("output range_proof missing")))
+		self.proof
+			.clone()
+			.ok_or_else(|| ser::Error::HexError(format!("output range_proof missing")))
 	}
 }

 impl serde::ser::Serialize for OutputPrintable {
-	fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where
-		S: serde::ser::Serializer {
+	fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+	where
+		S: serde::ser::Serializer,
+	{
 		let mut state = serializer.serialize_struct("OutputPrintable", 7)?;
 		state.serialize_field("output_type", &self.output_type)?;
 		state.serialize_field("commit", &util::to_hex(self.commit.0.to_vec()))?;
@@ -308,8 +325,10 @@ impl serde::ser::Serialize for OutputPrintable {
 }

 impl<'de> serde::de::Deserialize<'de> for OutputPrintable {
-	fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where
-		D: serde::de::Deserializer<'de> {
+	fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+	where
+		D: serde::de::Deserializer<'de>,
+	{
 		#[derive(Deserialize)]
 		#[serde(field_identifier, rename_all = "snake_case")]
 		enum Field {
@@ -319,7 +338,7 @@ impl<'de> serde::de::Deserialize<'de> for OutputPrintable {
 			Spent,
 			Proof,
 			ProofHash,
-			MerkleProof
+			MerkleProof,
 		}

 		struct OutputPrintableVisitor;
@@ -331,8 +350,10 @@ impl<'de> serde::de::Deserialize<'de> for OutputPrintable {
 				formatter.write_str("a print able Output")
 			}

-			fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error> where
-				A: MapAccess<'de>, {
+			fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error>
+			where
+				A: MapAccess<'de>,
+			{
 				let mut output_type = None;
 				let mut commit = None;
 				let mut switch_commit_hash = None;
@@ -346,15 +367,15 @@ impl<'de> serde::de::Deserialize<'de> for OutputPrintable {
 					Field::OutputType => {
 						no_dup!(output_type);
 						output_type = Some(map.next_value()?)
-					},
+					}
 					Field::Commit => {
 						no_dup!(commit);
 						let val: String = map.next_value()?;
-						let vec = util::from_hex(val.clone())
-							.map_err(serde::de::Error::custom)?;
+						let vec =
+							util::from_hex(val.clone()).map_err(serde::de::Error::custom)?;
 						commit = Some(pedersen::Commitment::from_vec(vec));
-					},
+					}
 					Field::SwitchCommitHash => {
 						no_dup!(switch_commit_hash);
@@ -362,11 +383,11 @@ impl<'de> serde::de::Deserialize<'de> for OutputPrintable {
 						let hash = core::SwitchCommitHash::from_hex(&val.clone())
 							.map_err(serde::de::Error::custom)?;
 						switch_commit_hash = Some(hash)
-					},
+					}
 					Field::Spent => {
 						no_dup!(spent);
 						spent = Some(map.next_value()?)
-					},
+					}
 					Field::Proof => {
 						no_dup!(proof);
@@ -380,13 +401,16 @@ impl<'de> serde::de::Deserialize<'de> for OutputPrintable {
 								bytes[i] = vec[i];
 							}

-							proof = Some(pedersen::RangeProof { proof: bytes, plen: vec.len() })
+							proof = Some(pedersen::RangeProof {
+								proof: bytes,
+								plen: vec.len(),
+							})
 						}
-					},
+					}
 					Field::ProofHash => {
 						no_dup!(proof_hash);
 						proof_hash = Some(map.next_value()?)
-					},
+					}
 					Field::MerkleProof => {
 						no_dup!(merkle_proof);
 						if let Some(hex) = map.next_value::<Option<String>>()? {
@@ -412,7 +436,14 @@ impl<'de> serde::de::Deserialize<'de> for OutputPrintable {
 			}
 		}

-		const FIELDS: &'static [&'static str] = &["output_type", "commit", "switch_commit_hash", "spent", "proof", "proof_hash"];
+		const FIELDS: &'static [&'static str] = &[
+			"output_type",
+			"commit",
+			"switch_commit_hash",
+			"spent",
+			"proof",
+			"proof_hash",
+		];
 		deserializer.deserialize_struct("OutputPrintable", FIELDS, OutputPrintableVisitor)
 	}
 }
@@ -523,14 +554,17 @@ impl BlockPrintable {
 		chain: Arc<chain::Chain>,
 		include_proof: bool,
 	) -> BlockPrintable {
-		let inputs = block.inputs
+		let inputs = block
+			.inputs
 			.iter()
 			.map(|x| util::to_hex(x.commitment().0.to_vec()))
 			.collect();
 		let outputs = block
 			.outputs
 			.iter()
-			.map(|output| OutputPrintable::from_output(output, chain.clone(), &block, include_proof))
+			.map(|output| {
+				OutputPrintable::from_output(output, chain.clone(), &block, include_proof)
+			})
 			.collect();
 		let kernels = block
 			.kernels
@@ -559,19 +593,18 @@ pub struct CompactBlockPrintable {
 }

 impl CompactBlockPrintable {
-	/// Convert a compact block into a printable representation suitable for api response
+	/// Convert a compact block into a printable representation suitable for
+	/// api response
 	pub fn from_compact_block(
 		cb: &core::CompactBlock,
 		chain: Arc<chain::Chain>,
 	) -> CompactBlockPrintable {
 		let block = chain.get_block(&cb.hash()).unwrap();
-		let out_full = cb
-			.out_full
+		let out_full = cb.out_full
 			.iter()
 			.map(|x| OutputPrintable::from_output(x, chain.clone(), &block, false))
 			.collect();
-		let kern_full = cb
-			.kern_full
+		let kern_full = cb.kern_full
 			.iter()
 			.map(|x| TxKernelPrintable::from_txkernel(x))
 			.collect();
@@ -611,15 +644,16 @@ mod test {

 	#[test]
 	fn serialize_output() {
-		let hex_output = "{\
-			\"output_type\":\"Coinbase\",\
-			\"commit\":\"083eafae5d61a85ab07b12e1a51b3918d8e6de11fc6cde641d54af53608aa77b9f\",\
-			\"switch_commit_hash\":\"85daaf11011dc11e52af84ebe78e2f2d19cbdc76000000000000000000000000\",\
-			\"spent\":false,\
-			\"proof\":null,\
-			\"proof_hash\":\"ed6ba96009b86173bade6a9227ed60422916593fa32dd6d78b25b7a4eeef4946\",\
-			\"merkle_proof\":null\
-		}";
+		let hex_output =
+			"{\
+			 \"output_type\":\"Coinbase\",\
+			 \"commit\":\"083eafae5d61a85ab07b12e1a51b3918d8e6de11fc6cde641d54af53608aa77b9f\",\
+			 \"switch_commit_hash\":\"85daaf11011dc11e52af84ebe78e2f2d19cbdc76000000000000000000000000\",\
+			 \"spent\":false,\
+			 \"proof\":null,\
+			 \"proof_hash\":\"ed6ba96009b86173bade6a9227ed60422916593fa32dd6d78b25b7a4eeef4946\",\
+			 \"merkle_proof\":null\
+			 }";
 		let deserialized: OutputPrintable = serde_json::from_str(&hex_output).unwrap();
 		let serialized = serde_json::to_string(&deserialized).unwrap();
 		assert_eq!(serialized, hex_output);
@@ -627,7 +661,8 @@ mod test {

 	#[test]
 	fn serialize_utxo() {
-		let hex_commit = "{\"commit\":\"083eafae5d61a85ab07b12e1a51b3918d8e6de11fc6cde641d54af53608aa77b9f\"}";
+		let hex_commit =
+			"{\"commit\":\"083eafae5d61a85ab07b12e1a51b3918d8e6de11fc6cde641d54af53608aa77b9f\"}";
 		let deserialized: Utxo = serde_json::from_str(&hex_commit).unwrap();
 		let serialized = serde_json::to_string(&deserialized).unwrap();
 		assert_eq!(serialized, hex_commit);

View file

@@ -20,7 +20,8 @@ use std::fs::File;
 use std::sync::{Arc, Mutex, RwLock};
 use std::time::{Duration, Instant};

-use core::core::{Block, BlockHeader, Input, OutputFeatures, OutputIdentifier, OutputStoreable, TxKernel};
+use core::core::{Block, BlockHeader, Input, OutputFeatures, OutputIdentifier, OutputStoreable,
+                 TxKernel};
 use core::core::hash::{Hash, Hashed};
 use core::core::pmmr::MerkleProof;
 use core::core::target::Difficulty;
@@ -33,7 +34,6 @@ use types::*;
 use util::secp::pedersen::RangeProof;
 use util::LOGGER;

-
 const MAX_ORPHAN_AGE_SECS: u64 = 30;

 #[derive(Debug, Clone)]
@@ -75,7 +75,9 @@ impl OrphanBlockPool {
 	{
 		let mut orphans = self.orphans.write().unwrap();
 		let mut prev_idx = self.prev_idx.write().unwrap();
-		orphans.retain(|_, ref mut x| x.added.elapsed() < Duration::from_secs(MAX_ORPHAN_AGE_SECS));
+		orphans.retain(|_, ref mut x| {
+			x.added.elapsed() < Duration::from_secs(MAX_ORPHAN_AGE_SECS)
+		});
 		prev_idx.retain(|_, &mut x| orphans.contains_key(&x));
 	}
 }
@@ -155,9 +157,7 @@ impl Chain {
 		// check if we have a head in store, otherwise the genesis block is it
 		let head = store.head();
 		let sumtree_md = match head {
-			Ok(h) => {
-				Some(store.get_block_pmmr_file_metadata(&h.last_block_h)?)
-			},
+			Ok(h) => Some(store.get_block_pmmr_file_metadata(&h.last_block_h)?),
 			Err(NotFoundErr) => None,
 			Err(e) => return Err(Error::StoreErr(e, "chain init load head".to_owned())),
 		};
@@ -172,9 +172,7 @@ impl Chain {
 			store.save_block(&genesis)?;
 			store.setup_height(&genesis.header, &tip)?;
 			if genesis.kernels.len() > 0 {
-				sumtree::extending(&mut sumtrees, |extension| {
-					extension.apply_block(&genesis)
-				})?;
+				sumtree::extending(&mut sumtrees, |extension| extension.apply_block(&genesis))?;
 			}

 			// saving a new tip based on genesis
@@ -211,30 +209,32 @@ impl Chain {

 	/// Processes a single block, then checks for orphans, processing
 	/// those as well if they're found
-	pub fn process_block(&self, b: Block, opts: Options)
-		-> Result<(Option<Tip>, Option<Block>), Error>
-	{
-		let res = self.process_block_no_orphans(b, opts);
-		match res {
-			Ok((t, b)) => {
-				// We accepted a block, so see if we can accept any orphans
-				if let Some(ref b) = b {
-					self.check_orphans(b.hash());
-				}
-				Ok((t, b))
-			},
-			Err(e) => {
-				Err(e)
-			}
-		}
-	}
+	pub fn process_block(
+		&self,
+		b: Block,
+		opts: Options,
+	) -> Result<(Option<Tip>, Option<Block>), Error> {
+		let res = self.process_block_no_orphans(b, opts);
+		match res {
+			Ok((t, b)) => {
+				// We accepted a block, so see if we can accept any orphans
+				if let Some(ref b) = b {
+					self.check_orphans(b.hash());
+				}
+				Ok((t, b))
+			}
+			Err(e) => Err(e),
+		}
+	}

 	/// Attempt to add a new block to the chain. Returns the new chain tip if it
 	/// has been added to the longest chain, None if it's added to an (as of
 	/// now) orphan chain.
-	pub fn process_block_no_orphans(&self, b: Block, opts: Options)
-		-> Result<(Option<Tip>, Option<Block>), Error>
-	{
+	pub fn process_block_no_orphans(
+		&self,
+		b: Block,
+		opts: Options,
+	) -> Result<(Option<Tip>, Option<Block>), Error> {
 		let head = self.store
 			.head()
 			.map_err(|e| Error::StoreErr(e, "chain load head".to_owned()))?;
@@ -258,7 +258,7 @@ impl Chain {
 					adapter.block_accepted(&b, opts);
 				}
 				Ok((Some(tip.clone()), Some(b.clone())))
-			},
+			}
 			Ok(None) => {
 				// block got accepted but we did not extend the head
 				// so its on a fork (or is the start of a new fork)
@@ -267,7 +267,8 @@ impl Chain {
 				// TODO - This opens us to an amplification attack on blocks
 				// mined at a low difficulty. We should suppress really old blocks
 				// or less relevant blocks somehow.
-				// We should also probably consider banning nodes that send us really old blocks.
+				// We should also probably consider banning nodes that send us really old
+				// blocks.
 				//
 				if !opts.contains(Options::SYNC) {
 					// broadcast the block
@@ -275,7 +276,7 @@ impl Chain {
 					adapter.block_accepted(&b, opts);
 				}
 				Ok((None, Some(b.clone())))
-			},
+			}
 			Err(Error::Orphan) => {
 				let block_hash = b.hash();
 				let orphan = Orphan {
@@ -297,7 +298,7 @@ impl Chain {
 					self.orphans.len(),
 				);
 				Err(Error::Orphan)
-			},
+			}
 			Err(Error::Unfit(ref msg)) => {
 				debug!(
 					LOGGER,
@@ -334,11 +335,7 @@ impl Chain {

 	/// Attempt to add a new header to the header chain.
 	/// This is only ever used during sync and uses sync_head.
-	pub fn sync_block_header(
-		&self,
-		bh: &BlockHeader,
-		opts: Options,
-	) -> Result<Option<Tip>, Error> {
+	pub fn sync_block_header(&self, bh: &BlockHeader, opts: Options) -> Result<Option<Tip>, Error> {
 		let sync_head = self.get_sync_head()?;
 		let header_head = self.get_header_head()?;
 		let sync_ctx = self.ctx_from_head(sync_head, opts);
@@ -361,7 +358,6 @@ impl Chain {
 		self.orphans.contains(hash)
 	}

-
 	/// Check for orphans, once a block is successfully added
 	pub fn check_orphans(&self, mut last_block_hash: Hash) {
 		debug!(
@@ -384,10 +380,10 @@ impl Chain {
 						} else {
 							break;
 						}
-					},
+					}
 					Err(_) => {
 						break;
-					},
+					}
 				};
 			} else {
 				break;
@@ -408,9 +404,7 @@ impl Chain {
 	pub fn validate(&self) -> Result<(), Error> {
 		let header = self.store.head_header()?;
 		let mut sumtrees = self.sumtrees.write().unwrap();
-		sumtree::extending(&mut sumtrees, |extension| {
-			extension.validate(&header)
-		})
+		sumtree::extending(&mut sumtrees, |extension| extension.validate(&header))
 	}

 	/// Check if the input has matured sufficiently for the given block height.
@@ -466,13 +460,7 @@ impl Chain {
 	}

 	/// Returns current sumtree roots
-	pub fn get_sumtree_roots(
-		&self,
-	) -> (
-		Hash,
-		Hash,
-		Hash,
-	) {
+	pub fn get_sumtree_roots(&self) -> (Hash, Hash, Hash) {
 		let mut sumtrees = self.sumtrees.write().unwrap();
 		sumtrees.roots()
 	}
@@ -507,9 +495,8 @@ impl Chain {
 		h: Hash,
 		rewind_to_output: u64,
 		rewind_to_kernel: u64,
-		sumtree_data: File
+		sumtree_data: File,
 	) -> Result<(), Error> {
-
 		let head = self.head().unwrap();
 		let header_head = self.get_header_head().unwrap();
 		if header_head.height - head.height < global::cut_through_horizon() as u64 {
@@ -610,17 +597,17 @@ impl Chain {
 	/// Gets the block header at the provided height
 	pub fn get_header_by_height(&self, height: u64) -> Result<BlockHeader, Error> {
-		self.store.get_header_by_height(height).map_err(|e| {
-			Error::StoreErr(e, "chain get header by height".to_owned())
-		})
+		self.store
+			.get_header_by_height(height)
+			.map_err(|e| Error::StoreErr(e, "chain get header by height".to_owned()))
 	}

 	/// Verifies the given block header is actually on the current chain.
 	/// Checks the header_by_height index to verify the header is where we say it is
 	pub fn is_on_current_chain(&self, header: &BlockHeader) -> Result<(), Error> {
-		self.store.is_on_current_chain(header).map_err(|e| {
-			Error::StoreErr(e, "chain is_on_current_chain".to_owned())
-		})
+		self.store
+			.is_on_current_chain(header)
+			.map_err(|e| Error::StoreErr(e, "chain is_on_current_chain".to_owned()))
 	}

 	/// Get the tip of the current "sync" header chain.
@@ -648,13 +635,18 @@ impl Chain {

 	/// Check whether we have a block without reading it
 	pub fn block_exists(&self, h: Hash) -> Result<bool, Error> {
-		self.store.block_exists(&h)
+		self.store
+			.block_exists(&h)
 			.map_err(|e| Error::StoreErr(e, "chain block exists".to_owned()))
 	}

 	/// Retrieve the file index metadata for a given block
-	pub fn get_block_pmmr_file_metadata(&self, h: &Hash) -> Result<PMMRFileMetadataCollection, Error> {
-		self.store.get_block_pmmr_file_metadata(h)
+	pub fn get_block_pmmr_file_metadata(
+		&self,
+		h: &Hash,
+	) -> Result<PMMRFileMetadataCollection, Error> {
+		self.store
+			.get_block_pmmr_file_metadata(h)
 			.map_err(|e| Error::StoreErr(e, "retrieve block pmmr metadata".to_owned()))
 	}
 }

View file

@@ -64,25 +64,27 @@ pub fn process_block(b: &Block, mut ctx: BlockContext) -> Result<Option<Tip>, Er
 	validate_header(&b.header, &mut ctx)?;

 	// valid header, now check we actually have the previous block in the store
 	// not just the header but the block itself
 	// short circuit the test first both for performance (in-mem vs db access)
 	// but also for the specific case of the first fast sync full block
 	if b.header.previous != ctx.head.last_block_h {
-		// we cannot assume we can use the chain head for this as we may be dealing with a fork
-		// we cannot use heights here as the fork may have jumped in height
+		// we cannot assume we can use the chain head for this as we may be dealing
+		// with a fork we cannot use heights here as the fork may have jumped in
+		// height
 		match ctx.store.block_exists(&b.header.previous) {
-			Ok(true) => {},
+			Ok(true) => {}
 			Ok(false) => {
 				return Err(Error::Orphan);
-			},
+			}
 			Err(e) => {
 				return Err(Error::StoreErr(e, "pipe get previous".to_owned()));
 			}
 		}
 	}

-	// valid header and we have a previous block, time to take the lock on the sum trees
+	// valid header and we have a previous block, time to take the lock on the sum
+	// trees
 	let local_sumtrees = ctx.sumtrees.clone();
 	let mut sumtrees = local_sumtrees.write().unwrap();
@@ -112,15 +114,19 @@ pub fn process_block(b: &Block, mut ctx: BlockContext) -> Result<Option<Tip>, Er
 	match result {
 		Ok(t) => {
 			save_pmmr_metadata(&Tip::from_block(&b.header), &sumtrees, ctx.store.clone())?;
 			Ok(t)
-		},
+		}
 		Err(e) => Err(e),
 	}
 }

 /// Save pmmr index location for a given block
-pub fn save_pmmr_metadata(t: &Tip, sumtrees: &sumtree::SumTrees, store: Arc<ChainStore>) -> Result<(), Error> {
+pub fn save_pmmr_metadata(
+	t: &Tip,
+	sumtrees: &sumtree::SumTrees,
+	store: Arc<ChainStore>,
+) -> Result<(), Error> {
 	// Save pmmr file metadata for this block
 	let block_file_md = sumtrees.last_file_metadata();
 	store
@@ -136,7 +142,12 @@ pub fn sync_block_header(
 	mut sync_ctx: BlockContext,
 	mut header_ctx: BlockContext,
 ) -> Result<Option<Tip>, Error> {
-	debug!(LOGGER, "pipe: sync_block_header: {} at {}", bh.hash(), bh.height);
+	debug!(
+		LOGGER,
+		"pipe: sync_block_header: {} at {}",
+		bh.hash(),
+		bh.height
+	);

 	validate_header(&bh, &mut sync_ctx)?;
 	add_block_header(bh, &mut sync_ctx)?;
@@ -146,17 +157,20 @@ pub fn sync_block_header(
 	// just taking the shared lock
 	let _ = header_ctx.sumtrees.write().unwrap();

-	// now update the header_head (if new header with most work) and the sync_head (always)
+	// now update the header_head (if new header with most work) and the sync_head
+	// (always)
 	update_header_head(bh, &mut header_ctx)?;
 	update_sync_head(bh, &mut sync_ctx)
 }

 /// Process block header as part of "header first" block propagation.
-pub fn process_block_header(
-	bh: &BlockHeader,
-	mut ctx: BlockContext,
-) -> Result<Option<Tip>, Error> {
-	debug!(LOGGER, "pipe: process_block_header: {} at {}", bh.hash(), bh.height);
+pub fn process_block_header(bh: &BlockHeader, mut ctx: BlockContext) -> Result<Option<Tip>, Error> {
+	debug!(
+		LOGGER,
+		"pipe: process_block_header: {} at {}",
+		bh.hash(),
+		bh.height
+	);

 	check_header_known(bh.hash(), &mut ctx)?;
 	validate_header(&bh, &mut ctx)?;
@@ -214,13 +228,11 @@ fn check_known(bh: Hash, ctx: &mut BlockContext) -> Result<(), Error> {
 /// arranged by order of cost to have as little DoS surface as possible.
 /// TODO require only the block header (with length information)
 fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), Error> {
-
 	// check version, enforces scheduled hard fork
 	if !consensus::valid_header_version(header.height, header.version) {
 		error!(
 			LOGGER,
-			"Invalid block header version received ({}), maybe update Grin?",
-			header.version
+			"Invalid block header version received ({}), maybe update Grin?", header.version
 		);
 		return Err(Error::InvalidBlockVersion(header.version));
 	}
@@ -236,11 +248,17 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), E
 	if !ctx.opts.contains(Options::SKIP_POW) {
 		let n = global::sizeshift() as u32;
 		if !(ctx.pow_verifier)(header, n) {
-			error!(LOGGER, "pipe: validate_header failed for cuckoo shift size {}", n);
+			error!(
+				LOGGER,
+				"pipe: validate_header failed for cuckoo shift size {}", n
+			);
 			return Err(Error::InvalidPow);
 		}
 		if header.height % 500 == 0 {
-			debug!(LOGGER, "Validating header validated, using cuckoo shift size {}", n);
+			debug!(
+				LOGGER,
+				"Validating header validated, using cuckoo shift size {}", n
+			);
 		}
 	}
@@ -248,9 +266,10 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), E
 	let prev = match ctx.store.get_block_header(&header.previous) {
 		Ok(prev) => Ok(prev),
 		Err(grin_store::Error::NotFoundErr) => Err(Error::Orphan),
-		Err(e) =>{
-			Err(Error::StoreErr(e, format!("previous header {}", header.previous)))
-		}
+		Err(e) => Err(Error::StoreErr(
+			e,
+			format!("previous header {}", header.previous),
+		)),
 	}?;

 	if header.height != prev.height + 1 {
@@ -312,7 +331,6 @@ fn validate_block(
 	ctx: &mut BlockContext,
 	ext: &mut sumtree::Extension,
 ) -> Result<(), Error> {
-
 	// main isolated block validation, checks all commitment sums and sigs
 	b.validate().map_err(&Error::InvalidBlockProof)?;
@@ -331,9 +349,7 @@ fn validate_block(
 		debug!(
 			LOGGER,
-			"validate_block: utxo roots - {:?}, {:?}",
-			roots.utxo_root,
-			b.header.utxo_root,
+			"validate_block: utxo roots - {:?}, {:?}", roots.utxo_root, b.header.utxo_root,
 		);
 		debug!(
 			LOGGER,
@@ -343,9 +359,7 @@ fn validate_block(
 		);
 		debug!(
 			LOGGER,
-			"validate_block: kernel roots - {:?}, {:?}",
-			roots.kernel_root,
-			b.header.kernel_root,
+			"validate_block: kernel roots - {:?}, {:?}", roots.kernel_root, b.header.kernel_root,
 		);

 		return Err(Error::InvalidRoot);
@@ -395,11 +409,21 @@ fn update_head(b: &Block, ctx: &mut BlockContext) -> Result<Option<Tip>, Error>
 		}
 		ctx.head = tip.clone();
 		if b.header.height % 100 == 0 {
-			info!(LOGGER, "pipe: chain head reached {} @ {} [{}]",
-				b.header.height, b.header.difficulty, b.hash());
+			info!(
+				LOGGER,
+				"pipe: chain head reached {} @ {} [{}]",
+				b.header.height,
+				b.header.difficulty,
+				b.hash()
+			);
 		} else {
-			debug!(LOGGER, "pipe: chain head reached {} @ {} [{}]",
-				b.header.height, b.header.difficulty, b.hash());
+			debug!(
+				LOGGER,
+				"pipe: chain head reached {} @ {} [{}]",
+				b.header.height,
+				b.header.difficulty,
+				b.hash()
+			);
 		}
 		Ok(Some(tip))
 	} else {
@@ -415,9 +439,21 @@ fn update_sync_head(bh: &BlockHeader, ctx: &mut BlockContext) -> Result<Option<T
 		.map_err(|e| Error::StoreErr(e, "pipe save sync head".to_owned()))?;
 	ctx.head = tip.clone();
 	if bh.height % 100 == 0 {
-		info!(LOGGER, "sync head {} @ {} [{}]", bh.total_difficulty, bh.height, bh.hash());
+		info!(
+			LOGGER,
+			"sync head {} @ {} [{}]",
+			bh.total_difficulty,
+			bh.height,
+			bh.hash()
+		);
 	} else {
-		debug!(LOGGER, "sync head {} @ {} [{}]", bh.total_difficulty, bh.height, bh.hash());
+		debug!(
+			LOGGER,
+			"sync head {} @ {} [{}]",
+			bh.total_difficulty,
+			bh.height,
+			bh.hash()
+		);
 	}
 	Ok(Some(tip))
 }
@@ -430,9 +466,21 @@ fn update_header_head(bh: &BlockHeader, ctx: &mut BlockContext) -> Result<Option
 		.map_err(|e| Error::StoreErr(e, "pipe save header head".to_owned()))?;
 	ctx.head = tip.clone();
 	if bh.height % 100 == 0 {
-		info!(LOGGER, "header head {} @ {} [{}]", bh.total_difficulty, bh.height, bh.hash());
+		info!(
+			LOGGER,
+			"header head {} @ {} [{}]",
+			bh.total_difficulty,
+			bh.height,
+			bh.hash()
+		);
 	} else {
-		debug!(LOGGER, "header head {} @ {} [{}]", bh.total_difficulty, bh.height, bh.hash());
+		debug!(
+			LOGGER,
+			"header head {} @ {} [{}]",
+			bh.total_difficulty,
+			bh.height,
+			bh.hash()
+		);
 	}
 	Ok(Some(tip))
 } else {
@@ -449,7 +497,6 @@ pub fn rewind_and_apply_fork(
 	store: Arc<ChainStore>,
 	ext: &mut sumtree::Extension,
 ) -> Result<(), Error> {
-
 	// extending a fork, first identify the block where forking occurred
 	// keeping the hashes of blocks along the fork
 	let mut current = b.header.previous;
@@ -479,9 +526,9 @@ pub fn rewind_and_apply_fork(

 	// apply all forked blocks, including this new one
 	for h in hashes {
-		let fb = store.get_block(&h).map_err(|e| {
-			Error::StoreErr(e, format!("getting forked blocks"))
-		})?;
+		let fb = store
+			.get_block(&h)
+			.map_err(|e| Error::StoreErr(e, format!("getting forked blocks")))?;
 		ext.apply_block(&fb)?;
 	}
 	Ok(())
View file

@@ -105,7 +105,8 @@ impl ChainStore for ChainKVStore {

 	fn get_block_header(&self, h: &Hash) -> Result<BlockHeader, Error> {
 		option_to_not_found(
-			self.db.get_ser(&to_key(BLOCK_HEADER_PREFIX, &mut h.to_vec())),
+			self.db
+				.get_ser(&to_key(BLOCK_HEADER_PREFIX, &mut h.to_vec())),
 		)
 	}
@@ -113,10 +114,7 @@ impl ChainStore for ChainKVStore {
 	fn save_block(&self, b: &Block) -> Result<(), Error> {
 		let batch = self.db
 			.batch()
-			.put_ser(
-				&to_key(BLOCK_PREFIX, &mut b.hash().to_vec())[..],
-				b,
-			)?
+			.put_ser(&to_key(BLOCK_PREFIX, &mut b.hash().to_vec())[..], b)?
 			.put_ser(
 				&to_key(BLOCK_HEADER_PREFIX, &mut b.hash().to_vec())[..],
 				&b.header,
@@ -187,14 +185,18 @@ impl ChainStore for ChainKVStore {
 		)
 	}

-	fn save_block_pmmr_file_metadata(&self, h:&Hash, md: &PMMRFileMetadataCollection) -> Result<(), Error> {
+	fn save_block_pmmr_file_metadata(
+		&self,
+		h: &Hash,
+		md: &PMMRFileMetadataCollection,
+	) -> Result<(), Error> {
 		self.db.put_ser(
 			&to_key(BLOCK_PMMR_FILE_METADATA_PREFIX, &mut h.to_vec())[..],
 			&md,
 		)
 	}

-	fn get_block_pmmr_file_metadata(&self, h: &Hash) -> Result<PMMRFileMetadataCollection, Error>{
+	fn get_block_pmmr_file_metadata(&self, h: &Hash) -> Result<PMMRFileMetadataCollection, Error> {
 		option_to_not_found(
 			self.db
 				.get_ser(&to_key(BLOCK_PMMR_FILE_METADATA_PREFIX, &mut h.to_vec())),
@@ -202,7 +204,8 @@ impl ChainStore for ChainKVStore {
 	}

 	fn delete_block_pmmr_file_metadata(&self, h: &Hash) -> Result<(), Error> {
-		self.db.delete(&to_key(BLOCK_PMMR_FILE_METADATA_PREFIX, &mut h.to_vec())[..])
+		self.db
+			.delete(&to_key(BLOCK_PMMR_FILE_METADATA_PREFIX, &mut h.to_vec())[..])
 	}

 	/// Maintain consistency of the "header_by_height" index by traversing back
@@ -212,9 +215,7 @@ impl ChainStore for ChainKVStore {
 	/// We need to handle the case where we have no index entry for a given
 	/// height to account for the case where we just switched to a new fork and
 	/// the height jumped beyond current chain height.
-	fn setup_height(&self, header: &BlockHeader, old_tip: &Tip)
-		-> Result<(), Error> {
-
+	fn setup_height(&self, header: &BlockHeader, old_tip: &Tip) -> Result<(), Error> {
 		// remove headers ahead if we backtracked
 		for n in header.height..old_tip.height {
 			self.delete_header_by_height(n)?;
@@ -229,8 +230,10 @@ impl ChainStore for ChainKVStore {
 			if let Ok(_) = self.is_on_current_chain(&prev_header) {
 				break;
 			}
-			self.db
-				.put_ser(&u64_to_key(HEADER_HEIGHT_PREFIX, prev_header.height), &prev_header)?;
+			self.db.put_ser(
+				&u64_to_key(HEADER_HEIGHT_PREFIX, prev_header.height),
+				&prev_header,
+			)?;
 			prev_header = self.get_block_header(&prev_header.previous)?;
 		}

View file

@ -23,19 +23,19 @@ use std::path::{Path, PathBuf};
use std::sync::Arc; use std::sync::Arc;
use util::static_secp_instance; use util::static_secp_instance;
use util::secp::pedersen::{RangeProof, Commitment}; use util::secp::pedersen::{Commitment, RangeProof};
use core::consensus::reward; use core::consensus::reward;
use core::core::{Block, BlockHeader, Input, Output, OutputIdentifier, use core::core::{Block, BlockHeader, Input, Output, OutputFeatures, OutputIdentifier,
OutputFeatures, OutputStoreable, TxKernel}; OutputStoreable, TxKernel};
use core::core::pmmr::{self, PMMR, MerkleProof}; use core::core::pmmr::{self, MerkleProof, PMMR};
use core::core::hash::{Hash, Hashed}; use core::core::hash::{Hash, Hashed};
use core::ser::{self, PMMRable}; use core::ser::{self, PMMRable};
use grin_store; use grin_store;
use grin_store::pmmr::{PMMRBackend, PMMRFileMetadata}; use grin_store::pmmr::{PMMRBackend, PMMRFileMetadata};
use types::{ChainStore, SumTreeRoots, PMMRFileMetadataCollection, Error}; use types::{ChainStore, Error, PMMRFileMetadataCollection, SumTreeRoots};
use util::{LOGGER, zip}; use util::{zip, LOGGER};
const SUMTREES_SUBDIR: &'static str = "sumtrees"; const SUMTREES_SUBDIR: &'static str = "sumtrees";
const UTXO_SUBDIR: &'static str = "utxo"; const UTXO_SUBDIR: &'static str = "utxo";
@ -55,7 +55,11 @@ impl<T> PMMRHandle<T>
where where
T: PMMRable, T: PMMRable,
{ {
fn new(root_dir: String, file_name: &str, index_md: Option<PMMRFileMetadata>) -> Result<PMMRHandle<T>, Error> { fn new(
root_dir: String,
file_name: &str,
index_md: Option<PMMRFileMetadata>,
) -> Result<PMMRHandle<T>, Error> {
let path = Path::new(&root_dir).join(SUMTREES_SUBDIR).join(file_name); let path = Path::new(&root_dir).join(SUMTREES_SUBDIR).join(file_name);
fs::create_dir_all(path.clone())?; fs::create_dir_all(path.clone())?;
let be = PMMRBackend::new(path.to_str().unwrap().to_string(), index_md)?; let be = PMMRBackend::new(path.to_str().unwrap().to_string(), index_md)?;
@ -65,7 +69,7 @@ where
last_pos: sz, last_pos: sz,
}) })
} }
/// Return last written positions of hash file and data file /// Return last written positions of hash file and data file
pub fn last_file_positions(&self) -> PMMRFileMetadata { pub fn last_file_positions(&self) -> PMMRFileMetadata {
self.backend.last_file_positions() self.backend.last_file_positions()
@ -93,18 +97,21 @@ pub struct SumTrees {
impl SumTrees { impl SumTrees {
/// Open an existing or new set of backends for the SumTrees /// Open an existing or new set of backends for the SumTrees
pub fn open(root_dir: String, pub fn open(
root_dir: String,
commit_index: Arc<ChainStore>, commit_index: Arc<ChainStore>,
last_file_positions: Option<PMMRFileMetadataCollection> last_file_positions: Option<PMMRFileMetadataCollection>,
) -> Result<SumTrees, Error> { ) -> Result<SumTrees, Error> {
let utxo_file_path: PathBuf = [&root_dir, SUMTREES_SUBDIR, UTXO_SUBDIR].iter().collect(); let utxo_file_path: PathBuf = [&root_dir, SUMTREES_SUBDIR, UTXO_SUBDIR].iter().collect();
fs::create_dir_all(utxo_file_path.clone())?; fs::create_dir_all(utxo_file_path.clone())?;
let rproof_file_path: PathBuf = [&root_dir, SUMTREES_SUBDIR, RANGE_PROOF_SUBDIR].iter().collect(); let rproof_file_path: PathBuf = [&root_dir, SUMTREES_SUBDIR, RANGE_PROOF_SUBDIR]
.iter()
.collect();
fs::create_dir_all(rproof_file_path.clone())?; fs::create_dir_all(rproof_file_path.clone())?;
let kernel_file_path: PathBuf = [&root_dir, SUMTREES_SUBDIR, KERNEL_SUBDIR].iter().collect(); let kernel_file_path: PathBuf =
[&root_dir, SUMTREES_SUBDIR, KERNEL_SUBDIR].iter().collect();
fs::create_dir_all(kernel_file_path.clone())?; fs::create_dir_all(kernel_file_path.clone())?;
let mut utxo_md = None; let mut utxo_md = None;
@ -131,10 +138,8 @@ impl SumTrees {
pub fn is_unspent(&mut self, output_id: &OutputIdentifier) -> Result<Hash, Error> { pub fn is_unspent(&mut self, output_id: &OutputIdentifier) -> Result<Hash, Error> {
match self.commit_index.get_output_pos(&output_id.commit) { match self.commit_index.get_output_pos(&output_id.commit) {
Ok(pos) => { Ok(pos) => {
let output_pmmr:PMMR<OutputStoreable, _> = PMMR::at( let output_pmmr: PMMR<OutputStoreable, _> =
&mut self.utxo_pmmr_h.backend, PMMR::at(&mut self.utxo_pmmr_h.backend, self.utxo_pmmr_h.last_pos);
self.utxo_pmmr_h.last_pos,
);
if let Some((hash, _)) = output_pmmr.get(pos, false) { if let Some((hash, _)) = output_pmmr.get(pos, false) {
if hash == output_id.hash() { if hash == output_id.hash() {
Ok(hash) Ok(hash)
@ -154,19 +159,22 @@ impl SumTrees {
/// nodes at level 0 /// nodes at level 0
/// TODO: These need to return the actual data from the flat-files instead of hashes now /// TODO: These need to return the actual data from the flat-files instead of hashes now
pub fn last_n_utxo(&mut self, distance: u64) -> Vec<(Hash, Option<OutputStoreable>)> { pub fn last_n_utxo(&mut self, distance: u64) -> Vec<(Hash, Option<OutputStoreable>)> {
let utxo_pmmr:PMMR<OutputStoreable, _> = PMMR::at(&mut self.utxo_pmmr_h.backend, self.utxo_pmmr_h.last_pos); let utxo_pmmr: PMMR<OutputStoreable, _> =
PMMR::at(&mut self.utxo_pmmr_h.backend, self.utxo_pmmr_h.last_pos);
utxo_pmmr.get_last_n_insertions(distance) utxo_pmmr.get_last_n_insertions(distance)
} }
/// as above, for range proofs /// as above, for range proofs
pub fn last_n_rangeproof(&mut self, distance: u64) -> Vec<(Hash, Option<RangeProof>)> { pub fn last_n_rangeproof(&mut self, distance: u64) -> Vec<(Hash, Option<RangeProof>)> {
let rproof_pmmr:PMMR<RangeProof, _> = PMMR::at(&mut self.rproof_pmmr_h.backend, self.rproof_pmmr_h.last_pos); let rproof_pmmr: PMMR<RangeProof, _> =
PMMR::at(&mut self.rproof_pmmr_h.backend, self.rproof_pmmr_h.last_pos);
rproof_pmmr.get_last_n_insertions(distance) rproof_pmmr.get_last_n_insertions(distance)
} }
/// as above, for kernels /// as above, for kernels
pub fn last_n_kernel(&mut self, distance: u64) -> Vec<(Hash, Option<TxKernel>)> { pub fn last_n_kernel(&mut self, distance: u64) -> Vec<(Hash, Option<TxKernel>)> {
let kernel_pmmr:PMMR<TxKernel, _> = PMMR::at(&mut self.kernel_pmmr_h.backend, self.kernel_pmmr_h.last_pos); let kernel_pmmr: PMMR<TxKernel, _> =
PMMR::at(&mut self.kernel_pmmr_h.backend, self.kernel_pmmr_h.last_pos);
kernel_pmmr.get_last_n_insertions(distance) kernel_pmmr.get_last_n_insertions(distance)
} }
@ -180,22 +188,19 @@ impl SumTrees {
PMMRFileMetadataCollection::new( PMMRFileMetadataCollection::new(
self.utxo_pmmr_h.last_file_positions(), self.utxo_pmmr_h.last_file_positions(),
self.rproof_pmmr_h.last_file_positions(), self.rproof_pmmr_h.last_file_positions(),
self.kernel_pmmr_h.last_file_positions() self.kernel_pmmr_h.last_file_positions(),
) )
} }
/// Get sum tree roots /// Get sum tree roots
/// TODO: Return data instead of hashes /// TODO: Return data instead of hashes
pub fn roots( pub fn roots(&mut self) -> (Hash, Hash, Hash) {
&mut self, let output_pmmr: PMMR<OutputStoreable, _> =
) -> ( PMMR::at(&mut self.utxo_pmmr_h.backend, self.utxo_pmmr_h.last_pos);
Hash, let rproof_pmmr: PMMR<RangeProof, _> =
Hash, PMMR::at(&mut self.rproof_pmmr_h.backend, self.rproof_pmmr_h.last_pos);
Hash, let kernel_pmmr: PMMR<TxKernel, _> =
) { PMMR::at(&mut self.kernel_pmmr_h.backend, self.kernel_pmmr_h.last_pos);
let output_pmmr:PMMR<OutputStoreable, _> = PMMR::at(&mut self.utxo_pmmr_h.backend, self.utxo_pmmr_h.last_pos);
let rproof_pmmr:PMMR<RangeProof, _> = PMMR::at(&mut self.rproof_pmmr_h.backend, self.rproof_pmmr_h.last_pos);
let kernel_pmmr:PMMR<TxKernel, _> = PMMR::at(&mut self.kernel_pmmr_h.backend, self.kernel_pmmr_h.last_pos);
(output_pmmr.root(), rproof_pmmr.root(), kernel_pmmr.root()) (output_pmmr.root(), rproof_pmmr.root(), kernel_pmmr.root())
} }
} }
@ -273,16 +278,9 @@ pub struct Extension<'a> {
impl<'a> Extension<'a> { impl<'a> Extension<'a> {
// constructor // constructor
fn new( fn new(trees: &'a mut SumTrees, commit_index: Arc<ChainStore>) -> Extension<'a> {
trees: &'a mut SumTrees,
commit_index: Arc<ChainStore>,
) -> Extension<'a> {
Extension { Extension {
utxo_pmmr: PMMR::at( utxo_pmmr: PMMR::at(&mut trees.utxo_pmmr_h.backend, trees.utxo_pmmr_h.last_pos),
&mut trees.utxo_pmmr_h.backend,
trees.utxo_pmmr_h.last_pos,
),
rproof_pmmr: PMMR::at( rproof_pmmr: PMMR::at(
&mut trees.rproof_pmmr_h.backend, &mut trees.rproof_pmmr_h.backend,
trees.rproof_pmmr_h.last_pos, trees.rproof_pmmr_h.last_pos,
@ -302,7 +300,6 @@ impl<'a> Extension<'a> {
/// applied in order of the provided Vec. If pruning is enabled, inputs also /// applied in order of the provided Vec. If pruning is enabled, inputs also
/// prune MMR data. /// prune MMR data.
pub fn apply_block(&mut self, b: &Block) -> Result<(), Error> { pub fn apply_block(&mut self, b: &Block) -> Result<(), Error> {
// first applying coinbase outputs. due to the construction of PMMRs the // first applying coinbase outputs. due to the construction of PMMRs the
// last element, when its a leaf, can never be pruned as it has no parent // last element, when its a leaf, can never be pruned as it has no parent
// yet and it will be needed to calculate that hash. to work around this, // yet and it will be needed to calculate that hash. to work around this,
@ -357,8 +354,9 @@ impl<'a> Extension<'a> {
// check hash from pmmr matches hash from input (or corresponding output) // check hash from pmmr matches hash from input (or corresponding output)
// if not then the input is not being honest about // if not then the input is not being honest about
// what it is attempting to spend... // what it is attempting to spend...
if output_id_hash != read_hash || if output_id_hash != read_hash
output_id_hash != read_elem.expect("no output at position").hash() { || output_id_hash != read_elem.expect("no output at position").hash()
{
return Err(Error::SumTreeErr(format!("output pmmr hash mismatch"))); return Err(Error::SumTreeErr(format!("output pmmr hash mismatch")));
} }
@ -370,7 +368,8 @@ impl<'a> Extension<'a> {
} }
// Now prune the utxo_pmmr, rproof_pmmr and their storage. // Now prune the utxo_pmmr, rproof_pmmr and their storage.
// Input is not valid if we cannot prune successfully (to spend an unspent output). // Input is not valid if we cannot prune successfully (to spend an unspent
// output).
match self.utxo_pmmr.prune(pos, height as u32) { match self.utxo_pmmr.prune(pos, height as u32) {
Ok(true) => { Ok(true) => {
self.rproof_pmmr self.rproof_pmmr
@ -420,7 +419,7 @@ impl<'a> Extension<'a> {
fn apply_kernel(&mut self, kernel: &TxKernel) -> Result<(), Error> { fn apply_kernel(&mut self, kernel: &TxKernel) -> Result<(), Error> {
if let Ok(pos) = self.get_kernel_pos(&kernel.excess) { if let Ok(pos) = self.get_kernel_pos(&kernel.excess) {
// same as outputs // same as outputs
if let Some((h,_)) = self.kernel_pmmr.get(pos, false) { if let Some((h, _)) = self.kernel_pmmr.get(pos, false) {
if h == kernel.hash() { if h == kernel.hash() {
return Err(Error::DuplicateKernel(kernel.excess.clone())); return Err(Error::DuplicateKernel(kernel.excess.clone()));
} }
@ -446,7 +445,11 @@ impl<'a> Extension<'a> {
output: &OutputIdentifier, output: &OutputIdentifier,
block: &Block, block: &Block,
) -> Result<MerkleProof, Error> { ) -> Result<MerkleProof, Error> {
debug!(LOGGER, "sumtree: merkle_proof_via_rewind: rewinding to block {:?}", block.hash()); debug!(
LOGGER,
"sumtree: merkle_proof_via_rewind: rewinding to block {:?}",
block.hash()
);
// rewind to the specified block // rewind to the specified block
self.rewind(block)?; self.rewind(block)?;
// then calculate the Merkle Proof based on the known pos // then calculate the Merkle Proof based on the known pos
@ -476,11 +479,15 @@ impl<'a> Extension<'a> {
/// Rewinds the MMRs to the provided positions, given the output and /// Rewinds the MMRs to the provided positions, given the output and
/// kernel we want to rewind to. /// kernel we want to rewind to.
pub fn rewind_pos(&mut self, height: u64, out_pos_rew: u64, kern_pos_rew: u64) -> Result<(), Error> { pub fn rewind_pos(
debug!(LOGGER, &mut self,
"Rewind sumtrees to output pos: {}, kernel pos: {}", height: u64,
out_pos_rew, out_pos_rew: u64,
kern_pos_rew, kern_pos_rew: u64,
) -> Result<(), Error> {
debug!(
LOGGER,
"Rewind sumtrees to output pos: {}, kernel pos: {}", out_pos_rew, kern_pos_rew,
); );
self.utxo_pmmr self.utxo_pmmr
@ -514,9 +521,7 @@ impl<'a> Extension<'a> {
/// Current root hashes and sums (if applicable) for the UTXO, range proof /// Current root hashes and sums (if applicable) for the UTXO, range proof
/// and kernel sum trees. /// and kernel sum trees.
pub fn roots( pub fn roots(&self) -> SumTreeRoots {
&self,
) -> SumTreeRoots {
SumTreeRoots { SumTreeRoots {
utxo_root: self.utxo_pmmr.root(), utxo_root: self.utxo_pmmr.root(),
rproof_root: self.rproof_pmmr.root(), rproof_root: self.rproof_pmmr.root(),
@@ -556,7 +561,9 @@ impl<'a> Extension<'a> {
let adjusted_sum_utxo = secp.commit_sum(vec![utxo_sum], vec![over_commit])?; let adjusted_sum_utxo = secp.commit_sum(vec![utxo_sum], vec![over_commit])?;
if adjusted_sum_utxo != kernel_sum { if adjusted_sum_utxo != kernel_sum {
return Err(Error::InvalidSumtree("Differing UTXO commitment and kernel excess sums.".to_owned())); return Err(Error::InvalidSumtree(
"Differing UTXO commitment and kernel excess sums.".to_owned(),
));
} }
} }
@@ -567,11 +574,12 @@ impl<'a> Extension<'a> {
/// by iterating over the whole MMR data. This is a costly operation /// by iterating over the whole MMR data. This is a costly operation
/// performed only when we receive a full new chain state. /// performed only when we receive a full new chain state.
pub fn rebuild_index(&self) -> Result<(), Error> { pub fn rebuild_index(&self) -> Result<(), Error> {
for n in 1..self.utxo_pmmr.unpruned_size()+1 { for n in 1..self.utxo_pmmr.unpruned_size() + 1 {
// non-pruned leaves only // non-pruned leaves only
if pmmr::bintree_postorder_height(n) == 0 { if pmmr::bintree_postorder_height(n) == 0 {
if let Some((_, out)) = self.utxo_pmmr.get(n, true) { if let Some((_, out)) = self.utxo_pmmr.get(n, true) {
self.commit_index.save_output_pos(&out.expect("not a leaf node").commit, n)?; self.commit_index
.save_output_pos(&out.expect("not a leaf node").commit, n)?;
} }
} }
} }
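The leaf test used by rebuild_index (bintree_postorder_height(n) == 0) relies on MMR positions being numbered in postorder, where leaves are exactly the height-0 nodes. A from-scratch sketch of that position arithmetic, written from the usual MMR definition rather than copied from grin_core's pmmr module:

// height of the node at 1-based postorder position `pos`: jump to the
// root of the left sibling subtree until the position is all-ones in
// binary (2^k - 1), i.e. the root of a perfect subtree of height k - 1
fn postorder_height(mut pos: u64) -> u64 {
    fn all_ones(n: u64) -> bool {
        n != 0 && (n + 1).is_power_of_two()
    }
    fn msb_pos(n: u64) -> u64 {
        64 - n.leading_zeros() as u64
    }
    while !all_ones(pos) {
        pos -= (1u64 << (msb_pos(pos) - 1)) - 1;
    }
    msb_pos(pos) - 1
}

fn main() {
    // leaves (height 0) among the first 11 positions of an MMR
    let leaves: Vec<u64> = (1..=11).filter(|&n| postorder_height(n) == 0).collect();
    assert_eq!(leaves, vec![1, 2, 4, 5, 8, 9, 11]);
}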
@@ -605,7 +613,8 @@ impl<'a> Extension<'a> {
) )
} }
/// Sums the excess of all our kernels, validating their signatures on the way /// Sums the excess of all our kernels, validating their signatures on the
/// way
fn sum_kernels(&self) -> Result<(Commitment, u64), Error> { fn sum_kernels(&self) -> Result<(Commitment, u64), Error> {
// make sure we have the right count of kernels using the MMR, the storage // make sure we have the right count of kernels using the MMR, the storage
// file may have a few more // file may have a few more
@@ -644,9 +653,9 @@ impl<'a> Extension<'a> {
let mut sum_utxo = None; let mut sum_utxo = None;
let mut utxo_count = 0; let mut utxo_count = 0;
let secp = static_secp_instance(); let secp = static_secp_instance();
for n in 1..self.utxo_pmmr.unpruned_size()+1 { for n in 1..self.utxo_pmmr.unpruned_size() + 1 {
if pmmr::bintree_postorder_height(n) == 0 { if pmmr::bintree_postorder_height(n) == 0 {
if let Some((_,output)) = self.utxo_pmmr.get(n, true) { if let Some((_, output)) = self.utxo_pmmr.get(n, true) {
let out = output.expect("not a leaf node"); let out = output.expect("not a leaf node");
let commit = out.commit.clone(); let commit = out.commit.clone();
match self.rproof_pmmr.get(n, true) { match self.rproof_pmmr.get(n, true) {
@@ -675,9 +684,10 @@ impl<'a> Extension<'a> {
/// and needs to be consistent with how we originally processed /// and needs to be consistent with how we originally processed
/// the outputs in apply_block() /// the outputs in apply_block()
fn indexes_at(block: &Block, commit_index: &ChainStore) -> Result<(u64, u64), Error> { fn indexes_at(block: &Block, commit_index: &ChainStore) -> Result<(u64, u64), Error> {
// If we have any regular outputs then the "last" output is the last regular output // If we have any regular outputs then the "last" output is the last regular
// otherwise it is the last coinbase output. // output otherwise it is the last coinbase output.
// This is because we process coinbase outputs before regular outputs in apply_block(). // This is because we process coinbase outputs before regular outputs in
// apply_block().
// //
// TODO - consider maintaining coinbase outputs in a separate vec in a block? // TODO - consider maintaining coinbase outputs in a separate vec in a block?
// //
@@ -698,7 +708,7 @@ fn indexes_at(block: &Block, commit_index: &ChainStore) -> Result<(u64, u64), Er
} else if last_coinbase_output.is_some() { } else if last_coinbase_output.is_some() {
last_coinbase_output.unwrap() last_coinbase_output.unwrap()
} else { } else {
return Err(Error::Other("can't get index in an empty block".to_owned())) return Err(Error::Other("can't get index in an empty block".to_owned()));
}; };
let out_idx = commit_index let out_idx = commit_index
@@ -706,10 +716,9 @@ fn indexes_at(block: &Block, commit_index: &ChainStore) -> Result<(u64, u64), Er
.map_err(|e| Error::StoreErr(e, format!("missing output pos for block")))?; .map_err(|e| Error::StoreErr(e, format!("missing output pos for block")))?;
let kern_idx = match block.kernels.last() { let kern_idx = match block.kernels.last() {
Some(kernel) => commit_index.get_kernel_pos(&kernel.excess) Some(kernel) => commit_index
.map_err(|e| { .get_kernel_pos(&kernel.excess)
Error::StoreErr(e, format!("missing kernel pos for block")) .map_err(|e| Error::StoreErr(e, format!("missing kernel pos for block")))?,
})?,
None => { None => {
return Err(Error::Other("can't get index in an empty block".to_owned())); return Err(Error::Other("can't get index in an empty block".to_owned()));
} }
@@ -741,6 +750,5 @@ pub fn zip_write(root_dir: String, sumtree_data: File) -> Result<(), Error> {
let sumtrees_path = Path::new(&root_dir).join(SUMTREES_SUBDIR); let sumtrees_path = Path::new(&root_dir).join(SUMTREES_SUBDIR);
fs::create_dir_all(sumtrees_path.clone())?; fs::create_dir_all(sumtrees_path.clone())?;
zip::decompress(sumtree_data, &sumtrees_path) zip::decompress(sumtree_data, &sumtrees_path).map_err(|ze| Error::Other(ze.to_string()))
.map_err(|ze| Error::Other(ze.to_string()))
} }

View file

@@ -20,10 +20,10 @@ use util::secp
use util::secp::pedersen::Commitment; use util::secp::pedersen::Commitment;
use grin_store as store; use grin_store as store;
use core::core::{Block, BlockHeader, block, transaction}; use core::core::{block, transaction, Block, BlockHeader};
use core::core::hash::{Hash, Hashed}; use core::core::hash::{Hash, Hashed};
use core::core::target::Difficulty; use core::core::target::Difficulty;
use core::ser::{self, Readable, Writeable, Reader, Writer}; use core::ser::{self, Readable, Reader, Writeable, Writer};
use grin_store; use grin_store;
use grin_store::pmmr::PMMRFileMetadata; use grin_store::pmmr::PMMRFileMetadata;
@@ -131,13 +131,13 @@ impl Error {
pub fn is_bad_data(&self) -> bool { pub fn is_bad_data(&self) -> bool {
// shorter to match on all the "not the block's fault" errors // shorter to match on all the "not the block's fault" errors
match *self { match *self {
Error::Unfit(_) | Error::Unfit(_)
Error::Orphan | | Error::Orphan
Error::StoreErr(_, _) | | Error::StoreErr(_, _)
Error::SerErr(_) | | Error::SerErr(_)
Error::SumTreeErr(_)| | Error::SumTreeErr(_)
Error::GenesisBlockRequired | | Error::GenesisBlockRequired
Error::Other(_) => false, | Error::Other(_) => false,
_ => true, _ => true,
} }
} }
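The idiom in is_bad_data is worth noting: enumerate the handful of "not the block's fault" variants and let the wildcard treat everything else as bad data, so newly added error variants default to the stricter branch. A standalone sketch of the same pattern with a hypothetical error type (not part of the chain crate):

enum FetchError {
    Timeout,
    NotFound,
    Corrupt(String),
    BadSignature,
}

impl FetchError {
    fn is_bad_data(&self) -> bool {
        match *self {
            // transient or local problems: not the sender's fault
            FetchError::Timeout | FetchError::NotFound => false,
            // anything unlisted is assumed to mean a bad payload
            _ => true,
        }
    }
}

fn main() {
    assert!(!FetchError::Timeout.is_bad_data());
    assert!(FetchError::Corrupt("truncated".to_string()).is_bad_data());
    assert!(FetchError::BadSignature.is_bad_data());
}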
@@ -291,11 +291,19 @@ pub trait ChainStore: Send + Sync {
/// UTXO MMR. Used as an index for spending and pruning. /// UTXO MMR. Used as an index for spending and pruning.
fn get_kernel_pos(&self, commit: &Commitment) -> Result<u64, store::Error>; fn get_kernel_pos(&self, commit: &Commitment) -> Result<u64, store::Error>;
/// Saves information about the last written PMMR file positions for each committed block /// Saves information about the last written PMMR file positions for each
fn save_block_pmmr_file_metadata(&self, h: &Hash, md: &PMMRFileMetadataCollection) -> Result<(), store::Error>; /// committed block
fn save_block_pmmr_file_metadata(
&self,
h: &Hash,
md: &PMMRFileMetadataCollection,
) -> Result<(), store::Error>;
/// Retrieves stored pmmr file metadata information for a given block /// Retrieves stored pmmr file metadata information for a given block
fn get_block_pmmr_file_metadata(&self, h: &Hash) -> Result<PMMRFileMetadataCollection, store::Error>; fn get_block_pmmr_file_metadata(
&self,
h: &Hash,
) -> Result<PMMRFileMetadataCollection, store::Error>;
/// Delete stored pmmr file metadata information for a given block /// Delete stored pmmr file metadata information for a given block
fn delete_block_pmmr_file_metadata(&self, h: &Hash) -> Result<(), store::Error>; fn delete_block_pmmr_file_metadata(&self, h: &Hash) -> Result<(), store::Error>;
@@ -306,7 +314,8 @@ pub trait ChainStore: Send + Sync {
fn setup_height(&self, bh: &BlockHeader, old_tip: &Tip) -> Result<(), store::Error>; fn setup_height(&self, bh: &BlockHeader, old_tip: &Tip) -> Result<(), store::Error>;
} }
/// Single serializable struct to hold metadata about all PMMR file position for a given block /// Single serializable struct to hold metadata about all PMMR file position
/// for a given block
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)] #[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub struct PMMRFileMetadataCollection { pub struct PMMRFileMetadataCollection {
/// file metadata for the utxo file /// file metadata for the utxo file
@@ -314,7 +323,7 @@ pub struct PMMRFileMetadataCollection {
/// file metadata for the rangeproof file /// file metadata for the rangeproof file
pub rproof_file_md: PMMRFileMetadata, pub rproof_file_md: PMMRFileMetadata,
/// file metadata for the kernel file /// file metadata for the kernel file
pub kernel_file_md: PMMRFileMetadata pub kernel_file_md: PMMRFileMetadata,
} }
impl Writeable for PMMRFileMetadataCollection { impl Writeable for PMMRFileMetadataCollection {
@@ -329,9 +338,9 @@ impl Writeable for PMMRFileMetadataCollection {
impl Readable for PMMRFileMetadataCollection { impl Readable for PMMRFileMetadataCollection {
fn read(reader: &mut Reader) -> Result<PMMRFileMetadataCollection, ser::Error> { fn read(reader: &mut Reader) -> Result<PMMRFileMetadataCollection, ser::Error> {
Ok(PMMRFileMetadataCollection { Ok(PMMRFileMetadataCollection {
utxo_file_md : PMMRFileMetadata::read(reader)?, utxo_file_md: PMMRFileMetadata::read(reader)?,
rproof_file_md : PMMRFileMetadata::read(reader)?, rproof_file_md: PMMRFileMetadata::read(reader)?,
kernel_file_md : PMMRFileMetadata::read(reader)?, kernel_file_md: PMMRFileMetadata::read(reader)?,
}) })
} }
} }
@@ -347,11 +356,13 @@ impl PMMRFileMetadataCollection {
} }
/// Helper to create a new collection /// Helper to create a new collection
pub fn new(utxo_md: PMMRFileMetadata, pub fn new(
utxo_md: PMMRFileMetadata,
rproof_md: PMMRFileMetadata, rproof_md: PMMRFileMetadata,
kernel_md: PMMRFileMetadata) -> PMMRFileMetadataCollection { kernel_md: PMMRFileMetadata,
PMMRFileMetadataCollection { ) -> PMMRFileMetadataCollection {
utxo_file_md : utxo_md, PMMRFileMetadataCollection {
utxo_file_md: utxo_md,
rproof_file_md: rproof_md, rproof_file_md: rproof_md,
kernel_file_md: kernel_md, kernel_file_md: kernel_md,
} }

View file

@@ -88,13 +88,8 @@ fn data_files() {
let prev = chain.head_header().unwrap(); let prev = chain.head_header().unwrap();
let difficulty = consensus::next_difficulty(chain.difficulty_iter()).unwrap(); let difficulty = consensus::next_difficulty(chain.difficulty_iter()).unwrap();
let pk = keychain.derive_key_id(n as u32).unwrap(); let pk = keychain.derive_key_id(n as u32).unwrap();
let mut b = core::core::Block::new( let mut b =
&prev, core::core::Block::new(&prev, vec![], &keychain, &pk, difficulty.clone()).unwrap();
vec![],
&keychain,
&pk,
difficulty.clone(),
).unwrap();
b.header.timestamp = prev.timestamp + time::Duration::seconds(60); b.header.timestamp = prev.timestamp + time::Duration::seconds(60);
b.header.difficulty = difficulty.clone(); // TODO: overwrite here? really? b.header.difficulty = difficulty.clone(); // TODO: overwrite here? really?
@@ -109,17 +104,20 @@ fn data_files() {
let prev_bhash = b.header.previous; let prev_bhash = b.header.previous;
let bhash = b.hash(); let bhash = b.hash();
chain.process_block(b.clone(), chain::Options::MINE).unwrap(); chain
.process_block(b.clone(), chain::Options::MINE)
.unwrap();
let head = Tip::from_block(&b.header); let head = Tip::from_block(&b.header);
// Check we have indexes for the last block and the block previous // Check we have indexes for the last block and the block previous
let cur_pmmr_md = chain.get_block_pmmr_file_metadata(&head.last_block_h) let cur_pmmr_md = chain
.get_block_pmmr_file_metadata(&head.last_block_h)
.expect("block pmmr file data doesn't exist"); .expect("block pmmr file data doesn't exist");
let pref_pmmr_md = chain.get_block_pmmr_file_metadata(&head.prev_block_h) let pref_pmmr_md = chain
.get_block_pmmr_file_metadata(&head.prev_block_h)
.expect("previous block pmmr file data doesn't exist"); .expect("previous block pmmr file data doesn't exist");
println!("Cur_pmmr_md: {:?}", cur_pmmr_md); println!("Cur_pmmr_md: {:?}", cur_pmmr_md);
chain.validate().unwrap(); chain.validate().unwrap();
} }
@@ -137,7 +135,13 @@ fn prepare_block(kc: &Keychain, prev: &BlockHeader, chain: &Chain, diff: u64) ->
b b
} }
fn prepare_block_tx(kc: &Keychain, prev: &BlockHeader, chain: &Chain, diff: u64, txs: Vec<&Transaction>) -> Block { fn prepare_block_tx(
kc: &Keychain,
prev: &BlockHeader,
chain: &Chain,
diff: u64,
txs: Vec<&Transaction>,
) -> Block {
let mut b = prepare_block_nosum(kc, prev, diff, txs); let mut b = prepare_block_nosum(kc, prev, diff, txs);
chain.set_sumtree_roots(&mut b, false).unwrap(); chain.set_sumtree_roots(&mut b, false).unwrap();
b b
@@ -149,18 +153,29 @@ fn prepare_fork_block(kc: &Keychain, prev: &BlockHeader, chain: &Chain, diff: u6
b b
} }
fn prepare_fork_block_tx(kc: &Keychain, prev: &BlockHeader, chain: &Chain, diff: u64, txs: Vec<&Transaction>) -> Block { fn prepare_fork_block_tx(
kc: &Keychain,
prev: &BlockHeader,
chain: &Chain,
diff: u64,
txs: Vec<&Transaction>,
) -> Block {
let mut b = prepare_block_nosum(kc, prev, diff, txs); let mut b = prepare_block_nosum(kc, prev, diff, txs);
chain.set_sumtree_roots(&mut b, true).unwrap(); chain.set_sumtree_roots(&mut b, true).unwrap();
b b
} }
fn prepare_block_nosum(kc: &Keychain, prev: &BlockHeader, diff: u64, txs: Vec<&Transaction>) -> Block { fn prepare_block_nosum(
kc: &Keychain,
prev: &BlockHeader,
diff: u64,
txs: Vec<&Transaction>,
) -> Block {
let key_id = kc.derive_key_id(diff as u32).unwrap(); let key_id = kc.derive_key_id(diff as u32).unwrap();
let mut b = match core::core::Block::new(prev, txs, kc, &key_id, Difficulty::from_num(diff)) { let mut b = match core::core::Block::new(prev, txs, kc, &key_id, Difficulty::from_num(diff)) {
Err(e) => panic!("{:?}",e), Err(e) => panic!("{:?}", e),
Ok(b) => b Ok(b) => b,
}; };
b.header.timestamp = prev.timestamp + time::Duration::seconds(60); b.header.timestamp = prev.timestamp + time::Duration::seconds(60);
b.header.total_difficulty = Difficulty::from_num(diff); b.header.total_difficulty = Difficulty::from_num(diff);

View file

@@ -26,7 +26,7 @@ use std::sync::Arc;
use chain::Chain; use chain::Chain;
use chain::types::*; use chain::types::*;
use core::core::{Block, BlockHeader, Transaction, OutputIdentifier, OutputFeatures, build}; use core::core::{build, Block, BlockHeader, OutputFeatures, OutputIdentifier, Transaction};
use core::core::hash::Hashed; use core::core::hash::Hashed;
use core::core::target::Difficulty; use core::core::target::Difficulty;
use core::consensus; use core::consensus;
@@ -76,13 +76,8 @@ fn mine_empty_chain() {
let prev = chain.head_header().unwrap(); let prev = chain.head_header().unwrap();
let difficulty = consensus::next_difficulty(chain.difficulty_iter()).unwrap(); let difficulty = consensus::next_difficulty(chain.difficulty_iter()).unwrap();
let pk = keychain.derive_key_id(n as u32).unwrap(); let pk = keychain.derive_key_id(n as u32).unwrap();
let mut b = core::core::Block::new( let mut b =
&prev, core::core::Block::new(&prev, vec![], &keychain, &pk, difficulty.clone()).unwrap();
vec![],
&keychain,
&pk,
difficulty.clone(),
).unwrap();
b.header.timestamp = prev.timestamp + time::Duration::seconds(60); b.header.timestamp = prev.timestamp + time::Duration::seconds(60);
b.header.difficulty = difficulty.clone(); // TODO: overwrite here? really? b.header.difficulty = difficulty.clone(); // TODO: overwrite here? really?
@@ -181,11 +176,13 @@ fn mine_losing_fork() {
let bfork = prepare_block(&kc, &b1head, &chain, 3); let bfork = prepare_block(&kc, &b1head, &chain, 3);
// add higher difficulty first, prepare its successor, then fork // add higher difficulty first, prepare its successor, then fork
// with lower diff // with lower diff
chain.process_block(b2, chain::Options::SKIP_POW).unwrap(); chain.process_block(b2, chain::Options::SKIP_POW).unwrap();
assert_eq!(chain.head_header().unwrap().hash(), b2head.hash()); assert_eq!(chain.head_header().unwrap().hash(), b2head.hash());
let b3 = prepare_block(&kc, &b2head, &chain, 5); let b3 = prepare_block(&kc, &b2head, &chain, 5);
chain.process_block(bfork, chain::Options::SKIP_POW).unwrap(); chain
.process_block(bfork, chain::Options::SKIP_POW)
.unwrap();
// adding the successor // adding the successor
let b3head = b3.header.clone(); let b3head = b3.header.clone();
@@ -206,12 +203,14 @@ fn longer_fork() {
// for the forked chain // for the forked chain
let mut prev = chain.head_header().unwrap(); let mut prev = chain.head_header().unwrap();
for n in 0..10 { for n in 0..10 {
let b = prepare_block(&kc, &prev, &chain, 2*n + 2); let b = prepare_block(&kc, &prev, &chain, 2 * n + 2);
let bh = b.header.clone(); let bh = b.header.clone();
if n < 5 { if n < 5 {
let b_fork = b.clone(); let b_fork = b.clone();
chain_fork.process_block(b_fork, chain::Options::SKIP_POW).unwrap(); chain_fork
.process_block(b_fork, chain::Options::SKIP_POW)
.unwrap();
} }
chain.process_block(b, chain::Options::SKIP_POW).unwrap(); chain.process_block(b, chain::Options::SKIP_POW).unwrap();
@@ -227,13 +226,15 @@ fn longer_fork() {
let mut prev_fork = head_fork.clone(); let mut prev_fork = head_fork.clone();
for n in 0..7 { for n in 0..7 {
let b_fork = prepare_block(&kc, &prev_fork, &chain_fork, 2*n + 11); let b_fork = prepare_block(&kc, &prev_fork, &chain_fork, 2 * n + 11);
let bh_fork = b_fork.header.clone(); let bh_fork = b_fork.header.clone();
let b = b_fork.clone(); let b = b_fork.clone();
chain.process_block(b, chain::Options::SKIP_POW).unwrap(); chain.process_block(b, chain::Options::SKIP_POW).unwrap();
chain_fork.process_block(b_fork, chain::Options::SKIP_POW).unwrap(); chain_fork
.process_block(b_fork, chain::Options::SKIP_POW)
.unwrap();
prev_fork = bh_fork; prev_fork = bh_fork;
} }
} }
@@ -254,7 +255,9 @@ fn spend_in_fork() {
let out_id = OutputIdentifier::from_output(&b.outputs[0]); let out_id = OutputIdentifier::from_output(&b.outputs[0]);
assert!(out_id.features.contains(OutputFeatures::COINBASE_OUTPUT)); assert!(out_id.features.contains(OutputFeatures::COINBASE_OUTPUT));
fork_head = b.header.clone(); fork_head = b.header.clone();
chain.process_block(b.clone(), chain::Options::SKIP_POW).unwrap(); chain
.process_block(b.clone(), chain::Options::SKIP_POW)
.unwrap();
let merkle_proof = chain.get_merkle_proof(&out_id, &b).unwrap(); let merkle_proof = chain.get_merkle_proof(&out_id, &b).unwrap();
@@ -290,7 +293,9 @@ fn spend_in_fork() {
let next = prepare_block_tx(&kc, &fork_head, &chain, 7, vec![&tx1]); let next = prepare_block_tx(&kc, &fork_head, &chain, 7, vec![&tx1]);
let prev_main = next.header.clone(); let prev_main = next.header.clone();
chain.process_block(next.clone(), chain::Options::SKIP_POW).unwrap(); chain
.process_block(next.clone(), chain::Options::SKIP_POW)
.unwrap();
chain.validate().unwrap(); chain.validate().unwrap();
println!("tx 1 processed, should have 6 outputs or 396 bytes in file, first skipped"); println!("tx 1 processed, should have 6 outputs or 396 bytes in file, first skipped");
@@ -310,7 +315,7 @@ fn spend_in_fork() {
chain.validate().unwrap(); chain.validate().unwrap();
println!("tx 2 processed"); println!("tx 2 processed");
/*panic!("Stop");*/ /* panic!("Stop"); */
// mine 2 forked blocks from the first // mine 2 forked blocks from the first
let fork = prepare_fork_block_tx(&kc, &fork_head, &chain, 6, vec![&tx1]); let fork = prepare_fork_block_tx(&kc, &fork_head, &chain, 6, vec![&tx1]);
@@ -319,28 +324,48 @@ fn spend_in_fork() {
let fork_next = prepare_fork_block_tx(&kc, &prev_fork, &chain, 8, vec![&tx2]); let fork_next = prepare_fork_block_tx(&kc, &prev_fork, &chain, 8, vec![&tx2]);
let prev_fork = fork_next.header.clone(); let prev_fork = fork_next.header.clone();
chain.process_block(fork_next, chain::Options::SKIP_POW).unwrap(); chain
.process_block(fork_next, chain::Options::SKIP_POW)
.unwrap();
chain.validate().unwrap(); chain.validate().unwrap();
// check state // check state
let head = chain.head_header().unwrap(); let head = chain.head_header().unwrap();
assert_eq!(head.height, 6); assert_eq!(head.height, 6);
assert_eq!(head.hash(), prev_main.hash()); assert_eq!(head.hash(), prev_main.hash());
assert!(chain.is_unspent(&OutputIdentifier::from_output(&tx2.outputs[0])).is_ok()); assert!(
assert!(chain.is_unspent(&OutputIdentifier::from_output(&tx1.outputs[0])).is_err()); chain
.is_unspent(&OutputIdentifier::from_output(&tx2.outputs[0]))
.is_ok()
);
assert!(
chain
.is_unspent(&OutputIdentifier::from_output(&tx1.outputs[0]))
.is_err()
);
// make the fork win // make the fork win
let fork_next = prepare_fork_block(&kc, &prev_fork, &chain, 10); let fork_next = prepare_fork_block(&kc, &prev_fork, &chain, 10);
let prev_fork = fork_next.header.clone(); let prev_fork = fork_next.header.clone();
chain.process_block(fork_next, chain::Options::SKIP_POW).unwrap(); chain
.process_block(fork_next, chain::Options::SKIP_POW)
.unwrap();
chain.validate().unwrap(); chain.validate().unwrap();
// check state // check state
let head = chain.head_header().unwrap(); let head = chain.head_header().unwrap();
assert_eq!(head.height, 7); assert_eq!(head.height, 7);
assert_eq!(head.hash(), prev_fork.hash()); assert_eq!(head.hash(), prev_fork.hash());
assert!(chain.is_unspent(&OutputIdentifier::from_output(&tx2.outputs[0])).is_ok()); assert!(
assert!(chain.is_unspent(&OutputIdentifier::from_output(&tx1.outputs[0])).is_err()); chain
.is_unspent(&OutputIdentifier::from_output(&tx2.outputs[0]))
.is_ok()
);
assert!(
chain
.is_unspent(&OutputIdentifier::from_output(&tx1.outputs[0]))
.is_err()
);
} }
fn prepare_block(kc: &Keychain, prev: &BlockHeader, chain: &Chain, diff: u64) -> Block { fn prepare_block(kc: &Keychain, prev: &BlockHeader, chain: &Chain, diff: u64) -> Block {
@@ -349,7 +374,13 @@ fn prepare_block(kc: &Keychain, prev: &BlockHeader, chain: &Chain, diff: u64) ->
b b
} }
fn prepare_block_tx(kc: &Keychain, prev: &BlockHeader, chain: &Chain, diff: u64, txs: Vec<&Transaction>) -> Block { fn prepare_block_tx(
kc: &Keychain,
prev: &BlockHeader,
chain: &Chain,
diff: u64,
txs: Vec<&Transaction>,
) -> Block {
let mut b = prepare_block_nosum(kc, prev, diff, txs); let mut b = prepare_block_nosum(kc, prev, diff, txs);
chain.set_sumtree_roots(&mut b, false).unwrap(); chain.set_sumtree_roots(&mut b, false).unwrap();
b b
@@ -361,18 +392,29 @@ fn prepare_fork_block(kc: &Keychain, prev: &BlockHeader, chain: &Chain, diff: u6
b b
} }
fn prepare_fork_block_tx(kc: &Keychain, prev: &BlockHeader, chain: &Chain, diff: u64, txs: Vec<&Transaction>) -> Block { fn prepare_fork_block_tx(
kc: &Keychain,
prev: &BlockHeader,
chain: &Chain,
diff: u64,
txs: Vec<&Transaction>,
) -> Block {
let mut b = prepare_block_nosum(kc, prev, diff, txs); let mut b = prepare_block_nosum(kc, prev, diff, txs);
chain.set_sumtree_roots(&mut b, true).unwrap(); chain.set_sumtree_roots(&mut b, true).unwrap();
b b
} }
fn prepare_block_nosum(kc: &Keychain, prev: &BlockHeader, diff: u64, txs: Vec<&Transaction>) -> Block { fn prepare_block_nosum(
kc: &Keychain,
prev: &BlockHeader,
diff: u64,
txs: Vec<&Transaction>,
) -> Block {
let key_id = kc.derive_key_id(diff as u32).unwrap(); let key_id = kc.derive_key_id(diff as u32).unwrap();
let mut b = match core::core::Block::new(prev, txs, kc, &key_id, Difficulty::from_num(diff)) { let mut b = match core::core::Block::new(prev, txs, kc, &key_id, Difficulty::from_num(diff)) {
Err(e) => panic!("{:?}",e), Err(e) => panic!("{:?}", e),
Ok(b) => b Ok(b) => b,
}; };
b.header.timestamp = prev.timestamp + time::Duration::seconds(60); b.header.timestamp = prev.timestamp + time::Duration::seconds(60);
b.header.total_difficulty = Difficulty::from_num(diff); b.header.total_difficulty = Difficulty::from_num(diff);

View file

@@ -46,20 +46,23 @@ fn test_various_store_indices() {
global::set_mining_mode(ChainTypes::AutomatedTesting); global::set_mining_mode(ChainTypes::AutomatedTesting);
let genesis = pow::mine_genesis_block(None).unwrap(); let genesis = pow::mine_genesis_block(None).unwrap();
chain_store.save_block(&genesis).unwrap(); chain_store.save_block(&genesis).unwrap();
chain_store.setup_height(&genesis.header, &Tip::new(genesis.hash())).unwrap(); chain_store
.setup_height(&genesis.header, &Tip::new(genesis.hash()))
.unwrap();
let block = Block::new( let block = Block::new(
&genesis.header, &genesis.header,
vec![], vec![],
&keychain, &keychain,
&key_id, &key_id,
Difficulty::one() Difficulty::one(),
).unwrap(); ).unwrap();
let block_hash = block.hash(); let block_hash = block.hash();
chain_store.save_block(&block).unwrap(); chain_store.save_block(&block).unwrap();
chain_store.setup_height(&block.header, chain_store
&Tip::from_block(&block.header)).unwrap(); .setup_height(&block.header, &Tip::from_block(&block.header))
.unwrap();
let block_header = chain_store.get_block_header(&block_hash).unwrap(); let block_header = chain_store.get_block_header(&block_hash).unwrap();
assert_eq!(block_header.hash(), block_hash); assert_eq!(block_header.hash(), block_hash);

View file

@@ -76,13 +76,8 @@ fn test_coinbase_maturity() {
let key_id3 = keychain.derive_key_id(3).unwrap(); let key_id3 = keychain.derive_key_id(3).unwrap();
let key_id4 = keychain.derive_key_id(4).unwrap(); let key_id4 = keychain.derive_key_id(4).unwrap();
let mut block = core::core::Block::new( let mut block =
&prev, core::core::Block::new(&prev, vec![], &keychain, &key_id1, Difficulty::one()).unwrap();
vec![],
&keychain,
&key_id1,
Difficulty::one()
).unwrap();
block.header.timestamp = prev.timestamp + time::Duration::seconds(60); block.header.timestamp = prev.timestamp + time::Duration::seconds(60);
let difficulty = consensus::next_difficulty(chain.difficulty_iter()).unwrap(); let difficulty = consensus::next_difficulty(chain.difficulty_iter()).unwrap();
@@ -109,7 +104,9 @@ fn test_coinbase_maturity() {
// we will need this later when we want to spend the coinbase output // we will need this later when we want to spend the coinbase output
let block_hash = block.hash(); let block_hash = block.hash();
chain.process_block(block.clone(), chain::Options::MINE).unwrap(); chain
.process_block(block.clone(), chain::Options::MINE)
.unwrap();
let merkle_proof = chain.get_merkle_proof(&out_id, &block).unwrap(); let merkle_proof = chain.get_merkle_proof(&out_id, &block).unwrap();
@@ -124,26 +121,20 @@ fn test_coinbase_maturity() {
// this is not a valid tx as the coinbase output cannot be spent yet // this is not a valid tx as the coinbase output cannot be spent yet
let coinbase_txn = build::transaction( let coinbase_txn = build::transaction(
vec![ vec![
build::coinbase_input( build::coinbase_input(amount, block_hash, merkle_proof.clone(), key_id1.clone()),
amount,
block_hash,
merkle_proof.clone(),
key_id1.clone(),
),
build::output(amount - 2, key_id2.clone()), build::output(amount - 2, key_id2.clone()),
build::with_fee(2), build::with_fee(2),
], ],
&keychain, &keychain,
).unwrap(); ).unwrap();
let mut block = let mut block = core::core::Block::new(
core::core::Block::new( &prev,
&prev, vec![&coinbase_txn],
vec![&coinbase_txn], &keychain,
&keychain, &key_id3,
&key_id3, Difficulty::one(),
Difficulty::one(), ).unwrap();
).unwrap();
block.header.timestamp = prev.timestamp + time::Duration::seconds(60); block.header.timestamp = prev.timestamp + time::Duration::seconds(60);
let difficulty = consensus::next_difficulty(chain.difficulty_iter()).unwrap(); let difficulty = consensus::next_difficulty(chain.difficulty_iter()).unwrap();
@@ -169,13 +160,8 @@ fn test_coinbase_maturity() {
let keychain = Keychain::from_random_seed().unwrap(); let keychain = Keychain::from_random_seed().unwrap();
let pk = keychain.derive_key_id(1).unwrap(); let pk = keychain.derive_key_id(1).unwrap();
let mut block = core::core::Block::new( let mut block =
&prev, core::core::Block::new(&prev, vec![], &keychain, &pk, Difficulty::one()).unwrap();
vec![],
&keychain,
&pk,
Difficulty::one()
).unwrap();
block.header.timestamp = prev.timestamp + time::Duration::seconds(60); block.header.timestamp = prev.timestamp + time::Duration::seconds(60);
let difficulty = consensus::next_difficulty(chain.difficulty_iter()).unwrap(); let difficulty = consensus::next_difficulty(chain.difficulty_iter()).unwrap();
@@ -196,12 +182,7 @@ fn test_coinbase_maturity() {
let coinbase_txn = build::transaction( let coinbase_txn = build::transaction(
vec![ vec![
build::coinbase_input( build::coinbase_input(amount, block_hash, merkle_proof.clone(), key_id1.clone()),
amount,
block_hash,
merkle_proof.clone(),
key_id1.clone(),
),
build::output(amount - 2, key_id2.clone()), build::output(amount - 2, key_id2.clone()),
build::with_fee(2), build::with_fee(2),
], ],

View file

@@ -121,8 +121,8 @@ impl GlobalConfig {
} }
// Try to parse the config file if it exists // Try to parse the config file if it exists
// explode if it does exist but something's wrong // explode if it does exist but something's wrong
// with it // with it
return_value.read_config() return_value.read_config()
} }
@@ -164,9 +164,10 @@ impl GlobalConfig {
match encoded { match encoded {
Ok(enc) => return Ok(enc), Ok(enc) => return Ok(enc),
Err(e) => { Err(e) => {
return Err(ConfigError::SerializationError( return Err(ConfigError::SerializationError(String::from(format!(
String::from(format!("{}", e)), "{}",
)); e
))));
} }
} }
} }

View file

@@ -20,7 +20,6 @@
#![deny(unused_mut)] #![deny(unused_mut)]
#![warn(missing_docs)] #![warn(missing_docs)]
extern crate serde; extern crate serde;
#[macro_use] #[macro_use]
extern crate serde_derive; extern crate serde_derive;

View file

@@ -45,8 +45,7 @@ impl fmt::Display for ConfigError {
ConfigError::ParseError(ref file_name, ref message) => write!( ConfigError::ParseError(ref file_name, ref message) => write!(
f, f,
"Error parsing configuration file at {} - {}", "Error parsing configuration file at {} - {}",
file_name, file_name, message
message
), ),
ConfigError::FileIOError(ref file_name, ref message) => { ConfigError::FileIOError(ref file_name, ref message) => {
write!(f, "{} {}", message, file_name) write!(f, "{} {}", message, file_name)

View file

@@ -29,7 +29,7 @@ impl Summable for TestElem {
type Sum = u64; type Sum = u64;
fn sum(&self) -> u64 { fn sum(&self) -> u64 {
// sums are not allowed to overflow, so we use this simple // sums are not allowed to overflow, so we use this simple
// non-injective "sum" function that will still be homomorphic // non-injective "sum" function that will still be homomorphic
self.0[0] as u64 * 0x1000 + self.0[1] as u64 * 0x100 + self.0[2] as u64 * 0x10 self.0[0] as u64 * 0x1000 + self.0[1] as u64 * 0x100 + self.0[2] as u64 * 0x10
+ self.0[3] as u64 + self.0[3] as u64
} }
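Since the "sum" above is a weighted linear map over the four words, it distributes over element-wise addition (homomorphic) while letting many distinct inputs share a sum (non-injective). A quick check of both properties:

fn sum(e: [u32; 4]) -> u64 {
    e[0] as u64 * 0x1000 + e[1] as u64 * 0x100 + e[2] as u64 * 0x10 + e[3] as u64
}

fn main() {
    let (a, b) = ([1, 2, 3, 4], [5, 6, 7, 8]);
    let both = [a[0] + b[0], a[1] + b[1], a[2] + b[2], a[3] + b[3]];
    // homomorphic: summing children matches summing their combination
    assert_eq!(sum(a) + sum(b), sum(both));
    // non-injective: distinct elements can share a sum
    assert_eq!(sum([0, 0, 1, 0]), sum([0, 0, 0, 16]));
}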
@@ -51,7 +51,7 @@ fn bench_small_tree(b: &mut Bencher) {
let mut big_tree = SumTree::new(); let mut big_tree = SumTree::new();
for i in 0..1000 { for i in 0..1000 {
// To avoid RNG overflow we generate random elements that are small. // To avoid RNG overflow we generate random elements that are small.
// Though to avoid repeat elements they have to be reasonably big. // Though to avoid repeat elements they have to be reasonably big.
let new_elem; let new_elem;
let word1 = rng.gen::<u16>() as u32; let word1 = rng.gen::<u16>() as u32;
let word2 = rng.gen::<u16>() as u32; let word2 = rng.gen::<u16>() as u32;

View file

@@ -91,8 +91,7 @@ pub const MAX_BLOCK_INPUTS: usize = 300_000; // soft fork down when too_high
/// Whether a block exceeds the maximum acceptable weight /// Whether a block exceeds the maximum acceptable weight
pub fn exceeds_weight(input_len: usize, output_len: usize, kernel_len: usize) -> bool { pub fn exceeds_weight(input_len: usize, output_len: usize, kernel_len: usize) -> bool {
input_len * BLOCK_INPUT_WEIGHT + output_len * BLOCK_OUTPUT_WEIGHT input_len * BLOCK_INPUT_WEIGHT + output_len * BLOCK_OUTPUT_WEIGHT
+ kernel_len * BLOCK_KERNEL_WEIGHT > MAX_BLOCK_WEIGHT + kernel_len * BLOCK_KERNEL_WEIGHT > MAX_BLOCK_WEIGHT || input_len > MAX_BLOCK_INPUTS
|| input_len > MAX_BLOCK_INPUTS
} }
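A worked check of the weight rule. MAX_BLOCK_INPUTS appears in the hunk above; the per-item weights and MAX_BLOCK_WEIGHT below are illustrative assumptions, so read the actual constants from consensus.rs:

// assumed values for illustration only
const BLOCK_INPUT_WEIGHT: usize = 1;
const BLOCK_OUTPUT_WEIGHT: usize = 10;
const BLOCK_KERNEL_WEIGHT: usize = 2;
const MAX_BLOCK_WEIGHT: usize = 80_000;
const MAX_BLOCK_INPUTS: usize = 300_000; // from the hunk above

fn exceeds_weight(input_len: usize, output_len: usize, kernel_len: usize) -> bool {
    input_len * BLOCK_INPUT_WEIGHT + output_len * BLOCK_OUTPUT_WEIGHT
        + kernel_len * BLOCK_KERNEL_WEIGHT > MAX_BLOCK_WEIGHT || input_len > MAX_BLOCK_INPUTS
}

fn main() {
    // 5_000*1 + 7_000*10 + 600*2 = 76_200, under the assumed 80_000 cap
    assert!(!exceeds_weight(5_000, 7_000, 600));
    // 400 more outputs add 4_000 weight and tip it over
    assert!(exceeds_weight(5_000, 7_400, 600));
}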
/// Fork every 250,000 blocks for first 2 years, simple number and just a /// Fork every 250,000 blocks for first 2 years, simple number and just a
@@ -186,21 +185,24 @@ where
// Get the difficulty sum for averaging later // Get the difficulty sum for averaging later
// Which in this case is the sum of the last // Which in this case is the sum of the last
// DIFFICULTY_ADJUST_WINDOW elements // DIFFICULTY_ADJUST_WINDOW elements
let diff_sum = diff_data.iter() let diff_sum = diff_data
.iter()
.skip(MEDIAN_TIME_WINDOW as usize) .skip(MEDIAN_TIME_WINDOW as usize)
.take(DIFFICULTY_ADJUST_WINDOW as usize) .take(DIFFICULTY_ADJUST_WINDOW as usize)
.fold(Difficulty::zero(), |sum, d| sum + d.clone().unwrap().1); .fold(Difficulty::zero(), |sum, d| sum + d.clone().unwrap().1);
// Obtain the median window for the earlier time period // Obtain the median window for the earlier time period
// which is just the first MEDIAN_TIME_WINDOW elements // which is just the first MEDIAN_TIME_WINDOW elements
let mut window_earliest: Vec<u64> = diff_data.iter() let mut window_earliest: Vec<u64> = diff_data
.iter()
.take(MEDIAN_TIME_WINDOW as usize) .take(MEDIAN_TIME_WINDOW as usize)
.map(|n| n.clone().unwrap().0) .map(|n| n.clone().unwrap().0)
.collect(); .collect();
// Obtain the median window for the latest time period // Obtain the median window for the latest time period
// i.e. the last MEDIAN_TIME_WINDOW elements // i.e. the last MEDIAN_TIME_WINDOW elements
let mut window_latest: Vec<u64> = diff_data.iter() let mut window_latest: Vec<u64> = diff_data
.iter()
.skip(DIFFICULTY_ADJUST_WINDOW as usize) .skip(DIFFICULTY_ADJUST_WINDOW as usize)
.map(|n| n.clone().unwrap().0) .map(|n| n.clone().unwrap().0)
.collect(); .collect();
@@ -212,15 +214,14 @@ where
let earliest_ts = window_earliest[MEDIAN_TIME_INDEX as usize]; let earliest_ts = window_earliest[MEDIAN_TIME_INDEX as usize];
// Calculate the average difficulty // Calculate the average difficulty
let diff_avg = diff_sum.into_num() / let diff_avg = diff_sum.into_num() / Difficulty::from_num(DIFFICULTY_ADJUST_WINDOW).into_num();
Difficulty::from_num(DIFFICULTY_ADJUST_WINDOW).into_num();
// Actual undampened time delta // Actual undampened time delta
let ts_delta = latest_ts - earliest_ts; let ts_delta = latest_ts - earliest_ts;
// Apply dampening // Apply dampening
let ts_damp = match diff_avg { let ts_damp = match diff_avg {
n if n >= DAMP_FACTOR => ((DAMP_FACTOR-1) * BLOCK_TIME_WINDOW + ts_delta) / DAMP_FACTOR, n if n >= DAMP_FACTOR => ((DAMP_FACTOR - 1) * BLOCK_TIME_WINDOW + ts_delta) / DAMP_FACTOR,
_ => ts_delta, _ => ts_delta,
}; };
@@ -233,8 +234,7 @@ where
ts_damp ts_damp
}; };
let difficulty = let difficulty = diff_avg * Difficulty::from_num(BLOCK_TIME_WINDOW).into_num()
diff_avg * Difficulty::from_num(BLOCK_TIME_WINDOW).into_num()
/ Difficulty::from_num(adj_ts).into_num(); / Difficulty::from_num(adj_ts).into_num();
Ok(max(Difficulty::from_num(difficulty), Difficulty::one())) Ok(max(Difficulty::from_num(difficulty), Difficulty::one()))
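A worked sketch of the damping step above. The constants are assumptions for illustration (DAMP_FACTOR, DIFFICULTY_ADJUST_WINDOW and BLOCK_TIME_SEC live in consensus.rs), and the dampened branch shown is the one taken when diff_avg >= DAMP_FACTOR:

const BLOCK_TIME_SEC: u64 = 60; // assumed 60s target block time
const DIFFICULTY_ADJUST_WINDOW: u64 = 60; // assumed 60-block window
const BLOCK_TIME_WINDOW: u64 = DIFFICULTY_ADJUST_WINDOW * BLOCK_TIME_SEC; // 3_600s
const DAMP_FACTOR: u64 = 3; // assumed

fn main() {
    // blocks arrived twice as fast as targeted over the window
    let ts_delta = BLOCK_TIME_WINDOW / 2; // 1_800s observed
    // pull the observed delta only part of the way from the target,
    // rather than adopting it outright
    let ts_damp = ((DAMP_FACTOR - 1) * BLOCK_TIME_WINDOW + ts_delta) / DAMP_FACTOR;
    assert_eq!(ts_damp, 3_000); // (2 * 3_600 + 1_800) / 3
    // difficulty then scales by target/actual time
    let diff_avg = 1_000u64;
    let next_diff = diff_avg * BLOCK_TIME_WINDOW / ts_damp;
    assert_eq!(next_diff, 1_200); // a gentle 1.2x bump instead of 2x
}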

View file

@@ -18,26 +18,15 @@ use time;
use rand::{thread_rng, Rng}; use rand::{thread_rng, Rng};
use std::collections::HashSet; use std::collections::HashSet;
use core::{ use core::{Committed, Input, KernelFeatures, Output, OutputFeatures, Proof, ProofMessageElements,
Committed, ShortId, SwitchCommitHash, Transaction, TxKernel};
Input,
Output,
ShortId,
SwitchCommitHash,
Proof,
ProofMessageElements,
TxKernel,
Transaction,
OutputFeatures,
KernelFeatures
};
use consensus; use consensus;
use consensus::{exceeds_weight, reward, REWARD, VerifySortOrder}; use consensus::{exceeds_weight, reward, VerifySortOrder, REWARD};
use core::hash::{Hash, Hashed, ZERO_HASH}; use core::hash::{Hash, Hashed, ZERO_HASH};
use core::id::ShortIdentifiable; use core::id::ShortIdentifiable;
use core::target::Difficulty; use core::target::Difficulty;
use core::transaction; use core::transaction;
use ser::{self, Readable, Reader, Writeable, Writer, WriteableSorted, read_and_verify_sorted}; use ser::{self, read_and_verify_sorted, Readable, Reader, Writeable, WriteableSorted, Writer};
use global; use global;
use keychain; use keychain;
use keychain::BlindingFactor; use keychain::BlindingFactor;
@@ -61,7 +50,8 @@ pub enum Error {
KernelLockHeight(u64), KernelLockHeight(u64),
/// Underlying tx related error /// Underlying tx related error
Transaction(transaction::Error), Transaction(transaction::Error),
/// Underlying Secp256k1 error (signature validation or invalid public key typically) /// Underlying Secp256k1 error (signature validation or invalid public key
/// typically)
Secp(secp::Error), Secp(secp::Error),
/// Underlying keychain related error /// Underlying keychain related error
Keychain(keychain::Error), Keychain(keychain::Error),
@@ -69,15 +59,17 @@ pub enum Error {
Consensus(consensus::Error), Consensus(consensus::Error),
/// Coinbase has not yet matured and cannot be spent (1,000 blocks) /// Coinbase has not yet matured and cannot be spent (1,000 blocks)
ImmatureCoinbase { ImmatureCoinbase {
/// The height of the block containing the input spending the coinbase output /// The height of the block containing the input spending the coinbase
/// output
height: u64, height: u64,
/// The lock_height needed to be reached for the coinbase output to mature /// The lock_height needed to be reached for the coinbase output to
/// mature
lock_height: u64, lock_height: u64,
}, },
/// Underlying Merkle proof error /// Underlying Merkle proof error
MerkleProof, MerkleProof,
/// Other unspecified error condition /// Other unspecified error condition
Other(String) Other(String),
} }
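The ImmatureCoinbase variant above carries both heights involved in the maturity rule. A minimal sketch of that check, using the 1,000-block maturity stated in the doc comment:

const COINBASE_MATURITY: u64 = 1_000; // per the doc comment above

// `out_height` is the block that created the coinbase output,
// `spend_height` the block attempting to spend it
fn check_maturity(out_height: u64, spend_height: u64) -> Result<(), (u64, u64)> {
    let lock_height = out_height + COINBASE_MATURITY;
    if spend_height < lock_height {
        // mirrors Error::ImmatureCoinbase { height, lock_height }
        return Err((spend_height, lock_height));
    }
    Ok(())
}

fn main() {
    assert_eq!(check_maturity(10, 500), Err((500, 1_010)));
    assert!(check_maturity(10, 1_010).is_ok());
}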
impl From<transaction::Error> for Error { impl From<transaction::Error> for Error {
@@ -129,7 +121,8 @@ pub struct BlockHeader {
pub difficulty: Difficulty, pub difficulty: Difficulty,
/// Total accumulated difficulty since genesis block /// Total accumulated difficulty since genesis block
pub total_difficulty: Difficulty, pub total_difficulty: Difficulty,
/// The single aggregate "offset" that needs to be applied for all commitments to sum /// The single aggregate "offset" that needs to be applied for all
/// commitments to sum
pub kernel_offset: BlindingFactor, pub kernel_offset: BlindingFactor,
} }
@@ -229,7 +222,8 @@ pub struct CompactBlock {
pub out_full: Vec<Output>, pub out_full: Vec<Output>,
/// List of full kernels - specifically the coinbase kernel(s) /// List of full kernels - specifically the coinbase kernel(s)
pub kern_full: Vec<TxKernel>, pub kern_full: Vec<TxKernel>,
/// List of transaction kernels, excluding those in the full list (short_ids) /// List of transaction kernels, excluding those in the full list
/// (short_ids)
pub kern_ids: Vec<ShortId>, pub kern_ids: Vec<ShortId>,
} }
@@ -254,7 +248,8 @@ impl Writeable for CompactBlock {
let mut kern_full = self.kern_full.clone(); let mut kern_full = self.kern_full.clone();
let mut kern_ids = self.kern_ids.clone(); let mut kern_ids = self.kern_ids.clone();
// Consensus rule that everything is sorted in lexicographical order on the wire. // Consensus rule that everything is sorted in lexicographical order on the
// wire.
try!(out_full.write_sorted(writer)); try!(out_full.write_sorted(writer));
try!(kern_full.write_sorted(writer)); try!(kern_full.write_sorted(writer));
try!(kern_ids.write_sorted(writer)); try!(kern_ids.write_sorted(writer));
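The sorted-on-the-wire rule exists so that every node serializes the same set to identical bytes. A simplified sketch of the write_sorted idea, with plain byte vectors standing in for the Hashed/Writer machinery:

// consensus rule: emit items in lexicographic order so serialization
// is canonical regardless of insertion order
fn write_sorted(items: &mut Vec<Vec<u8>>, out: &mut Vec<u8>) {
    items.sort();
    for item in items.iter() {
        out.extend_from_slice(item);
    }
}

fn main() {
    let mut items = vec![vec![3, 1], vec![1, 2], vec![2, 9]];
    let mut wire = Vec::new();
    write_sorted(&mut items, &mut wire);
    assert_eq!(wire, vec![1, 2, 2, 9, 3, 1]); // order-independent result
}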
@@ -298,7 +293,8 @@ pub struct Block {
pub inputs: Vec<Input>, pub inputs: Vec<Input>,
/// List of transaction outputs /// List of transaction outputs
pub outputs: Vec<Output>, pub outputs: Vec<Output>,
/// List of kernels with associated proofs (note these are offset from tx_kernels) /// List of kernels with associated proofs (note these are offset from
/// tx_kernels)
pub kernels: Vec<TxKernel>, pub kernels: Vec<TxKernel>,
} }
@@ -321,7 +317,8 @@ impl Writeable for Block {
let mut outputs = self.outputs.clone(); let mut outputs = self.outputs.clone();
let mut kernels = self.kernels.clone(); let mut kernels = self.kernels.clone();
// Consensus rule that everything is sorted in lexicographical order on the wire. // Consensus rule that everything is sorted in lexicographical order on the
// wire.
try!(inputs.write_sorted(writer)); try!(inputs.write_sorted(writer));
try!(outputs.write_sorted(writer)); try!(outputs.write_sorted(writer));
try!(kernels.write_sorted(writer)); try!(kernels.write_sorted(writer));
@@ -394,12 +391,8 @@ impl Block {
difficulty: Difficulty, difficulty: Difficulty,
) -> Result<Block, Error> { ) -> Result<Block, Error> {
let fees = txs.iter().map(|tx| tx.fee()).sum(); let fees = txs.iter().map(|tx| tx.fee()).sum();
let (reward_out, reward_proof) = Block::reward_output( let (reward_out, reward_proof) =
keychain, Block::reward_output(keychain, key_id, fees, prev.height + 1)?;
key_id,
fees,
prev.height + 1,
)?;
let block = Block::with_reward(prev, txs, reward_out, reward_proof, difficulty)?; let block = Block::with_reward(prev, txs, reward_out, reward_proof, difficulty)?;
Ok(block) Ok(block)
} }
@@ -544,9 +537,7 @@ impl Block {
.iter() .iter()
.cloned() .cloned()
.filter(|x| *x != BlindingFactor::zero()) .filter(|x| *x != BlindingFactor::zero())
.filter_map(|x| { .filter_map(|x| x.secret_key(&secp).ok())
x.secret_key(&secp).ok()
})
.collect::<Vec<_>>(); .collect::<Vec<_>>();
if keys.is_empty() { if keys.is_empty() {
BlindingFactor::zero() BlindingFactor::zero()
@@ -557,25 +548,22 @@ impl Block {
} }
}; };
Ok( Ok(Block {
Block { header: BlockHeader {
header: BlockHeader { height: prev.height + 1,
height: prev.height + 1, timestamp: time::Tm {
timestamp: time::Tm { tm_nsec: 0,
tm_nsec: 0, ..time::now_utc()
..time::now_utc()
},
previous: prev.hash(),
total_difficulty: difficulty +
prev.total_difficulty.clone(),
kernel_offset: kernel_offset,
..Default::default()
}, },
inputs: inputs, previous: prev.hash(),
outputs: outputs, total_difficulty: difficulty + prev.total_difficulty.clone(),
kernels: kernels, kernel_offset: kernel_offset,
}.cut_through(), ..Default::default()
) },
inputs: inputs,
outputs: outputs,
kernels: kernels,
}.cut_through())
} }
/// Blockhash, computed using only the header /// Blockhash, computed using only the header
@@ -702,10 +690,7 @@ impl Block {
// sum all kernels commitments // sum all kernels commitments
let kernel_sum = { let kernel_sum = {
let mut kernel_commits = self.kernels let mut kernel_commits = self.kernels.iter().map(|x| x.excess).collect::<Vec<_>>();
.iter()
.map(|x| x.excess)
.collect::<Vec<_>>();
let secp = static_secp_instance(); let secp = static_secp_instance();
let secp = secp.lock().unwrap(); let secp = secp.lock().unwrap();
@@ -763,10 +748,7 @@ impl Block {
cb_outs.iter().map(|x| x.commitment()).collect(), cb_outs.iter().map(|x| x.commitment()).collect(),
vec![over_commit], vec![over_commit],
)?; )?;
kerns_sum = secp.commit_sum( kerns_sum = secp.commit_sum(cb_kerns.iter().map(|x| x.excess).collect(), vec![])?;
cb_kerns.iter().map(|x| x.excess).collect(),
vec![],
)?;
} }
if kerns_sum != out_adjust_sum { if kerns_sum != out_adjust_sum {
@@ -775,7 +757,8 @@ impl Block {
Ok(()) Ok(())
} }
/// Builds the blinded output and related signature proof for the block reward. /// Builds the blinded output and related signature proof for the block
/// reward.
pub fn reward_output( pub fn reward_output(
keychain: &keychain::Keychain, keychain: &keychain::Keychain,
key_id: &keychain::Identifier, key_id: &keychain::Identifier,
@@ -784,11 +767,8 @@ impl Block {
) -> Result<(Output, TxKernel), keychain::Error> { ) -> Result<(Output, TxKernel), keychain::Error> {
let commit = keychain.commit(reward(fees), key_id)?; let commit = keychain.commit(reward(fees), key_id)?;
let switch_commit = keychain.switch_commit(key_id)?; let switch_commit = keychain.switch_commit(key_id)?;
let switch_commit_hash = SwitchCommitHash::from_switch_commit( let switch_commit_hash =
switch_commit, SwitchCommitHash::from_switch_commit(switch_commit, keychain, key_id);
keychain,
key_id,
);
trace!( trace!(
LOGGER, LOGGER,
@@ -803,11 +783,15 @@ impl Block {
); );
let value = reward(fees); let value = reward(fees);
let msg = (ProofMessageElements { let msg = (ProofMessageElements { value: value }).to_proof_message();
value: value
}).to_proof_message();
let rproof = keychain.range_proof(value, key_id, commit, Some(switch_commit_hash.as_ref().to_vec()), msg)?; let rproof = keychain.range_proof(
value,
key_id,
commit,
Some(switch_commit_hash.as_ref().to_vec()),
msg,
)?;
let output = Output { let output = Output {
features: OutputFeatures::COINBASE_OUTPUT, features: OutputFeatures::COINBASE_OUTPUT,
@@ -826,7 +810,8 @@ impl Block {
// For a coinbase output the fee is 0 and the lock_height is // For a coinbase output the fee is 0 and the lock_height is
// the lock_height of the coinbase output itself, // the lock_height of the coinbase output itself,
// not the lock_height of the tx (there is no tx for a coinbase output). // not the lock_height of the tx (there is no tx for a coinbase output).
// This output will not be spendable earlier than lock_height (and we sign this here). // This output will not be spendable earlier than lock_height (and we sign this
// here).
let msg = secp::Message::from_slice(&kernel_sig_msg(0, height))?; let msg = secp::Message::from_slice(&kernel_sig_msg(0, height))?;
let sig = keychain.aggsig_sign_from_key_id(&msg, &key_id)?; let sig = keychain.aggsig_sign_from_key_id(&msg, &key_id)?;
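kernel_sig_msg builds the 32-byte message the kernel signature commits to, from exactly the fee and lock_height discussed in the comment above. A sketch of that packing; the precise byte layout here is an assumption, the authoritative version lives in the util crate:

// assumed layout: fee and lock_height packed big-endian into the last
// 16 bytes of a zeroed 32-byte message
fn kernel_sig_msg(fee: u64, lock_height: u64) -> [u8; 32] {
    let mut bytes = [0u8; 32];
    bytes[16..24].copy_from_slice(&fee.to_be_bytes());
    bytes[24..32].copy_from_slice(&lock_height.to_be_bytes());
    bytes
}

fn main() {
    // coinbase case from above: zero fee, lock_height = maturity height
    let msg = kernel_sig_msg(0, 1_000);
    assert_eq!(&msg[24..32], &1_000u64.to_be_bytes());
}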
@@ -850,7 +835,7 @@ mod test {
use core::build::{self, input, output, with_fee}; use core::build::{self, input, output, with_fee};
use core::test::{tx1i2o, tx2i1o}; use core::test::{tx1i2o, tx2i1o};
use keychain::{Identifier, Keychain}; use keychain::{Identifier, Keychain};
use consensus::{MAX_BLOCK_WEIGHT, BLOCK_OUTPUT_WEIGHT}; use consensus::{BLOCK_OUTPUT_WEIGHT, MAX_BLOCK_WEIGHT};
use std::time::Instant; use std::time::Instant;
use util::secp; use util::secp;
@@ -864,7 +849,7 @@ mod test {
txs, txs,
keychain, keychain,
&key_id, &key_id,
Difficulty::one() Difficulty::one(),
).unwrap() ).unwrap()
} }
@@ -901,8 +886,7 @@ mod test {
let now = Instant::now(); let now = Instant::now();
parts.append(&mut vec![input(500000, pks.pop().unwrap()), with_fee(2)]); parts.append(&mut vec![input(500000, pks.pop().unwrap()), with_fee(2)]);
let mut tx = build::transaction(parts, &keychain) let mut tx = build::transaction(parts, &keychain).unwrap();
.unwrap();
println!("Build tx: {}", now.elapsed().as_secs()); println!("Build tx: {}", now.elapsed().as_secs());
let b = new_block(vec![&mut tx], &keychain); let b = new_block(vec![&mut tx], &keychain);
@@ -924,7 +908,6 @@ mod test {
b.verify_coinbase(), b.verify_coinbase(),
Err(Error::Secp(secp::Error::IncorrectCommitSum)) Err(Error::Secp(secp::Error::IncorrectCommitSum))
); );
} }
#[test] #[test]
@@ -989,19 +972,19 @@ mod test {
let keychain = Keychain::from_random_seed().unwrap(); let keychain = Keychain::from_random_seed().unwrap();
let mut b = new_block(vec![], &keychain); let mut b = new_block(vec![], &keychain);
assert!(b.outputs[0].features.contains(OutputFeatures::COINBASE_OUTPUT)); assert!(
b.outputs[0].features.remove(OutputFeatures::COINBASE_OUTPUT); b.outputs[0]
.features
assert_eq!( .contains(OutputFeatures::COINBASE_OUTPUT)
b.verify_coinbase(),
Err(Error::CoinbaseSumMismatch)
); );
b.outputs[0]
.features
.remove(OutputFeatures::COINBASE_OUTPUT);
assert_eq!(b.verify_coinbase(), Err(Error::CoinbaseSumMismatch));
assert_eq!(b.verify_kernels(), Ok(())); assert_eq!(b.verify_kernels(), Ok(()));
assert_eq!( assert_eq!(b.validate(), Err(Error::CoinbaseSumMismatch));
b.validate(),
Err(Error::CoinbaseSumMismatch)
);
} }
#[test] #[test]
@@ -1011,8 +994,14 @@ mod test {
let keychain = Keychain::from_random_seed().unwrap(); let keychain = Keychain::from_random_seed().unwrap();
let mut b = new_block(vec![], &keychain); let mut b = new_block(vec![], &keychain);
assert!(b.kernels[0].features.contains(KernelFeatures::COINBASE_KERNEL)); assert!(
b.kernels[0].features.remove(KernelFeatures::COINBASE_KERNEL); b.kernels[0]
.features
.contains(KernelFeatures::COINBASE_KERNEL)
);
b.kernels[0]
.features
.remove(KernelFeatures::COINBASE_KERNEL);
assert_eq!( assert_eq!(
b.verify_coinbase(), b.verify_coinbase(),
@@ -1047,10 +1036,7 @@ mod test {
let mut vec = Vec::new(); let mut vec = Vec::new();
ser::serialize(&mut vec, &b).expect("serialization failed"); ser::serialize(&mut vec, &b).expect("serialization failed");
let target_len = 1_256; let target_len = 1_256;
assert_eq!( assert_eq!(vec.len(), target_len,);
vec.len(),
target_len,
);
} }
#[test] #[test]
@@ -1061,10 +1047,7 @@ mod test {
let mut vec = Vec::new(); let mut vec = Vec::new();
ser::serialize(&mut vec, &b).expect("serialization failed"); ser::serialize(&mut vec, &b).expect("serialization failed");
let target_len = 2_900; let target_len = 2_900;
assert_eq!( assert_eq!(vec.len(), target_len,);
vec.len(),
target_len,
);
} }
#[test] #[test]
@@ -1074,10 +1057,7 @@ mod test {
let mut vec = Vec::new(); let mut vec = Vec::new();
ser::serialize(&mut vec, &b.as_compact_block()).expect("serialization failed"); ser::serialize(&mut vec, &b.as_compact_block()).expect("serialization failed");
let target_len = 1_264; let target_len = 1_264;
assert_eq!( assert_eq!(vec.len(), target_len,);
vec.len(),
target_len,
);
} }
#[test] #[test]
@@ -1088,10 +1068,7 @@ mod test {
let mut vec = Vec::new(); let mut vec = Vec::new();
ser::serialize(&mut vec, &b.as_compact_block()).expect("serialization failed"); ser::serialize(&mut vec, &b.as_compact_block()).expect("serialization failed");
let target_len = 1_270; let target_len = 1_270;
assert_eq!( assert_eq!(vec.len(), target_len,);
vec.len(),
target_len,
);
} }
#[test] #[test]
@@ -1104,17 +1081,11 @@ mod test {
txs.push(tx); txs.push(tx);
} }
let b = new_block( let b = new_block(txs.iter().collect(), &keychain);
txs.iter().collect(),
&keychain,
);
let mut vec = Vec::new(); let mut vec = Vec::new();
ser::serialize(&mut vec, &b).expect("serialization failed"); ser::serialize(&mut vec, &b).expect("serialization failed");
let target_len = 17_696; let target_len = 17_696;
assert_eq!( assert_eq!(vec.len(), target_len,);
vec.len(),
target_len,
);
} }
#[test] #[test]
@@ -1127,17 +1098,11 @@ mod test {
txs.push(tx); txs.push(tx);
} }
let b = new_block( let b = new_block(txs.iter().collect(), &keychain);
txs.iter().collect(),
&keychain,
);
let mut vec = Vec::new(); let mut vec = Vec::new();
ser::serialize(&mut vec, &b.as_compact_block()).expect("serialization failed"); ser::serialize(&mut vec, &b.as_compact_block()).expect("serialization failed");
let target_len = 1_324; let target_len = 1_324;
assert_eq!( assert_eq!(vec.len(), target_len,);
vec.len(),
target_len,
);
} }
#[test] #[test]
@@ -1158,8 +1123,14 @@ mod test {
// check we can identify the specified kernel from the short_id // check we can identify the specified kernel from the short_id
// correctly in both of the compact_blocks // correctly in both of the compact_blocks
assert_eq!(cb1.kern_ids[0], tx.kernels[0].short_id(&cb1.hash(), cb1.nonce)); assert_eq!(
assert_eq!(cb2.kern_ids[0], tx.kernels[0].short_id(&cb2.hash(), cb2.nonce)); cb1.kern_ids[0],
tx.kernels[0].short_id(&cb1.hash(), cb1.nonce)
);
assert_eq!(
cb2.kern_ids[0],
tx.kernels[0].short_id(&cb2.hash(), cb2.nonce)
);
} }
#[test] #[test]

View file

@@ -25,13 +25,14 @@
//! build::transaction(vec![input_rand(75), output_rand(42), output_rand(32), //! build::transaction(vec![input_rand(75), output_rand(42), output_rand(32),
//! with_fee(1)]) //! with_fee(1)])
use util::{secp, kernel_sig_msg}; use util::{kernel_sig_msg, secp};
use core::{Transaction, TxKernel, Input, Output, OutputFeatures, ProofMessageElements, SwitchCommitHash}; use core::{Input, Output, OutputFeatures, ProofMessageElements, SwitchCommitHash, Transaction,
TxKernel};
use core::hash::Hash; use core::hash::Hash;
use core::pmmr::MerkleProof; use core::pmmr::MerkleProof;
use keychain; use keychain;
use keychain::{Keychain, BlindSum, BlindingFactor, Identifier}; use keychain::{BlindSum, BlindingFactor, Identifier, Keychain};
use util::LOGGER; use util::LOGGER;
/// Context information available to transaction combinators. /// Context information available to transaction combinators.
@@ -41,7 +42,8 @@ pub struct Context<'a> {
/// Function type returned by the transaction combinators. Transforms a /// Function type returned by the transaction combinators. Transforms a
/// (Transaction, BlindSum) pair into another, provided some context. /// (Transaction, BlindSum) pair into another, provided some context.
pub type Append = for<'a> Fn(&'a mut Context, (Transaction, TxKernel, BlindSum)) -> (Transaction, TxKernel, BlindSum); pub type Append = for<'a> Fn(&'a mut Context, (Transaction, TxKernel, BlindSum))
-> (Transaction, TxKernel, BlindSum);
/// Adds an input with the provided value and blinding key to the transaction /// Adds an input with the provided value and blinding key to the transaction
/// being built. /// being built.
@@ -52,25 +54,22 @@ fn build_input(
merkle_proof: Option<MerkleProof>, merkle_proof: Option<MerkleProof>,
key_id: Identifier, key_id: Identifier,
) -> Box<Append> { ) -> Box<Append> {
Box::new(move |build, (tx, kern, sum)| -> (Transaction, TxKernel, BlindSum) { Box::new(
let commit = build.keychain.commit(value, &key_id).unwrap(); move |build, (tx, kern, sum)| -> (Transaction, TxKernel, BlindSum) {
let input = Input::new( let commit = build.keychain.commit(value, &key_id).unwrap();
features, let input = Input::new(features, commit, block_hash.clone(), merkle_proof.clone());
commit, (tx.with_input(input), kern, sum.sub_key_id(key_id.clone()))
block_hash.clone(), },
merkle_proof.clone(), )
);
(tx.with_input(input), kern, sum.sub_key_id(key_id.clone()))
})
} }
/// Adds an input with the provided value and blinding key to the transaction /// Adds an input with the provided value and blinding key to the transaction
/// being built. /// being built.
pub fn input( pub fn input(value: u64, key_id: Identifier) -> Box<Append> {
value: u64, debug!(
key_id: Identifier, LOGGER,
) -> Box<Append> { "Building input (spending regular output): {}, {}", value, key_id
debug!(LOGGER, "Building input (spending regular output): {}, {}", value, key_id); );
build_input(value, OutputFeatures::DEFAULT_OUTPUT, None, None, key_id) build_input(value, OutputFeatures::DEFAULT_OUTPUT, None, None, key_id)
} }
@@ -82,90 +81,105 @@ pub fn coinbase_input(
merkle_proof: MerkleProof, merkle_proof: MerkleProof,
key_id: Identifier, key_id: Identifier,
) -> Box<Append> { ) -> Box<Append> {
debug!(LOGGER, "Building input (spending coinbase): {}, {}", value, key_id); debug!(
build_input(value, OutputFeatures::COINBASE_OUTPUT, Some(block_hash), Some(merkle_proof), key_id) LOGGER,
"Building input (spending coinbase): {}, {}", value, key_id
);
build_input(
value,
OutputFeatures::COINBASE_OUTPUT,
Some(block_hash),
Some(merkle_proof),
key_id,
)
} }
/// Adds an output with the provided value and key identifier from the /// Adds an output with the provided value and key identifier from the
/// keychain. /// keychain.
pub fn output(value: u64, key_id: Identifier) -> Box<Append> { pub fn output(value: u64, key_id: Identifier) -> Box<Append> {
Box::new(move |build, (tx, kern, sum)| -> (Transaction, TxKernel, BlindSum) { Box::new(
debug!( move |build, (tx, kern, sum)| -> (Transaction, TxKernel, BlindSum) {
LOGGER, debug!(LOGGER, "Building an output: {}, {}", value, key_id,);
"Building an output: {}, {}",
value,
key_id,
);
let commit = build.keychain.commit(value, &key_id).unwrap(); let commit = build.keychain.commit(value, &key_id).unwrap();
let switch_commit = build.keychain.switch_commit(&key_id).unwrap(); let switch_commit = build.keychain.switch_commit(&key_id).unwrap();
let switch_commit_hash = SwitchCommitHash::from_switch_commit( let switch_commit_hash =
switch_commit, SwitchCommitHash::from_switch_commit(switch_commit, build.keychain, &key_id);
build.keychain, trace!(
&key_id, LOGGER,
); "Builder - Pedersen Commit is: {:?}, Switch Commit is: {:?}",
trace!( commit,
LOGGER, switch_commit,
"Builder - Pedersen Commit is: {:?}, Switch Commit is: {:?}", );
commit, trace!(
switch_commit, LOGGER,
); "Builder - Switch Commit Hash is: {:?}",
trace!( switch_commit_hash
LOGGER, );
"Builder - Switch Commit Hash is: {:?}",
switch_commit_hash
);
let msg = (ProofMessageElements { let msg = (ProofMessageElements { value: value }).to_proof_message();
value: value,
}).to_proof_message();
let rproof = build let rproof = build
.keychain .keychain
.range_proof(value, &key_id, commit, Some(switch_commit_hash.as_ref().to_vec()), msg) .range_proof(
.unwrap(); value,
&key_id,
commit,
Some(switch_commit_hash.as_ref().to_vec()),
msg,
)
.unwrap();
( (
tx.with_output(Output { tx.with_output(Output {
features: OutputFeatures::DEFAULT_OUTPUT, features: OutputFeatures::DEFAULT_OUTPUT,
commit: commit, commit: commit,
switch_commit_hash: switch_commit_hash, switch_commit_hash: switch_commit_hash,
proof: rproof, proof: rproof,
}), }),
kern, kern,
sum.add_key_id(key_id.clone()), sum.add_key_id(key_id.clone()),
) )
}) },
)
} }
/// Sets the fee on the transaction being built. /// Sets the fee on the transaction being built.
pub fn with_fee(fee: u64) -> Box<Append> { pub fn with_fee(fee: u64) -> Box<Append> {
Box::new(move |_build, (tx, kern, sum)| -> (Transaction, TxKernel, BlindSum) { Box::new(
(tx, kern.with_fee(fee), sum) move |_build, (tx, kern, sum)| -> (Transaction, TxKernel, BlindSum) {
}) (tx, kern.with_fee(fee), sum)
},
)
} }
/// Sets the lock_height on the transaction being built. /// Sets the lock_height on the transaction being built.
pub fn with_lock_height(lock_height: u64) -> Box<Append> { pub fn with_lock_height(lock_height: u64) -> Box<Append> {
Box::new(move |_build, (tx, kern, sum)| -> (Transaction, TxKernel, BlindSum) { Box::new(
(tx, kern.with_lock_height(lock_height), sum) move |_build, (tx, kern, sum)| -> (Transaction, TxKernel, BlindSum) {
}) (tx, kern.with_lock_height(lock_height), sum)
},
)
} }
/// Adds a known excess value on the transaction being built. Usually used in /// Adds a known excess value on the transaction being built. Usually used in
/// combination with the initial_tx function when a new transaction is built /// combination with the initial_tx function when a new transaction is built
/// by adding to a pre-existing one. /// by adding to a pre-existing one.
pub fn with_excess(excess: BlindingFactor) -> Box<Append> { pub fn with_excess(excess: BlindingFactor) -> Box<Append> {
Box::new(move |_build, (tx, kern, sum)| -> (Transaction, TxKernel, BlindSum) { Box::new(
(tx, kern, sum.add_blinding_factor(excess.clone())) move |_build, (tx, kern, sum)| -> (Transaction, TxKernel, BlindSum) {
}) (tx, kern, sum.add_blinding_factor(excess.clone()))
},
)
} }
/// Sets a known tx "offset". Used in final step of tx construction. /// Sets a known tx "offset". Used in final step of tx construction.
pub fn with_offset(offset: BlindingFactor) -> Box<Append> { pub fn with_offset(offset: BlindingFactor) -> Box<Append> {
Box::new(move |_build, (tx, kern, sum)| -> (Transaction, TxKernel, BlindSum) { Box::new(
(tx.with_offset(offset), kern, sum) move |_build, (tx, kern, sum)| -> (Transaction, TxKernel, BlindSum) {
}) (tx.with_offset(offset), kern, sum)
},
)
} }
/// Sets an initial transaction to add to when building a new transaction. /// Sets an initial transaction to add to when building a new transaction.
@ -173,9 +187,11 @@ pub fn with_offset(offset: BlindingFactor) -> Box<Append> {
pub fn initial_tx(mut tx: Transaction) -> Box<Append> { pub fn initial_tx(mut tx: Transaction) -> Box<Append> {
assert_eq!(tx.kernels.len(), 1); assert_eq!(tx.kernels.len(), 1);
let kern = tx.kernels.remove(0); let kern = tx.kernels.remove(0);
Box::new(move |_build, (_, _, sum)| -> (Transaction, TxKernel, BlindSum) { Box::new(
(tx.clone(), kern.clone(), sum) move |_build, (_, _, sum)| -> (Transaction, TxKernel, BlindSum) {
}) (tx.clone(), kern.clone(), sum)
},
)
} }
/// Builds a new transaction by combining all the combinators provided in a /// Builds a new transaction by combining all the combinators provided in a
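A usage sketch of that combining step, mirroring the test code later in this diff (Keychain::from_random_seed is assumed from the keychain crate; error handling is elided with unwrap):

let keychain = Keychain::from_random_seed().unwrap();
let key_id1 = keychain.derive_key_id(1).unwrap();
let key_id2 = keychain.derive_key_id(2).unwrap();

// spend a 5-coin output into a 3-coin output, paying 2 in fees
let tx = build::transaction(
	vec![input(5, key_id1), output(3, key_id2), with_fee(2)],
	&keychain,
).unwrap();
assert!(tx.validate().is_ok());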
View file
@ -177,7 +177,9 @@ impl HashWriter {
impl Default for HashWriter { impl Default for HashWriter {
fn default() -> HashWriter { fn default() -> HashWriter {
HashWriter { state: Blake2b::new(32) } HashWriter {
state: Blake2b::new(32),
}
} }
} }
@ -225,9 +227,10 @@ impl<T: Writeable> consensus::VerifySortOrder<T> for Vec<T> {
.map(|item| item.hash()) .map(|item| item.hash())
.collect::<Vec<_>>() .collect::<Vec<_>>()
.windows(2) .windows(2)
.any(|pair| pair[0] > pair[1]) { .any(|pair| pair[0] > pair[1])
true => Err(consensus::Error::SortError), {
false => Ok(()), true => Err(consensus::Error::SortError),
} false => Ok(()),
}
} }
} }
View file
@ -16,15 +16,14 @@
use std::cmp::min; use std::cmp::min;
use byteorder::{LittleEndian, ByteOrder}; use byteorder::{ByteOrder, LittleEndian};
use siphasher::sip::SipHasher24; use siphasher::sip::SipHasher24;
use core::hash::{Hash, Hashed}; use core::hash::{Hash, Hashed};
use ser; use ser;
use ser::{Reader, Readable, Writer, Writeable}; use ser::{Readable, Reader, Writeable, Writer};
use util; use util;
/// The size of a short id used to identify inputs|outputs|kernels (6 bytes) /// The size of a short id used to identify inputs|outputs|kernels (6 bytes)
pub const SHORT_ID_SIZE: usize = 6; pub const SHORT_ID_SIZE: usize = 6;
@ -62,7 +61,8 @@ impl<H: Hashed> ShortIdentifiable for H {
sip_hasher.write(&self.hash().to_vec()[..]); sip_hasher.write(&self.hash().to_vec()[..]);
let res = sip_hasher.finish(); let res = sip_hasher.finish();
// construct a short_id from the resulting bytes (dropping the 2 most significant bytes) // construct a short_id from the resulting bytes (dropping the 2 most
// significant bytes)
let mut buf = [0; 8]; let mut buf = [0; 8];
LittleEndian::write_u64(&mut buf, res); LittleEndian::write_u64(&mut buf, res);
ShortId::from_bytes(&buf[0..6]) ShortId::from_bytes(&buf[0..6])
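A self-contained sketch of this construction; in the real trait the two sip keys are derived from a block hash and nonce, which here are simplified into plain u64 parameters:

use byteorder::{ByteOrder, LittleEndian};
use siphasher::sip::SipHasher24;
use std::hash::Hasher;

fn short_id(item_hash: &[u8], k0: u64, k1: u64) -> [u8; 6] {
	// run the item's hash through the keyed sip hasher
	let mut sip = SipHasher24::new_with_keys(k0, k1);
	sip.write(item_hash);
	let res = sip.finish();
	// serialize little-endian and keep the 6 least significant bytes,
	// dropping the 2 most significant ones
	let mut buf = [0u8; 8];
	LittleEndian::write_u64(&mut buf, res);
	let mut id = [0u8; 6];
	id.copy_from_slice(&buf[0..6]);
	id
}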
@ -131,7 +131,6 @@ mod test {
use super::*; use super::*;
use ser::{Writeable, Writer}; use ser::{Writeable, Writer};
#[test] #[test]
fn test_short_id() { fn test_short_id() {
// minimal struct for testing // minimal struct for testing
@ -152,7 +151,10 @@ mod test {
assert_eq!(foo.hash(), expected_hash); assert_eq!(foo.hash(), expected_hash);
let other_hash = Hash::zero(); let other_hash = Hash::zero();
assert_eq!(foo.short_id(&other_hash, foo.0), ShortId::from_hex("4cc808b62476").unwrap()); assert_eq!(
foo.short_id(&other_hash, foo.0),
ShortId::from_hex("4cc808b62476").unwrap()
);
let foo = Foo(5); let foo = Foo(5);
let expected_hash = Hash::from_hex( let expected_hash = Hash::from_hex(
@ -161,7 +163,10 @@ mod test {
assert_eq!(foo.hash(), expected_hash); assert_eq!(foo.hash(), expected_hash);
let other_hash = Hash::zero(); let other_hash = Hash::zero();
assert_eq!(foo.short_id(&other_hash, foo.0), ShortId::from_hex("02955a094534").unwrap()); assert_eq!(
foo.short_id(&other_hash, foo.0),
ShortId::from_hex("02955a094534").unwrap()
);
let foo = Foo(5); let foo = Foo(5);
let expected_hash = Hash::from_hex( let expected_hash = Hash::from_hex(
@ -172,6 +177,9 @@ mod test {
let other_hash = Hash::from_hex( let other_hash = Hash::from_hex(
"81e47a19e6b29b0a65b9591762ce5143ed30d0261e5d24a3201752506b20f15c", "81e47a19e6b29b0a65b9591762ce5143ed30d0261e5d24a3201752506b20f15c",
).unwrap(); ).unwrap();
assert_eq!(foo.short_id(&other_hash, foo.0), ShortId::from_hex("3e9cde72a687").unwrap()); assert_eq!(
foo.short_id(&other_hash, foo.0),
ShortId::from_hex("3e9cde72a687").unwrap()
);
} }
} }
View file
@ -264,10 +264,7 @@ mod test {
let mut vec = Vec::new(); let mut vec = Vec::new();
ser::serialize(&mut vec, &tx).expect("serialization failed"); ser::serialize(&mut vec, &tx).expect("serialization failed");
let target_len = 986; let target_len = 986;
assert_eq!( assert_eq!(vec.len(), target_len,);
vec.len(),
target_len,
);
} }
#[test] #[test]
@ -392,8 +389,8 @@ mod test {
let btx = tx2i1o(); let btx = tx2i1o();
assert!(btx.validate().is_ok()); assert!(btx.validate().is_ok());
// Ignored for bullet proofs, because calling range_proof_info // Ignored for bullet proofs, because calling range_proof_info
// with a bullet proof causes painful errors // with a bullet proof causes painful errors
// checks that the range proof on our blind output is sufficiently hiding // checks that the range proof on our blind output is sufficiently hiding
let Output { proof, .. } = btx.outputs[0]; let Output { proof, .. } = btx.outputs[0];
@ -423,12 +420,13 @@ mod test {
// let key_id2 = keychain.derive_key_id(2).unwrap(); // let key_id2 = keychain.derive_key_id(2).unwrap();
// let key_id3 = keychain.derive_key_id(3).unwrap(); // let key_id3 = keychain.derive_key_id(3).unwrap();
// let key_id4 = keychain.derive_key_id(4).unwrap(); // let key_id4 = keychain.derive_key_id(4).unwrap();
// //
// let (tx_alice, blind_sum) = { // let (tx_alice, blind_sum) = {
// // Alice gets 2 of her pre-existing outputs to send 5 coins to Bob, they // // Alice gets 2 of her pre-existing outputs to send 5 coins to Bob, they
// // become inputs in the new transaction // // become inputs in the new transaction
// let (in1, in2) = (input(4, ZERO_HASH, key_id1), input(3, ZERO_HASH, key_id2)); // let (in1, in2) = (input(4, ZERO_HASH, key_id1), input(3, ZERO_HASH,
// // key_id2));
//
// // Alice builds her transaction, with change, which also produces the sum // // Alice builds her transaction, with change, which also produces the sum
// // of blinding factors before they're obscured. // // of blinding factors before they're obscured.
// let (tx, sum) = build::partial_transaction( // let (tx, sum) = build::partial_transaction(
@ -436,21 +434,21 @@ mod test {
// with_fee(2)], // with_fee(2)],
// &keychain, // &keychain,
// ).unwrap(); // ).unwrap();
// //
// (tx, sum) // (tx, sum)
// }; // };
// //
// let blind = blind_sum.secret_key(&keychain.secp())?; // let blind = blind_sum.secret_key(&keychain.secp())?;
// keychain.aggsig_create_context(blind); // keychain.aggsig_create_context(blind);
// let (pub_excess, pub_nonce) = keychain.aggsig_get_public_keys(); // let (pub_excess, pub_nonce) = keychain.aggsig_get_public_keys();
// //
// let sig_part = keychain.aggsig_calculate_partial_sig( // let sig_part = keychain.aggsig_calculate_partial_sig(
// &pub_nonce, // &pub_nonce,
// tx.fee(), // tx.fee(),
// tx.lock_height(), // tx.lock_height(),
// ).unwrap(); // ).unwrap();
// //
// //
// // From now on, Bob only has the obscured transaction and the sum of // // From now on, Bob only has the obscured transaction and the sum of
// // blinding factors. He adds his output, finalizes the transaction so it's // // blinding factors. He adds his output, finalizes the transaction so it's
// // ready for broadcast. // // ready for broadcast.
@ -462,9 +460,9 @@ mod test {
// ], // ],
// &keychain, // &keychain,
// ).unwrap(); // ).unwrap();
// //
// tx_final.validate().unwrap(); // tx_final.validate().unwrap();
// //
// } // }
/// Simulate the standard exchange between 2 parties when creating a basic /// Simulate the standard exchange between 2 parties when creating a basic
@ -485,8 +483,7 @@ mod test {
// Alice builds her transaction, with change, which also produces the sum // Alice builds her transaction, with change, which also produces the sum
// of blinding factors before they're obscured. // of blinding factors before they're obscured.
let (tx, sum) = build::partial_transaction( let (tx, sum) = build::partial_transaction(
vec![in1, in2, output(1, key_id3), vec![in1, in2, output(1, key_id3), with_fee(2)],
with_fee(2)],
&keychain, &keychain,
).unwrap(); ).unwrap();
@ -567,8 +564,8 @@ mod test {
let key_id2 = keychain.derive_key_id(2).unwrap(); let key_id2 = keychain.derive_key_id(2).unwrap();
let key_id3 = keychain.derive_key_id(3).unwrap(); let key_id3 = keychain.derive_key_id(3).unwrap();
// first check we can add a timelocked tx where lock height matches current block height // first check we can add a timelocked tx where lock height matches current
// and that the resulting block is valid // block height and that the resulting block is valid
let tx1 = build::transaction( let tx1 = build::transaction(
vec![ vec![
input(5, key_id1.clone()), input(5, key_id1.clone()),
@ -588,7 +585,8 @@ mod test {
).unwrap(); ).unwrap();
b.validate().unwrap(); b.validate().unwrap();
// now try adding a timelocked tx where lock height is greater than current block height // now try adding a timelocked tx where lock height is greater than current
// block height
let tx1 = build::transaction( let tx1 = build::transaction(
vec![ vec![
input(5, key_id1.clone()), input(5, key_id1.clone()),
View file
@ -30,10 +30,10 @@
//! binary operations, they're extremely fast. For more information, see the //! binary operations, they're extremely fast. For more information, see the
//! doc on bintree_jump_left_sibling. //! doc on bintree_jump_left_sibling.
//! 2. The implementation of a prunable MMR tree using the above. Each leaf //! 2. The implementation of a prunable MMR tree using the above. Each leaf
//! is required to be Writeable (which implements Hashed). Tree roots can be trivially and //! is required to be Writeable (which implements Hashed). Tree roots can be
//! efficiently calculated without materializing the full tree. The underlying //! trivially and efficiently calculated without materializing the full tree.
//! Hashes are stored in a Backend implementation that can either be //! The underlying Hashes are stored in a Backend implementation that can
//! a simple Vec or a database. //! either be a simple Vec or a database.
use std::clone::Clone; use std::clone::Clone;
use std::marker::PhantomData; use std::marker::PhantomData;
@ -48,8 +48,10 @@ use util::LOGGER;
/// The PMMR itself does not need the Backend to be accurate on the existence /// The PMMR itself does not need the Backend to be accurate on the existence
/// of an element (i.e. remove could be a no-op) but layers above can /// of an element (i.e. remove could be a no-op) but layers above can
/// depend on an accurate Backend to check existence. /// depend on an accurate Backend to check existence.
pub trait Backend<T> where pub trait Backend<T>
T:PMMRable { where
T: PMMRable,
{
/// Append the provided Hashes to the backend storage, and optionally an associated /// Append the provided Hashes to the backend storage, and optionally an associated
/// data element to flatfile storage (for leaf nodes only). The position of the /// data element to flatfile storage (for leaf nodes only). The position of the
/// first element of the Vec in the MMR is provided to help the implementation. /// first element of the Vec in the MMR is provided to help the implementation.
@ -65,7 +67,8 @@ pub trait Backend<T> where
/// also return the associated data element /// also return the associated data element
fn get(&self, position: u64, include_data: bool) -> Option<(Hash, Option<T>)>; fn get(&self, position: u64, include_data: bool) -> Option<(Hash, Option<T>)>;
/// Get a Hash/Element by original insertion position (ignoring the remove list). /// Get a Hash/Element by original insertion position (ignoring the remove
/// list).
fn get_from_file(&self, position: u64) -> Option<Hash>; fn get_from_file(&self, position: u64) -> Option<Hash>;
/// Remove HashSums by insertion position. An index is also provided so the /// Remove HashSums by insertion position. An index is also provided so the
@ -97,7 +100,8 @@ pub struct MerkleProof {
pub peaks: Vec<Hash>, pub peaks: Vec<Hash>,
/// The siblings along the path of the tree as we traverse from node to peak /// The siblings along the path of the tree as we traverse from node to peak
pub path: Vec<Hash>, pub path: Vec<Hash>,
/// Order of siblings (left vs right) matters, so track this here for each path element /// Order of siblings (left vs right) matters, so track this here for each
/// path element
pub left_right: Vec<bool>, pub left_right: Vec<bool>,
} }
@ -108,7 +112,6 @@ impl Writeable for MerkleProof {
[write_fixed_bytes, &self.root], [write_fixed_bytes, &self.root],
[write_fixed_bytes, &self.node], [write_fixed_bytes, &self.node],
[write_u64, self.peaks.len() as u64], [write_u64, self.peaks.len() as u64],
// note: path length used for both path and left_right vecs // note: path length used for both path and left_right vecs
[write_u64, self.path.len() as u64] [write_u64, self.path.len() as u64]
); );
@ -134,8 +137,7 @@ impl Readable for MerkleProof {
let root = Hash::read(reader)?; let root = Hash::read(reader)?;
let node = Hash::read(reader)?; let node = Hash::read(reader)?;
let (peaks_len, path_len) = let (peaks_len, path_len) = ser_multiread!(reader, read_u64, read_u64);
ser_multiread!(reader, read_u64, read_u64);
let mut peaks = Vec::with_capacity(peaks_len as usize); let mut peaks = Vec::with_capacity(peaks_len as usize);
for _ in 0..peaks_len { for _ in 0..peaks_len {
@ -148,15 +150,13 @@ impl Readable for MerkleProof {
let left_right_bytes = reader.read_fixed_bytes(path_len as usize)?; let left_right_bytes = reader.read_fixed_bytes(path_len as usize)?;
let left_right = left_right_bytes.iter().map(|&x| x == 1).collect(); let left_right = left_right_bytes.iter().map(|&x| x == 1).collect();
Ok( Ok(MerkleProof {
MerkleProof { root,
root, node,
node, peaks,
peaks, path,
path, left_right,
left_right, })
}
)
} }
} }
@ -223,7 +223,8 @@ impl MerkleProof {
let sibling = path.remove(0); let sibling = path.remove(0);
let mut left_right = self.left_right.clone(); let mut left_right = self.left_right.clone();
// hash our node and sibling together (noting left/right position of the sibling) // hash our node and sibling together (noting left/right position of the
// sibling)
let parent = if left_right.remove(0) { let parent = if left_right.remove(0) {
self.node.hash_with(sibling) self.node.hash_with(sibling)
} else { } else {
@ -242,7 +243,6 @@ impl MerkleProof {
} }
} }
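Since hashing two children together is not commutative, the proof must remember which side each sibling sat on; a minimal sketch of that one step, with blake2b standing in for the crate's hash_with (the byte layout here is an assumption, not the real serialization):

use blake2::blake2b::blake2b;

fn parent_hash(node: &[u8; 32], sibling: &[u8; 32], sibling_on_right: bool) -> [u8; 32] {
	// order the operands by the sibling's recorded position
	let (first, second) = if sibling_on_right {
		(node, sibling)
	} else {
		(sibling, node)
	};
	let mut buf = [0u8; 64];
	buf[..32].copy_from_slice(first);
	buf[32..].copy_from_slice(second);
	let mut out = [0u8; 32];
	out.copy_from_slice(blake2b(32, &[], &buf).as_bytes());
	out
}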
/// Prunable Merkle Mountain Range implementation. All positions within the tree /// Prunable Merkle Mountain Range implementation. All positions within the tree
/// start at 1 as they're postorder tree traversal positions rather than array /// start at 1 as they're postorder tree traversal positions rather than array
/// indices. /// indices.
@ -290,7 +290,8 @@ where
/// tree and "bags" them to get a single peak. /// tree and "bags" them to get a single peak.
pub fn root(&self) -> Hash { pub fn root(&self) -> Hash {
let peaks_pos = peaks(self.last_pos); let peaks_pos = peaks(self.last_pos);
let peaks: Vec<Option<(Hash, Option<T>)>> = peaks_pos.into_iter() let peaks: Vec<Option<(Hash, Option<T>)>> = peaks_pos
.into_iter()
.map(|pi| self.backend.get(pi, false)) .map(|pi| self.backend.get(pi, false))
.collect(); .collect();
@ -307,7 +308,10 @@ where
/// Build a Merkle proof for the element at the given position in the MMR /// Build a Merkle proof for the element at the given position in the MMR
pub fn merkle_proof(&self, pos: u64) -> Result<MerkleProof, String> { pub fn merkle_proof(&self, pos: u64) -> Result<MerkleProof, String> {
debug!(LOGGER, "merkle_proof (via rewind) - {}, last_pos {}", pos, self.last_pos); debug!(
LOGGER,
"merkle_proof (via rewind) - {}, last_pos {}", pos, self.last_pos
);
if !is_leaf(pos) { if !is_leaf(pos) {
return Err(format!("not a leaf at pos {}", pos)); return Err(format!("not a leaf at pos {}", pos));
@ -320,10 +324,7 @@ where
.0; .0;
let family_branch = family_branch(pos, self.last_pos); let family_branch = family_branch(pos, self.last_pos);
let left_right = family_branch let left_right = family_branch.iter().map(|x| x.2).collect::<Vec<_>>();
.iter()
.map(|x| x.2)
.collect::<Vec<_>>();
let path = family_branch let path = family_branch
.iter() .iter()
@ -370,9 +371,9 @@ where
// creation of another parent. // creation of another parent.
while bintree_postorder_height(pos + 1) > height { while bintree_postorder_height(pos + 1) > height {
let left_sibling = bintree_jump_left_sibling(pos); let left_sibling = bintree_jump_left_sibling(pos);
let left_elem = self.backend.get(left_sibling, false).expect( let left_elem = self.backend
"missing left sibling in tree, should not have been pruned", .get(left_sibling, false)
); .expect("missing left sibling in tree, should not have been pruned");
current_hash = left_elem.0 + current_hash; current_hash = left_elem.0 + current_hash;
to_append.push((current_hash.clone(), None)); to_append.push((current_hash.clone(), None));
@ -498,16 +499,18 @@ where
if bintree_postorder_height(n) > 0 { if bintree_postorder_height(n) > 0 {
if let Some(hs) = self.get(n, false) { if let Some(hs) = self.get(n, false) {
// take the left and right children, if they exist // take the left and right children, if they exist
let left_pos = bintree_move_down_left(n) let left_pos = bintree_move_down_left(n).ok_or(format!("left_pos not found"))?;
.ok_or(format!("left_pos not found"))?;
let right_pos = bintree_jump_right_sibling(left_pos); let right_pos = bintree_jump_right_sibling(left_pos);
if let Some(left_child_hs) = self.get(left_pos, false) { if let Some(left_child_hs) = self.get(left_pos, false) {
if let Some(right_child_hs) = self.get(right_pos, false) { if let Some(right_child_hs) = self.get(right_pos, false) {
// add hashes and compare // add hashes and compare
if left_child_hs.0+right_child_hs.0 != hs.0 { if left_child_hs.0 + right_child_hs.0 != hs.0 {
return Err(format!("Invalid MMR, hash of parent at {} does \ return Err(format!(
not match children.", n)); "Invalid MMR, hash of parent at {} does \
not match children.",
n
));
} }
} }
} }
@ -575,7 +578,9 @@ pub struct PruneList {
impl PruneList { impl PruneList {
/// Instantiate a new empty prune list /// Instantiate a new empty prune list
pub fn new() -> PruneList { pub fn new() -> PruneList {
PruneList { pruned_nodes: vec![] } PruneList {
pruned_nodes: vec![],
}
} }
/// Computes by how many positions a node at pos should be shifted given the /// Computes by how many positions a node at pos should be shifted given the
@ -602,7 +607,6 @@ impl PruneList {
/// given leaf. Helpful if, for instance, data for each leaf is being stored /// given leaf. Helpful if, for instance, data for each leaf is being stored
/// separately in a continuous flat-file /// separately in a continuous flat-file
pub fn get_leaf_shift(&self, pos: u64) -> Option<u64> { pub fn get_leaf_shift(&self, pos: u64) -> Option<u64> {
// get the position where the node at pos would fit in the pruned list, if // get the position where the node at pos would fit in the pruned list, if
// it's already pruned, nothing to skip // it's already pruned, nothing to skip
match self.pruned_pos(pos) { match self.pruned_pos(pos) {
@ -716,12 +720,13 @@ pub fn peaks(num: u64) -> Vec<u64> {
/// The number of leaf nodes in an MMR of the provided size. Uses peaks to /// The number of leaf nodes in an MMR of the provided size. Uses peaks to
/// get the positions of all full binary trees and uses the height of these /// get the positions of all full binary trees and uses the height of these
/// to count their leaves. /// to count their leaves.
pub fn n_leaves(mut sz: u64) -> u64 { pub fn n_leaves(mut sz: u64) -> u64 {
while bintree_postorder_height(sz+1) > 0 { while bintree_postorder_height(sz + 1) > 0 {
sz += 1; sz += 1;
} }
peaks(sz).iter().map(|n| { peaks(sz)
(1 << bintree_postorder_height(*n)) as u64 .iter()
}).sum() .map(|n| (1 << bintree_postorder_height(*n)) as u64)
.sum()
} }
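These sizes obey a handy closed form: a perfect binary tree with L leaves has L - 1 internal nodes, and there is one tree per set bit of the leaf count, so an MMR holding `leaves` leaves contains 2*leaves - popcount(leaves) nodes in total. A sketch of n_leaves built on that identity, consistent with the test vectors below (for example n_leaves(4) == 3 and n_leaves(9) == 6):

// total nodes in an MMR holding `leaves` leaves
fn mmr_size(leaves: u64) -> u64 {
	2 * leaves - u64::from(leaves.count_ones())
}

// leaves in an MMR of `size` nodes, rounding invalid sizes up to the
// next valid tree size (a linear scan; the real code walks peaks instead)
fn n_leaves(size: u64) -> u64 {
	(1..=size).find(|&l| mmr_size(l) >= size).unwrap_or(0)
}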
/// The height of a node in a full binary tree from its postorder traversal /// The height of a node in a full binary tree from its postorder traversal
@ -909,23 +914,26 @@ fn most_significant_pos(num: u64) -> u64 {
#[cfg(test)] #[cfg(test)]
mod test { mod test {
use super::*; use super::*;
use ser::{Writeable, Readable, Error}; use ser::{Error, Readable, Writeable};
use core::{Writer, Reader}; use core::{Reader, Writer};
use core::hash::{Hash}; use core::hash::Hash;
/// Simple MMR backend implementation based on a Vector. Pruning does not /// Simple MMR backend implementation based on a Vector. Pruning does not
/// compact the Vec itself. /// compact the Vec itself.
#[derive(Clone)] #[derive(Clone)]
pub struct VecBackend<T> pub struct VecBackend<T>
where T:PMMRable { where
T: PMMRable,
{
/// Backend elements /// Backend elements
pub elems: Vec<Option<(Hash, Option<T>)>>, pub elems: Vec<Option<(Hash, Option<T>)>>,
/// Positions of removed elements /// Positions of removed elements
pub remove_list: Vec<u64>, pub remove_list: Vec<u64>,
} }
impl <T> Backend <T> for VecBackend<T> impl<T> Backend<T> for VecBackend<T>
where T: PMMRable where
T: PMMRable,
{ {
fn append(&mut self, _position: u64, data: Vec<(Hash, Option<T>)>) -> Result<(), String> { fn append(&mut self, _position: u64, data: Vec<(Hash, Option<T>)>) -> Result<(), String> {
self.elems.append(&mut map_vec!(data, |d| Some(d.clone()))); self.elems.append(&mut map_vec!(data, |d| Some(d.clone())));
@ -965,8 +973,9 @@ mod test {
} }
} }
impl <T> VecBackend <T> impl<T> VecBackend<T>
where T:PMMRable where
T: PMMRable,
{ {
/// Instantiates a new VecBackend<T> /// Instantiates a new VecBackend<T>
pub fn new() -> VecBackend<T> { pub fn new() -> VecBackend<T> {
@ -990,14 +999,13 @@ mod test {
} }
#[test] #[test]
fn test_leaf_index(){ fn test_leaf_index() {
assert_eq!(n_leaves(1),1); assert_eq!(n_leaves(1), 1);
assert_eq!(n_leaves(2),2); assert_eq!(n_leaves(2), 2);
assert_eq!(n_leaves(4),3); assert_eq!(n_leaves(4), 3);
assert_eq!(n_leaves(5),4); assert_eq!(n_leaves(5), 4);
assert_eq!(n_leaves(8),5); assert_eq!(n_leaves(8), 5);
assert_eq!(n_leaves(9),6); assert_eq!(n_leaves(9), 6);
} }
#[test] #[test]
@ -1044,7 +1052,8 @@ mod test {
#[test] #[test]
fn various_n_leaves() { fn various_n_leaves() {
assert_eq!(n_leaves(1), 1); assert_eq!(n_leaves(1), 1);
// 2 is not a valid size for a tree, but n_leaves rounds up to next valid tree size // 2 is not a valid size for a tree, but n_leaves rounds up to next valid tree
// size
assert_eq!(n_leaves(2), 2); assert_eq!(n_leaves(2), 2);
assert_eq!(n_leaves(3), 2); assert_eq!(n_leaves(3), 2);
assert_eq!(n_leaves(7), 4); assert_eq!(n_leaves(7), 4);
@ -1076,7 +1085,8 @@ mod test {
// leaf node in a larger tree of 7 nodes (height 2) // leaf node in a larger tree of 7 nodes (height 2)
assert_eq!(family_branch(1, 7), [(3, 2, true), (7, 6, true)]); assert_eq!(family_branch(1, 7), [(3, 2, true), (7, 6, true)]);
// note these only go as far up as the local peak, not necessarily the single root // note these only go as far up as the local peak, not necessarily the single
// root
assert_eq!(family_branch(1, 4), [(3, 2, true)]); assert_eq!(family_branch(1, 4), [(3, 2, true)]);
// pos 4 in a tree of size 4 is a local peak // pos 4 in a tree of size 4 is a local peak
assert_eq!(family_branch(4, 4), []); assert_eq!(family_branch(4, 4), []);
@ -1089,9 +1099,10 @@ mod test {
// ok now for a more realistic one, a tree with over a million nodes in it // ok now for a more realistic one, a tree with over a million nodes in it
// find the "family path" back up the tree from a leaf node at 0 // find the "family path" back up the tree from a leaf node at 0
// Note: the first two entries in the branch are consistent with a small 7 node tree // Note: the first two entries in the branch are consistent with a small 7 node
// Note: each sibling is on the left branch, this is an example of the largest possible // tree Note: each sibling is on the left branch, this is an example of the
// list of peaks before we start combining them into larger peaks. // largest possible list of peaks before we start combining them into larger
// peaks.
assert_eq!( assert_eq!(
family_branch(1, 1_049_000), family_branch(1, 1_049_000),
[ [
@ -1139,34 +1150,19 @@ mod test {
assert_eq!(peaks(42), [31, 38, 41, 42]); assert_eq!(peaks(42), [31, 38, 41, 42]);
// large realistic example with almost 1.5 million nodes // large realistic example with almost 1.5 million nodes
// note the distance between peaks decreases toward the right (trees get smaller) // note the distance between peaks decreases toward the right (trees get
// smaller)
assert_eq!( assert_eq!(
peaks(1048555), peaks(1048555),
[ [
524287, 524287, 786430, 917501, 983036, 1015803, 1032186, 1040377, 1044472, 1046519,
786430, 1047542, 1048053, 1048308, 1048435, 1048498, 1048529, 1048544, 1048551, 1048554,
917501,
983036,
1015803,
1032186,
1040377,
1044472,
1046519,
1047542,
1048053,
1048308,
1048435,
1048498,
1048529,
1048544,
1048551,
1048554,
1048555, 1048555,
], ],
); );
} }
#[derive(Copy, Clone, Debug, PartialEq, Eq)] #[derive(Copy, Clone, Debug, PartialEq, Eq)]
struct TestElem([u32; 4]); struct TestElem([u32; 4]);
impl PMMRable for TestElem { impl PMMRable for TestElem {
@ -1186,14 +1182,12 @@ mod test {
impl Readable for TestElem { impl Readable for TestElem {
fn read(reader: &mut Reader) -> Result<TestElem, Error> { fn read(reader: &mut Reader) -> Result<TestElem, Error> {
Ok(TestElem ( Ok(TestElem([
[ reader.read_u32()?,
reader.read_u32()?, reader.read_u32()?,
reader.read_u32()?, reader.read_u32()?,
reader.read_u32()?, reader.read_u32()?,
reader.read_u32()?, ]))
]
))
} }
} }
@ -1237,7 +1231,10 @@ mod test {
assert!(proof2.verify()); assert!(proof2.verify());
// check that we cannot generate a merkle proof for pos 3 (not a leaf node) // check that we cannot generate a merkle proof for pos 3 (not a leaf node)
assert_eq!(pmmr.merkle_proof(3).err(), Some(format!("not a leaf at pos 3"))); assert_eq!(
pmmr.merkle_proof(3).err(),
Some(format!("not a leaf at pos 3"))
);
let proof4 = pmmr.merkle_proof(4).unwrap(); let proof4 = pmmr.merkle_proof(4).unwrap();
assert_eq!(proof4.peaks.len(), 2); assert_eq!(proof4.peaks.len(), 2);
@ -1309,10 +1306,7 @@ mod test {
// one element // one element
pmmr.push(elems[0]).unwrap(); pmmr.push(elems[0]).unwrap();
let node_hash = elems[0].hash(); let node_hash = elems[0].hash();
assert_eq!( assert_eq!(pmmr.root(), node_hash,);
pmmr.root(),
node_hash,
);
assert_eq!(pmmr.unpruned_size(), 1); assert_eq!(pmmr.unpruned_size(), 1);
pmmr.dump(false); pmmr.dump(false);
@ -1347,8 +1341,7 @@ mod test {
// six elements // six elements
pmmr.push(elems[5]).unwrap(); pmmr.push(elems[5]).unwrap();
let sum6 = sum4 + let sum6 = sum4 + (elems[4].hash() + elems[5].hash());
(elems[4].hash() + elems[5].hash());
assert_eq!(pmmr.root(), sum6.clone()); assert_eq!(pmmr.root(), sum6.clone());
assert_eq!(pmmr.unpruned_size(), 10); assert_eq!(pmmr.unpruned_size(), 10);
@ -1360,9 +1353,8 @@ mod test {
// eight elements // eight elements
pmmr.push(elems[7]).unwrap(); pmmr.push(elems[7]).unwrap();
let sum8 = sum4 + let sum8 =
((elems[4].hash() + elems[5].hash()) + sum4 + ((elems[4].hash() + elems[5].hash()) + (elems[6].hash() + elems[7].hash()));
(elems[6].hash() + elems[7].hash()));
assert_eq!(pmmr.root(), sum8); assert_eq!(pmmr.root(), sum8);
assert_eq!(pmmr.unpruned_size(), 15); assert_eq!(pmmr.unpruned_size(), 15);
@ -1411,9 +1403,7 @@ mod test {
pmmr.push(elems[3]).unwrap(); pmmr.push(elems[3]).unwrap();
let res = pmmr.get_last_n_insertions(19); let res = pmmr.get_last_n_insertions(19);
assert!( assert!(res.len() == 4);
res.len() == 4
);
pmmr.push(elems[5]).unwrap(); pmmr.push(elems[5]).unwrap();
pmmr.push(elems[6]).unwrap(); pmmr.push(elems[6]).unwrap();
@ -1421,9 +1411,7 @@ mod test {
pmmr.push(elems[8]).unwrap(); pmmr.push(elems[8]).unwrap();
let res = pmmr.get_last_n_insertions(7); let res = pmmr.get_last_n_insertions(7);
assert!( assert!(res.len() == 7);
res.len() == 7
);
} }
#[test] #[test]
@ -1455,7 +1443,7 @@ mod test {
// pruning a leaf with no parent should do nothing // pruning a leaf with no parent should do nothing
{ {
let mut pmmr:PMMR<TestElem, _> = PMMR::at(&mut ba, sz); let mut pmmr: PMMR<TestElem, _> = PMMR::at(&mut ba, sz);
pmmr.prune(16, 0).unwrap(); pmmr.prune(16, 0).unwrap();
assert_eq!(orig_root, pmmr.root()); assert_eq!(orig_root, pmmr.root());
} }
@ -1463,14 +1451,14 @@ mod test {
// pruning leaves with no shared parent just removes 1 element // pruning leaves with no shared parent just removes 1 element
{ {
let mut pmmr:PMMR<TestElem, _> = PMMR::at(&mut ba, sz); let mut pmmr: PMMR<TestElem, _> = PMMR::at(&mut ba, sz);
pmmr.prune(2, 0).unwrap(); pmmr.prune(2, 0).unwrap();
assert_eq!(orig_root, pmmr.root()); assert_eq!(orig_root, pmmr.root());
} }
assert_eq!(ba.used_size(), 15); assert_eq!(ba.used_size(), 15);
{ {
let mut pmmr:PMMR<TestElem, _> = PMMR::at(&mut ba, sz); let mut pmmr: PMMR<TestElem, _> = PMMR::at(&mut ba, sz);
pmmr.prune(4, 0).unwrap(); pmmr.prune(4, 0).unwrap();
assert_eq!(orig_root, pmmr.root()); assert_eq!(orig_root, pmmr.root());
} }
@ -1478,7 +1466,7 @@ mod test {
// pruning a non-leaf node has no effect // pruning a non-leaf node has no effect
{ {
let mut pmmr:PMMR<TestElem, _> = PMMR::at(&mut ba, sz); let mut pmmr: PMMR<TestElem, _> = PMMR::at(&mut ba, sz);
pmmr.prune(3, 0).unwrap_err(); pmmr.prune(3, 0).unwrap_err();
assert_eq!(orig_root, pmmr.root()); assert_eq!(orig_root, pmmr.root());
} }
@ -1486,7 +1474,7 @@ mod test {
// pruning sibling removes subtree // pruning sibling removes subtree
{ {
let mut pmmr:PMMR<TestElem, _> = PMMR::at(&mut ba, sz); let mut pmmr: PMMR<TestElem, _> = PMMR::at(&mut ba, sz);
pmmr.prune(5, 0).unwrap(); pmmr.prune(5, 0).unwrap();
assert_eq!(orig_root, pmmr.root()); assert_eq!(orig_root, pmmr.root());
} }
@ -1494,7 +1482,7 @@ mod test {
// pruning all leaves under level >1 removes all subtree // pruning all leaves under level >1 removes all subtree
{ {
let mut pmmr:PMMR<TestElem, _> = PMMR::at(&mut ba, sz); let mut pmmr: PMMR<TestElem, _> = PMMR::at(&mut ba, sz);
pmmr.prune(1, 0).unwrap(); pmmr.prune(1, 0).unwrap();
assert_eq!(orig_root, pmmr.root()); assert_eq!(orig_root, pmmr.root());
} }
@ -1502,7 +1490,7 @@ mod test {
// pruning everything should only leave us the peaks // pruning everything should only leave us the peaks
{ {
let mut pmmr:PMMR<TestElem, _> = PMMR::at(&mut ba, sz); let mut pmmr: PMMR<TestElem, _> = PMMR::at(&mut ba, sz);
for n in 1..16 { for n in 1..16 {
let _ = pmmr.prune(n, 0); let _ = pmmr.prune(n, 0);
} }
@ -1550,7 +1538,6 @@ mod test {
assert_eq!(pl.get_shift(17), Some(11)); assert_eq!(pl.get_shift(17), Some(11));
} }
#[test] #[test]
fn n_size_check() { fn n_size_check() {
assert_eq!(n_leaves(1), 1); assert_eq!(n_leaves(1), 1);
View file
@ -29,7 +29,6 @@ use core::hash::Hash;
use ser::{self, Readable, Reader, Writeable, Writer}; use ser::{self, Readable, Reader, Writeable, Writer};
use core::global; use core::global;
/// The difficulty is defined as the maximum target divided by the block hash. /// The difficulty is defined as the maximum target divided by the block hash.
#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord)] #[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord)]
pub struct Difficulty { pub struct Difficulty {
@ -63,7 +62,9 @@ impl Difficulty {
let mut in_vec = h.to_vec(); let mut in_vec = h.to_vec();
in_vec.truncate(8); in_vec.truncate(8);
let num = BigEndian::read_u64(&in_vec); let num = BigEndian::read_u64(&in_vec);
Difficulty { num: max_target / num } Difficulty {
num: max_target / num,
}
} }
/// Converts the difficulty into a u64 /// Converts the difficulty into a u64
@ -81,28 +82,36 @@ impl fmt::Display for Difficulty {
impl Add<Difficulty> for Difficulty { impl Add<Difficulty> for Difficulty {
type Output = Difficulty; type Output = Difficulty;
fn add(self, other: Difficulty) -> Difficulty { fn add(self, other: Difficulty) -> Difficulty {
Difficulty { num: self.num + other.num } Difficulty {
num: self.num + other.num,
}
} }
} }
impl Sub<Difficulty> for Difficulty { impl Sub<Difficulty> for Difficulty {
type Output = Difficulty; type Output = Difficulty;
fn sub(self, other: Difficulty) -> Difficulty { fn sub(self, other: Difficulty) -> Difficulty {
Difficulty { num: self.num - other.num } Difficulty {
num: self.num - other.num,
}
} }
} }
impl Mul<Difficulty> for Difficulty { impl Mul<Difficulty> for Difficulty {
type Output = Difficulty; type Output = Difficulty;
fn mul(self, other: Difficulty) -> Difficulty { fn mul(self, other: Difficulty) -> Difficulty {
Difficulty { num: self.num * other.num } Difficulty {
num: self.num * other.num,
}
} }
} }
impl Div<Difficulty> for Difficulty { impl Div<Difficulty> for Difficulty {
type Output = Difficulty; type Output = Difficulty;
fn div(self, other: Difficulty) -> Difficulty { fn div(self, other: Difficulty) -> Difficulty {
Difficulty { num: self.num / other.num } Difficulty {
num: self.num / other.num,
}
} }
} }
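The from_hash logic above reduces to a single division over the hash's leading eight bytes. A minimal sketch (byteorder as imported elsewhere in this diff; treating an all-zero prefix, which the division would otherwise reject, as maximum difficulty is this sketch's assumption):

use byteorder::{BigEndian, ByteOrder};

fn difficulty_from_hash(hash: &[u8; 32], max_target: u64) -> u64 {
	// interpret the first 8 bytes of the hash as a big-endian number
	let num = BigEndian::read_u64(&hash[..8]);
	if num == 0 {
		max_target // avoid a divide-by-zero on an (astronomically unlikely) zero prefix
	} else {
		max_target / num
	}
}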
@ -157,7 +166,9 @@ impl<'de> de::Visitor<'de> for DiffVisitor {
&"a value number", &"a value number",
)); ));
}; };
Ok(Difficulty { num: num_in.unwrap() }) Ok(Difficulty {
num: num_in.unwrap(),
})
} }
fn visit_u64<E>(self, value: u64) -> Result<Self::Value, E> fn visit_u64<E>(self, value: u64) -> Result<Self::Value, E>
View file
@ -15,9 +15,9 @@
//! Transactions //! Transactions
use blake2::blake2b::blake2b; use blake2::blake2b::blake2b;
use util::secp::{self, Message, Signature}; use util::secp::{self, Message, Signature};
use util::{static_secp_instance, kernel_sig_msg}; use util::{kernel_sig_msg, static_secp_instance};
use util::secp::pedersen::{Commitment, RangeProof, ProofMessage}; use util::secp::pedersen::{Commitment, ProofMessage, RangeProof};
use std::cmp::{min, max}; use std::cmp::{max, min};
use std::cmp::Ordering; use std::cmp::Ordering;
use std::{error, fmt}; use std::{error, fmt};
@ -29,8 +29,9 @@ use core::BlockHeader;
use core::hash::{Hash, Hashed, ZERO_HASH}; use core::hash::{Hash, Hashed, ZERO_HASH};
use core::pmmr::MerkleProof; use core::pmmr::MerkleProof;
use keychain; use keychain;
use keychain::{Identifier, Keychain, BlindingFactor}; use keychain::{BlindingFactor, Identifier, Keychain};
use ser::{self, read_and_verify_sorted, PMMRable, Readable, Reader, Writeable, WriteableSorted, Writer, ser_vec}; use ser::{self, read_and_verify_sorted, ser_vec, PMMRable, Readable, Reader, Writeable,
WriteableSorted, Writer};
use std::io::Cursor; use std::io::Cursor;
use util; use util;
use util::LOGGER; use util::LOGGER;
@ -38,7 +39,8 @@ use util::LOGGER;
/// The size of the blake2 hash of a switch commitment (256 bits) /// The size of the blake2 hash of a switch commitment (256 bits)
pub const SWITCH_COMMIT_HASH_SIZE: usize = 32; pub const SWITCH_COMMIT_HASH_SIZE: usize = 32;
/// The size of the secret key used to generate the blake2 switch commitment hash (256 bits) /// The size of the secret key used to generate the blake2 switch commitment
/// hash (256 bits)
pub const SWITCH_COMMIT_KEY_SIZE: usize = 32; pub const SWITCH_COMMIT_KEY_SIZE: usize = 32;
bitflags! { bitflags! {
@ -81,7 +83,8 @@ pub enum Error {
OddFee, OddFee,
/// Kernel fee can't be odd, due to half fee burning /// Kernel fee can't be odd, due to half fee burning
OddKernelFee, OddKernelFee,
/// Underlying Secp256k1 error (signature validation or invalid public key typically) /// Underlying Secp256k1 error (signature validation or invalid public key
/// typically)
Secp(secp::Error), Secp(secp::Error),
/// Underlying keychain related error /// Underlying keychain related error
Keychain(keychain::Error), Keychain(keychain::Error),
@ -100,7 +103,8 @@ pub enum Error {
RangeProof, RangeProof,
/// Error originating from an invalid Merkle proof /// Error originating from an invalid Merkle proof
MerkleProof, MerkleProof,
/// Error originating from an input attempting to spend an immature coinbase output /// Error originating from an input attempting to spend an immature
/// coinbase output
ImmatureCoinbase, ImmatureCoinbase,
} }
@ -138,7 +142,6 @@ impl From<keychain::Error> for Error {
} }
} }
/// A proof that a transaction sums to zero. Includes both the transaction's /// A proof that a transaction sums to zero. Includes both the transaction's
/// Pedersen commitment and the signature that guarantees the commitments /// Pedersen commitment and the signature that guarantees the commitments
/// amount to zero. /// amount to zero.
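For reference, the balance statement these pieces encode, in standard MimbleWimble notation (background, not part of this diff): a Pedersen commitment is C = vH + rG over generators H and G, and a transaction with fee f satisfies

\sum C_{\mathrm{out}} - \sum C_{\mathrm{in}} + f \cdot H = r_{\mathrm{excess}} \cdot G

so all values cancel, and the kernel signature, verified against the excess as a public key, proves knowledge of r_excess without revealing any amount.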
@ -164,7 +167,8 @@ pub struct TxKernel {
hashable_ord!(TxKernel); hashable_ord!(TxKernel);
/// TODO - no clean way to bridge core::hash::Hash and std::hash::Hash implementations? /// TODO - no clean way to bridge core::hash::Hash and std::hash::Hash
/// implementations?
impl ::std::hash::Hash for TxKernel { impl ::std::hash::Hash for TxKernel {
fn hash<H: ::std::hash::Hasher>(&self, state: &mut H) { fn hash<H: ::std::hash::Hasher>(&self, state: &mut H) {
let mut vec = Vec::new(); let mut vec = Vec::new();
@ -189,9 +193,8 @@ impl Writeable for TxKernel {
impl Readable for TxKernel { impl Readable for TxKernel {
fn read(reader: &mut Reader) -> Result<TxKernel, ser::Error> { fn read(reader: &mut Reader) -> Result<TxKernel, ser::Error> {
let features = KernelFeatures::from_bits(reader.read_u8()?).ok_or( let features =
ser::Error::CorruptedData, KernelFeatures::from_bits(reader.read_u8()?).ok_or(ser::Error::CorruptedData)?;
)?;
Ok(TxKernel { Ok(TxKernel {
features: features, features: features,
fee: reader.read_u64()?, fee: reader.read_u64()?,
@ -246,8 +249,7 @@ impl TxKernel {
impl PMMRable for TxKernel { impl PMMRable for TxKernel {
fn len() -> usize { fn len() -> usize {
17 + // features plus fee and lock_height 17 + // features plus fee and lock_height
secp::constants::PEDERSEN_COMMITMENT_SIZE + secp::constants::PEDERSEN_COMMITMENT_SIZE + secp::constants::AGG_SIGNATURE_SIZE
secp::constants::AGG_SIGNATURE_SIZE
} }
} }
@ -277,7 +279,8 @@ impl Writeable for Transaction {
[write_u64, self.kernels.len() as u64] [write_u64, self.kernels.len() as u64]
); );
// Consensus rule that everything is sorted in lexicographical order on the wire. // Consensus rule that everything is sorted in lexicographical order on the
// wire.
let mut inputs = self.inputs.clone(); let mut inputs = self.inputs.clone();
let mut outputs = self.outputs.clone(); let mut outputs = self.outputs.clone();
let mut kernels = self.kernels.clone(); let mut kernels = self.kernels.clone();
@ -344,11 +347,7 @@ impl Transaction {
/// Creates a new transaction initialized with /// Creates a new transaction initialized with
/// the provided inputs, outputs, kernels /// the provided inputs, outputs, kernels
pub fn new( pub fn new(inputs: Vec<Input>, outputs: Vec<Output>, kernels: Vec<TxKernel>) -> Transaction {
inputs: Vec<Input>,
outputs: Vec<Output>,
kernels: Vec<TxKernel>,
) -> Transaction {
Transaction { Transaction {
offset: BlindingFactor::zero(), offset: BlindingFactor::zero(),
inputs: inputs, inputs: inputs,
@ -397,7 +396,9 @@ impl Transaction {
/// Lock height of a transaction is the max lock height of the kernels. /// Lock height of a transaction is the max lock height of the kernels.
pub fn lock_height(&self) -> u64 { pub fn lock_height(&self) -> u64 {
self.kernels.iter().fold(0, |acc, ref x| max(acc, x.lock_height)) self.kernels
.iter()
.fold(0, |acc, ref x| max(acc, x.lock_height))
} }
/// To verify transaction kernels we check that - /// To verify transaction kernels we check that -
@ -419,10 +420,7 @@ impl Transaction {
// sum all kernels commitments // sum all kernels commitments
let kernel_sum = { let kernel_sum = {
let mut kernel_commits = self.kernels let mut kernel_commits = self.kernels.iter().map(|x| x.excess).collect::<Vec<_>>();
.iter()
.map(|x| x.excess)
.collect::<Vec<_>>();
let secp = static_secp_instance(); let secp = static_secp_instance();
let secp = secp.lock().unwrap(); let secp = secp.lock().unwrap();
@ -508,7 +506,7 @@ impl Transaction {
/// But also information required to verify coinbase maturity through /// But also information required to verify coinbase maturity through
/// the lock_height hashed in the switch_commit_hash. /// the lock_height hashed in the switch_commit_hash.
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub struct Input{ pub struct Input {
/// The features of the output being spent. /// The features of the output being spent.
/// We will check maturity for coinbase output. /// We will check maturity for coinbase output.
pub features: OutputFeatures, pub features: OutputFeatures,
@ -524,7 +522,8 @@ pub struct Input{
hashable_ord!(Input); hashable_ord!(Input);
/// TODO - no clean way to bridge core::hash::Hash and std::hash::Hash implementations? /// TODO - no clean way to bridge core::hash::Hash and std::hash::Hash
/// implementations?
impl ::std::hash::Hash for Input { impl ::std::hash::Hash for Input {
fn hash<H: ::std::hash::Hasher>(&self, state: &mut H) { fn hash<H: ::std::hash::Hasher>(&self, state: &mut H) {
let mut vec = Vec::new(); let mut vec = Vec::new();
@ -558,28 +557,17 @@ impl Writeable for Input {
/// an Input from a binary stream. /// an Input from a binary stream.
impl Readable for Input { impl Readable for Input {
fn read(reader: &mut Reader) -> Result<Input, ser::Error> { fn read(reader: &mut Reader) -> Result<Input, ser::Error> {
let features = OutputFeatures::from_bits(reader.read_u8()?).ok_or( let features =
ser::Error::CorruptedData, OutputFeatures::from_bits(reader.read_u8()?).ok_or(ser::Error::CorruptedData)?;
)?;
let commit = Commitment::read(reader)?; let commit = Commitment::read(reader)?;
if features.contains(OutputFeatures::COINBASE_OUTPUT) { if features.contains(OutputFeatures::COINBASE_OUTPUT) {
let block_hash = Some(Hash::read(reader)?); let block_hash = Some(Hash::read(reader)?);
let merkle_proof = Some(MerkleProof::read(reader)?); let merkle_proof = Some(MerkleProof::read(reader)?);
Ok(Input::new( Ok(Input::new(features, commit, block_hash, merkle_proof))
features,
commit,
block_hash,
merkle_proof,
))
} else { } else {
Ok(Input::new( Ok(Input::new(features, commit, None, None))
features,
commit,
None,
None,
))
} }
} }
} }
@ -589,7 +577,8 @@ impl Readable for Input {
/// Input must also provide the original output features and the hash of the block /// Input must also provide the original output features and the hash of the block
/// the output originated from. /// the output originated from.
impl Input { impl Input {
/// Build a new input from the data required to identify and verify an output being spent. /// Build a new input from the data required to identify and verify an
/// output being spent.
pub fn new( pub fn new(
features: OutputFeatures, features: OutputFeatures,
commit: Commitment, commit: Commitment,
@ -701,7 +690,7 @@ bitflags! {
/// Definition of the switch commitment hash /// Definition of the switch commitment hash
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)] #[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
pub struct SwitchCommitHashKey ([u8; SWITCH_COMMIT_KEY_SIZE]); pub struct SwitchCommitHashKey([u8; SWITCH_COMMIT_KEY_SIZE]);
impl SwitchCommitHashKey { impl SwitchCommitHashKey {
/// We use a zero value key for regular transactions. /// We use a zero value key for regular transactions.
@ -712,14 +701,18 @@ impl SwitchCommitHashKey {
/// Generate a switch commit hash key from the provided keychain and key id. /// Generate a switch commit hash key from the provided keychain and key id.
pub fn from_keychain(keychain: &Keychain, key_id: &Identifier) -> SwitchCommitHashKey { pub fn from_keychain(keychain: &Keychain, key_id: &Identifier) -> SwitchCommitHashKey {
SwitchCommitHashKey( SwitchCommitHashKey(
keychain.switch_commit_hash_key(key_id) keychain
.expect("failed to derive switch commit hash key") .switch_commit_hash_key(key_id)
.expect("failed to derive switch commit hash key"),
) )
} }
/// Reconstructs a switch commit hash key from a byte slice. /// Reconstructs a switch commit hash key from a byte slice.
pub fn from_bytes(bytes: &[u8]) -> SwitchCommitHashKey { pub fn from_bytes(bytes: &[u8]) -> SwitchCommitHashKey {
assert!(bytes.len() == 32, "switch_commit_hash_key requires 32 bytes"); assert!(
bytes.len() == 32,
"switch_commit_hash_key requires 32 bytes"
);
let mut key = [0; SWITCH_COMMIT_KEY_SIZE]; let mut key = [0; SWITCH_COMMIT_KEY_SIZE];
for i in 0..min(SWITCH_COMMIT_KEY_SIZE, bytes.len()) { for i in 0..min(SWITCH_COMMIT_KEY_SIZE, bytes.len()) {
@ -833,7 +826,8 @@ pub struct Output {
hashable_ord!(Output); hashable_ord!(Output);
/// TODO - no clean way to bridge core::hash::Hash and std::hash::Hash implementations? /// TODO - no clean way to bridge core::hash::Hash and std::hash::Hash
/// implementations?
impl ::std::hash::Hash for Output { impl ::std::hash::Hash for Output {
fn hash<H: ::std::hash::Hasher>(&self, state: &mut H) { fn hash<H: ::std::hash::Hasher>(&self, state: &mut H) {
let mut vec = Vec::new(); let mut vec = Vec::new();
@ -866,9 +860,8 @@ impl Writeable for Output {
/// an Output from a binary stream. /// an Output from a binary stream.
impl Readable for Output { impl Readable for Output {
fn read(reader: &mut Reader) -> Result<Output, ser::Error> { fn read(reader: &mut Reader) -> Result<Output, ser::Error> {
let features = OutputFeatures::from_bits(reader.read_u8()?).ok_or( let features =
ser::Error::CorruptedData, OutputFeatures::from_bits(reader.read_u8()?).ok_or(ser::Error::CorruptedData)?;
)?;
Ok(Output { Ok(Output {
features: features, features: features,
@ -899,7 +892,12 @@ impl Output {
pub fn verify_proof(&self) -> Result<(), secp::Error> { pub fn verify_proof(&self) -> Result<(), secp::Error> {
let secp = static_secp_instance(); let secp = static_secp_instance();
let secp = secp.lock().unwrap(); let secp = secp.lock().unwrap();
match Keychain::verify_range_proof(&secp, self.commit, self.proof, Some(self.switch_commit_hash.as_ref().to_vec())){ match Keychain::verify_range_proof(
&secp,
self.commit,
self.proof,
Some(self.switch_commit_hash.as_ref().to_vec()),
) {
Ok(_) => Ok(()), Ok(_) => Ok(()),
Err(e) => Err(e), Err(e) => Err(e),
} }
@ -908,10 +906,16 @@ impl Output {
/// Given the original blinding factor we can recover the /// Given the original blinding factor we can recover the
/// value from the range proof and the commitment /// value from the range proof and the commitment
pub fn recover_value(&self, keychain: &Keychain, key_id: &Identifier) -> Option<u64> { pub fn recover_value(&self, keychain: &Keychain, key_id: &Identifier) -> Option<u64> {
match keychain.rewind_range_proof(key_id, self.commit, Some(self.switch_commit_hash.as_ref().to_vec()), self.proof) { match keychain.rewind_range_proof(
key_id,
self.commit,
Some(self.switch_commit_hash.as_ref().to_vec()),
self.proof,
) {
Ok(proof_info) => { Ok(proof_info) => {
if proof_info.success { if proof_info.success {
let elements = ProofMessageElements::from_proof_message(proof_info.message).unwrap(); let elements =
ProofMessageElements::from_proof_message(proof_info.message).unwrap();
Some(elements.value) Some(elements.value)
} else { } else {
None None
@ -920,7 +924,6 @@ impl Output {
Err(_) => None, Err(_) => None,
} }
} }
} }
/// An output_identifier can be built from either an input _or_ an output and /// An output_identifier can be built from either an input _or_ an output and
@ -980,9 +983,8 @@ impl Writeable for OutputIdentifier {
impl Readable for OutputIdentifier { impl Readable for OutputIdentifier {
fn read(reader: &mut Reader) -> Result<OutputIdentifier, ser::Error> { fn read(reader: &mut Reader) -> Result<OutputIdentifier, ser::Error> {
let features = OutputFeatures::from_bits(reader.read_u8()?).ok_or( let features =
ser::Error::CorruptedData, OutputFeatures::from_bits(reader.read_u8()?).ok_or(ser::Error::CorruptedData)?;
)?;
Ok(OutputIdentifier { Ok(OutputIdentifier {
commit: Commitment::read(reader)?, commit: Commitment::read(reader)?,
features: features, features: features,
@ -1015,7 +1017,7 @@ impl OutputStoreable {
/// Return a regular output /// Return a regular output
pub fn to_output(self, rproof: RangeProof) -> Output { pub fn to_output(self, rproof: RangeProof) -> Output {
Output{ Output {
features: self.features, features: self.features,
commit: self.commit, commit: self.commit,
switch_commit_hash: self.switch_commit_hash, switch_commit_hash: self.switch_commit_hash,
@ -1043,9 +1045,8 @@ impl Writeable for OutputStoreable {
impl Readable for OutputStoreable { impl Readable for OutputStoreable {
fn read(reader: &mut Reader) -> Result<OutputStoreable, ser::Error> { fn read(reader: &mut Reader) -> Result<OutputStoreable, ser::Error> {
let features = OutputFeatures::from_bits(reader.read_u8()?).ok_or( let features =
ser::Error::CorruptedData, OutputFeatures::from_bits(reader.read_u8()?).ok_or(ser::Error::CorruptedData)?;
)?;
Ok(OutputStoreable { Ok(OutputStoreable {
commit: Commitment::read(reader)?, commit: Commitment::read(reader)?,
switch_commit_hash: SwitchCommitHash::read(reader)?, switch_commit_hash: SwitchCommitHash::read(reader)?,
@ -1083,13 +1084,14 @@ impl Readable for ProofMessageElements {
impl ProofMessageElements { impl ProofMessageElements {
/// Serialise and return a ProofMessage /// Serialise and return a ProofMessage
pub fn to_proof_message(&self)->ProofMessage { pub fn to_proof_message(&self) -> ProofMessage {
ProofMessage::from_bytes(&ser_vec(self).unwrap()) ProofMessage::from_bytes(&ser_vec(self).unwrap())
} }
/// Deserialise and return the message elements /// Deserialise and return the message elements
pub fn from_proof_message(proof_message:ProofMessage) pub fn from_proof_message(
-> Result<ProofMessageElements, ser::Error> { proof_message: ProofMessage,
) -> Result<ProofMessageElements, ser::Error> {
let mut c = Cursor::new(proof_message.as_bytes()); let mut c = Cursor::new(proof_message.as_bytes());
ser::deserialize::<ProofMessageElements>(&mut c) ser::deserialize::<ProofMessageElements>(&mut c)
} }
@ -1109,7 +1111,7 @@ mod test {
let commit = keychain.commit(5, &key_id).unwrap(); let commit = keychain.commit(5, &key_id).unwrap();
// just some bytes for testing ser/deser // just some bytes for testing ser/deser
let sig = secp::Signature::from_raw_data(&[0;64]).unwrap(); let sig = secp::Signature::from_raw_data(&[0; 64]).unwrap();
let kernel = TxKernel { let kernel = TxKernel {
features: KernelFeatures::DEFAULT_KERNEL, features: KernelFeatures::DEFAULT_KERNEL,
@ -1153,13 +1155,18 @@ mod test {
let key_id = keychain.derive_key_id(1).unwrap(); let key_id = keychain.derive_key_id(1).unwrap();
let commit = keychain.commit(5, &key_id).unwrap(); let commit = keychain.commit(5, &key_id).unwrap();
let switch_commit = keychain.switch_commit(&key_id).unwrap(); let switch_commit = keychain.switch_commit(&key_id).unwrap();
let switch_commit_hash = SwitchCommitHash::from_switch_commit( let switch_commit_hash =
switch_commit, SwitchCommitHash::from_switch_commit(switch_commit, &keychain, &key_id);
&keychain,
&key_id,
);
let msg = secp::pedersen::ProofMessage::empty(); let msg = secp::pedersen::ProofMessage::empty();
let proof = keychain.range_proof(5, &key_id, commit, Some(switch_commit_hash.as_ref().to_vec()), msg).unwrap(); let proof = keychain
.range_proof(
5,
&key_id,
commit,
Some(switch_commit_hash.as_ref().to_vec()),
msg,
)
.unwrap();
let out = Output { let out = Output {
features: OutputFeatures::DEFAULT_OUTPUT, features: OutputFeatures::DEFAULT_OUTPUT,
@ -1185,16 +1192,19 @@ mod test {
let commit = keychain.commit(value, &key_id).unwrap(); let commit = keychain.commit(value, &key_id).unwrap();
let switch_commit = keychain.switch_commit(&key_id).unwrap(); let switch_commit = keychain.switch_commit(&key_id).unwrap();
let switch_commit_hash = SwitchCommitHash::from_switch_commit( let switch_commit_hash =
switch_commit, SwitchCommitHash::from_switch_commit(switch_commit, &keychain, &key_id);
&keychain, let msg = (ProofMessageElements { value: value }).to_proof_message();
&key_id,
);
let msg = (ProofMessageElements {
value: value,
}).to_proof_message();
let proof = keychain.range_proof(value, &key_id, commit, Some(switch_commit_hash.as_ref().to_vec()), msg).unwrap(); let proof = keychain
.range_proof(
value,
&key_id,
commit,
Some(switch_commit_hash.as_ref().to_vec()),
msg,
)
.unwrap();
let output = Output { let output = Output {
features: OutputFeatures::DEFAULT_OUTPUT, features: OutputFeatures::DEFAULT_OUTPUT,
@ -1212,7 +1222,8 @@ mod test {
return; return;
} }
// Bulletproofs message unwind will just be gibberish given the wrong blinding factor // Bulletproofs message unwind will just be gibberish given the wrong blinding
// factor
} }
#[test] #[test]
@ -1257,8 +1268,8 @@ mod test {
let short_id = input.short_id(&block_hash, nonce); let short_id = input.short_id(&block_hash, nonce);
assert_eq!(short_id, ShortId::from_hex("28fea5a693af").unwrap()); assert_eq!(short_id, ShortId::from_hex("28fea5a693af").unwrap());
// now generate the short_id for a *very* similar output (single feature flag different) // now generate the short_id for a *very* similar output (single feature flag
// and check it generates a different short_id // different) and check it generates a different short_id
let input = Input { let input = Input {
features: OutputFeatures::COINBASE_OUTPUT, features: OutputFeatures::COINBASE_OUTPUT,
commit: commit, commit: commit,
View file
@ -59,13 +59,13 @@ pub fn genesis_testnet1() -> core::Block {
..time::empty_tm() ..time::empty_tm()
}, },
nonce: 28205, nonce: 28205,
pow: core::Proof::new(vec![0x21e, 0x7a2, 0xeae, 0x144e, 0x1b1c, 0x1fbd, pow: core::Proof::new(vec![
0x203a, 0x214b, 0x293b, 0x2b74, 0x2bfa, 0x2c26, 0x21e, 0x7a2, 0xeae, 0x144e, 0x1b1c, 0x1fbd, 0x203a, 0x214b, 0x293b, 0x2b74,
0x32bb, 0x346a, 0x34c7, 0x37c5, 0x4164, 0x42cc, 0x2bfa, 0x2c26, 0x32bb, 0x346a, 0x34c7, 0x37c5, 0x4164, 0x42cc, 0x4cc3, 0x55af,
0x4cc3, 0x55af, 0x5a70, 0x5b14, 0x5e1c, 0x5f76, 0x5a70, 0x5b14, 0x5e1c, 0x5f76, 0x6061, 0x60f9, 0x61d7, 0x6318, 0x63a1, 0x63fb,
0x6061, 0x60f9, 0x61d7, 0x6318, 0x63a1, 0x63fb, 0x649b, 0x64e5, 0x65a1, 0x6b69, 0x70f8, 0x71c7, 0x71cd, 0x7492, 0x7b11, 0x7db8,
0x649b, 0x64e5, 0x65a1, 0x6b69, 0x70f8, 0x71c7, 0x7f29, 0x7ff8,
0x71cd, 0x7492, 0x7b11, 0x7db8, 0x7f29, 0x7ff8]), ]),
..Default::default() ..Default::default()
}, },
inputs: vec![], inputs: vec![],
@ -93,13 +93,14 @@ pub fn genesis_testnet2() -> core::Block {
difficulty: Difficulty::from_num(global::initial_block_difficulty()), difficulty: Difficulty::from_num(global::initial_block_difficulty()),
total_difficulty: Difficulty::from_num(global::initial_block_difficulty()), total_difficulty: Difficulty::from_num(global::initial_block_difficulty()),
nonce: 70081, nonce: 70081,
pow: core::Proof::new(vec![0x43ee48, 0x18d5a49, 0x2b76803, 0x3181a29, 0x39d6a8a, 0x39ef8d8, pow: core::Proof::new(vec![
0x478a0fb, 0x69c1f9e, 0x6da4bca, 0x6f8782c, 0x9d842d7, 0xa051397, 0x43ee48, 0x18d5a49, 0x2b76803, 0x3181a29, 0x39d6a8a, 0x39ef8d8, 0x478a0fb,
0xb56934c, 0xbf1f2c7, 0xc992c89, 0xce53a5a, 0xfa87225, 0x1070f99e, 0x69c1f9e, 0x6da4bca, 0x6f8782c, 0x9d842d7, 0xa051397, 0xb56934c, 0xbf1f2c7,
0x107b39af, 0x1160a11b, 0x11b379a8, 0x12420e02, 0x12991602, 0x12cc4a71, 0xc992c89, 0xce53a5a, 0xfa87225, 0x1070f99e, 0x107b39af, 0x1160a11b, 0x11b379a8,
0x13d91075, 0x15c950d0, 0x1659b7be, 0x1682c2b4, 0x1796c62f, 0x191cf4c9, 0x12420e02, 0x12991602, 0x12cc4a71, 0x13d91075, 0x15c950d0, 0x1659b7be, 0x1682c2b4,
0x19d71ac0, 0x1b812e44, 0x1d150efe, 0x1d15bd77, 0x1d172841, 0x1d51e967, 0x1796c62f, 0x191cf4c9, 0x19d71ac0, 0x1b812e44, 0x1d150efe, 0x1d15bd77, 0x1d172841,
0x1ee1de39, 0x1f35c9b3, 0x1f557204, 0x1fbf884f, 0x1fcf80bf, 0x1fd59d40]), 0x1d51e967, 0x1ee1de39, 0x1f35c9b3, 0x1f557204, 0x1fbf884f, 0x1fcf80bf, 0x1fd59d40,
]),
..Default::default() ..Default::default()
}, },
inputs: vec![], inputs: vec![],

View file

@ -25,8 +25,8 @@ use std::sync::RwLock;
use consensus::PROOFSIZE; use consensus::PROOFSIZE;
use consensus::DEFAULT_SIZESHIFT; use consensus::DEFAULT_SIZESHIFT;
use consensus::COINBASE_MATURITY; use consensus::COINBASE_MATURITY;
use consensus::{MEDIAN_TIME_WINDOW, INITIAL_DIFFICULTY, use consensus::{BLOCK_TIME_SEC, CUT_THROUGH_HORIZON, DIFFICULTY_ADJUST_WINDOW, INITIAL_DIFFICULTY,
BLOCK_TIME_SEC, DIFFICULTY_ADJUST_WINDOW, CUT_THROUGH_HORIZON}; MEDIAN_TIME_WINDOW};
use core::target::Difficulty; use core::target::Difficulty;
use consensus::TargetError; use consensus::TargetError;
@ -64,8 +64,9 @@ pub const TESTNET2_INITIAL_DIFFICULTY: u64 = 1;
/// The target is the 32-byte hash that block hashes must be lower than. /// The target is the 32-byte hash that block hashes must be lower than.
pub const MAX_PROOF_TARGET: [u8; 8] = [0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff]; pub const MAX_PROOF_TARGET: [u8; 8] = [0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff];
/// We want to slow this right down for user testing at cuckoo 16, so pick a smaller max /// We want to slow this right down for user testing at cuckoo 16, so pick a
/// smaller max
pub const MAX_PROOF_TARGET_TESTING: [u8; 8] = [0x05, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff]; pub const MAX_PROOF_TARGET_TESTING: [u8; 8] = [0x05, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff];
/// Types of chain a server can run with, dictates the genesis block and /// Types of chain a server can run with, dictates the genesis block and
@ -78,13 +79,13 @@ pub enum ChainTypes {
/// For User testing /// For User testing
UserTesting, UserTesting,
/// First test network /// First test network
Testnet1, Testnet1,
/// Second test network /// Second test network
Testnet2, Testnet2,
/// Main production network /// Main production network
Mainnet, Mainnet,
} }
@ -193,9 +194,8 @@ pub fn is_user_testing_mode() -> bool {
/// Are we in production mode (a live public network)? /// Are we in production mode (a live public network)?
pub fn is_production_mode() -> bool { pub fn is_production_mode() -> bool {
let param_ref = CHAIN_TYPE.read().unwrap(); let param_ref = CHAIN_TYPE.read().unwrap();
ChainTypes::Testnet1 == *param_ref || ChainTypes::Testnet1 == *param_ref || ChainTypes::Testnet2 == *param_ref
ChainTypes::Testnet2 == *param_ref || || ChainTypes::Mainnet == *param_ref
ChainTypes::Mainnet == *param_ref
} }
/// Helper function to get a nonce known to create a valid POW on /// Helper function to get a nonce known to create a valid POW on
@ -210,22 +210,21 @@ pub fn get_genesis_nonce() -> u64 {
// Magic nonce for current genesis block at cuckoo16 // Magic nonce for current genesis block at cuckoo16
ChainTypes::UserTesting => 27944, ChainTypes::UserTesting => 27944,
// Magic nonce for genesis block for testnet2 (cuckoo30) // Magic nonce for genesis block for testnet2 (cuckoo30)
_ => panic!("Pre-set"), _ => panic!("Pre-set"),
} }
} }
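The nonce table only covers the testing chain types; real networks ship a pre-mined genesis, hence the panic. A minimal sketch of the same gating pattern, with a hypothetical AutomatedTesting nonce and an Option in place of the panic:

// Simplified: behaviour keyed off the chain type, as global.rs does behind
// an RwLock. The AutomatedTesting value here is illustrative only.
enum ChainTypes {
    AutomatedTesting,
    UserTesting,
    Testnet1,
    Testnet2,
    Mainnet,
}

fn genesis_nonce(chain_type: &ChainTypes) -> Option<u64> {
    match chain_type {
        // Magic nonces known to satisfy the embedded genesis PoW.
        ChainTypes::AutomatedTesting => Some(0),
        ChainTypes::UserTesting => Some(27944),
        // Real networks use a pre-set genesis block instead.
        _ => None,
    }
}

fn main() {
    assert_eq!(genesis_nonce(&ChainTypes::UserTesting), Some(27944));
    assert_eq!(genesis_nonce(&ChainTypes::Mainnet), None);
}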
/// Converts an iterator of block difficulty data to a more manageable vector and pads /// if needed (which will only be needed for the first few blocks after genesis)
/// if needed (which will only be needed for the first few blocks after genesis)
pub fn difficulty_data_to_vector<T>(cursor: T) -> Vec<Result<(u64, Difficulty), TargetError>> pub fn difficulty_data_to_vector<T>(cursor: T) -> Vec<Result<(u64, Difficulty), TargetError>>
where where
T: IntoIterator<Item = Result<(u64, Difficulty), TargetError>> { T: IntoIterator<Item = Result<(u64, Difficulty), TargetError>>,
{
// Convert iterator to vector, so we can append to it if necessary // Convert iterator to vector, so we can append to it if necessary
let needed_block_count = (MEDIAN_TIME_WINDOW + DIFFICULTY_ADJUST_WINDOW) as usize; let needed_block_count = (MEDIAN_TIME_WINDOW + DIFFICULTY_ADJUST_WINDOW) as usize;
let mut last_n: Vec<Result<(u64, Difficulty), TargetError>> = cursor.into_iter() let mut last_n: Vec<Result<(u64, Difficulty), TargetError>> =
.take(needed_block_count) cursor.into_iter().take(needed_block_count).collect();
.collect();
// Sort blocks from earliest to latest (to keep conceptually easier) // Sort blocks from earliest to latest (to keep conceptually easier)
last_n.reverse(); last_n.reverse();
@ -235,18 +234,19 @@ pub fn difficulty_data_to_vector<T>(cursor: T) -> Vec<Result<(u64, Difficulty),
let block_count_difference = needed_block_count - last_n.len(); let block_count_difference = needed_block_count - last_n.len();
if block_count_difference > 0 { if block_count_difference > 0 {
// Collect any real data we have // Collect any real data we have
let mut live_intervals:Vec<(u64, Difficulty)> = last_n.iter() let mut live_intervals: Vec<(u64, Difficulty)> = last_n
.iter()
.map(|b| (b.clone().unwrap().0, b.clone().unwrap().1)) .map(|b| (b.clone().unwrap().0, b.clone().unwrap().1))
.collect(); .collect();
for i in (1..live_intervals.len()).rev() { for i in (1..live_intervals.len()).rev() {
// prevents issues with very fast automated test chains // prevents issues with very fast automated test chains
if live_intervals[i-1].0 > live_intervals[i].0 { if live_intervals[i - 1].0 > live_intervals[i].0 {
live_intervals[i].0 = 0; live_intervals[i].0 = 0;
} else { } else {
live_intervals[i].0=live_intervals[i].0-live_intervals[i-1].0; live_intervals[i].0 = live_intervals[i].0 - live_intervals[i - 1].0;
} }
} }
// //
// Remove genesis "interval" // Remove genesis "interval"
if live_intervals.len() > 1 { if live_intervals.len() > 1 {
live_intervals.remove(0); live_intervals.remove(0);
@ -266,7 +266,7 @@ pub fn difficulty_data_to_vector<T>(cursor: T) -> Vec<Result<(u64, Difficulty),
let last_diff = &live_intervals[interval_index].1; let last_diff = &live_intervals[interval_index].1;
last_n.insert(0, Ok((last_ts, last_diff.clone()))); last_n.insert(0, Ok((last_ts, last_diff.clone())));
interval_index = match interval_index { interval_index = match interval_index {
0 => live_intervals.len()-1, 0 => live_intervals.len() - 1,
_ => interval_index - 1, _ => interval_index - 1,
}; };
} }
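The padding loop above is the subtle part of difficulty_data_to_vector. A minimal, runnable sketch of the idea, using plain (timestamp, difficulty) tuples instead of the chain types, replaying observed intervals backwards to synthesize missing history:

// Pad a short history of (timestamp, difficulty) pairs back to `needed`
// entries by replaying the observed block intervals in reverse.
fn pad_difficulty_data(mut last_n: Vec<(u64, u64)>, needed: usize) -> Vec<(u64, u64)> {
    // Input arrives latest-first (like the chain cursor); flip to earliest-first.
    last_n.reverse();
    let missing = needed.saturating_sub(last_n.len());
    if missing > 0 && last_n.len() > 1 {
        // Intervals between consecutive known blocks, earliest to latest.
        let intervals: Vec<u64> = last_n
            .windows(2)
            .map(|w| w[1].0.saturating_sub(w[0].0))
            .collect();
        let mut idx = intervals.len() - 1;
        for _ in 0..missing {
            let (first_ts, first_diff) = last_n[0];
            last_n.insert(0, (first_ts.saturating_sub(intervals[idx]), first_diff));
            idx = if idx == 0 { intervals.len() - 1 } else { idx - 1 };
        }
    }
    last_n
}

fn main() {
    // Three known blocks 60s apart, padded back to six entries.
    let padded = pad_difficulty_data(vec![(1000, 10), (940, 10), (880, 10)], 6);
    assert_eq!(padded.len(), 6);
    assert_eq!(padded[0], (700, 10));
}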

View file

@ -22,20 +22,16 @@
use std::{cmp, error, fmt}; use std::{cmp, error, fmt};
use std::io::{self, Read, Write}; use std::io::{self, Read, Write};
use byteorder::{BigEndian, ByteOrder, ReadBytesExt}; use byteorder::{BigEndian, ByteOrder, ReadBytesExt};
use keychain::{Identifier, BlindingFactor, IDENTIFIER_SIZE}; use keychain::{BlindingFactor, Identifier, IDENTIFIER_SIZE};
use consensus; use consensus;
use consensus::VerifySortOrder; use consensus::VerifySortOrder;
use core::hash::Hashed; use core::hash::Hashed;
use core::transaction::{SWITCH_COMMIT_HASH_SIZE, SwitchCommitHash}; use core::transaction::{SwitchCommitHash, SWITCH_COMMIT_HASH_SIZE};
use util::secp::pedersen::Commitment; use util::secp::pedersen::Commitment;
use util::secp::pedersen::RangeProof; use util::secp::pedersen::RangeProof;
use util::secp::Signature; use util::secp::Signature;
use util::secp::constants::{ use util::secp::constants::{AGG_SIGNATURE_SIZE, MAX_PROOF_SIZE, PEDERSEN_COMMITMENT_SIZE,
MAX_PROOF_SIZE, SECRET_KEY_SIZE};
PEDERSEN_COMMITMENT_SIZE,
AGG_SIGNATURE_SIZE,
SECRET_KEY_SIZE,
};
/// Possible errors deriving from serializing or deserializing. /// Possible errors deriving from serializing or deserializing.
#[derive(Debug)] #[derive(Debug)]
@ -207,7 +203,8 @@ pub trait Writeable {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), Error>; fn write<W: Writer>(&self, writer: &mut W) -> Result<(), Error>;
} }
/// Trait to allow a collection of Writeables to be written in lexicographical sort order. /// Trait to allow a collection of Writeables to be written in lexicographical
/// sort order.
pub trait WriteableSorted { pub trait WriteableSorted {
/// Write the data but sort it first. /// Write the data but sort it first.
fn write_sorted<W: Writer>(&mut self, writer: &mut W) -> Result<(), Error>; fn write_sorted<W: Writer>(&mut self, writer: &mut W) -> Result<(), Error>;
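A small sketch of what the WriteableSorted contract looks like in practice, with stand-in traits (ToBytes here plays the role of Writeable; this is not the real ser API):

use std::io::Write;

trait ToBytes {
    fn to_bytes(&self) -> Vec<u8>;
}

impl ToBytes for u32 {
    fn to_bytes(&self) -> Vec<u8> {
        self.to_be_bytes().to_vec()
    }
}

fn write_sorted<T: ToBytes + Ord, W: Write>(items: &mut Vec<T>, w: &mut W) -> std::io::Result<()> {
    // Consensus expects a canonical order; sorting before writing makes the
    // serialized stream deterministic regardless of insertion order.
    items.sort();
    for it in items.iter() {
        w.write_all(&it.to_bytes())?;
    }
    Ok(())
}

fn main() -> std::io::Result<()> {
    let mut xs = vec![3u32, 1, 2];
    let mut buf = Vec::new();
    write_sorted(&mut xs, &mut buf)?;
    assert_eq!(buf, [0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 3]);
    Ok(())
}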
@ -403,7 +400,6 @@ impl Writeable for Signature {
} }
} }
/// Utility wrapper for an underlying byte Writer. Defines higher level methods /// Utility wrapper for an underlying byte Writer. Defines higher level methods
/// to write numbers, byte vectors, hashes, etc. /// to write numbers, byte vectors, hashes, etc.
struct BinWriter<'a> { struct BinWriter<'a> {

View file

@ -20,9 +20,14 @@ use core::core::target::Difficulty;
use core::global; use core::global;
use core::consensus::*; use core::consensus::*;
// Builds an iterator for next difficulty calculation with the provided // Builds an iterator for next difficulty calculation with the provided
// constant time interval, difficulty and total length. // constant time interval, difficulty and total length.
fn repeat(interval: u64, diff: u64, len: u64, cur_time:Option<u64>) -> Vec<Result<(u64, Difficulty), TargetError>> { fn repeat(
interval: u64,
diff: u64,
len: u64,
cur_time: Option<u64>,
) -> Vec<Result<(u64, Difficulty), TargetError>> {
let cur_time = match cur_time { let cur_time = match cur_time {
Some(t) => t, Some(t) => t,
None => time::get_time().sec as u64, None => time::get_time().sec as u64,
@ -39,25 +44,32 @@ fn repeat(interval: u64, diff: u64, len: u64, cur_time:Option<u64>) -> Vec<Resul
// Creates a new chain with a genesis at a simulated difficulty // Creates a new chain with a genesis at a simulated difficulty
fn create_chain_sim(diff: u64) -> Vec<Result<(u64, Difficulty), TargetError>> { fn create_chain_sim(diff: u64) -> Vec<Result<(u64, Difficulty), TargetError>> {
vec![Ok((time::get_time().sec as u64, Difficulty::from_num(diff)))] vec![
Ok((time::get_time().sec as u64, Difficulty::from_num(diff))),
]
} }
// Adds another 'block' to the iterator, so to speak, with difficulty calculated // Adds another 'block' to the iterator, so to speak, with difficulty calculated
// from the difficulty adjustment at interval seconds from the previous block // from the difficulty adjustment at interval seconds from the previous block
fn add_block(interval: u64, chain_sim: Vec<Result<(u64, Difficulty), TargetError>>) fn add_block(
-> Vec<Result<(u64, Difficulty), TargetError>> { interval: u64,
chain_sim: Vec<Result<(u64, Difficulty), TargetError>>,
) -> Vec<Result<(u64, Difficulty), TargetError>> {
let mut return_chain = chain_sim.clone(); let mut return_chain = chain_sim.clone();
// get last interval // get last interval
let last_elem = chain_sim.first().as_ref().unwrap().as_ref().unwrap(); let last_elem = chain_sim.first().as_ref().unwrap().as_ref().unwrap();
return_chain.insert(0, Ok((last_elem.0+interval, last_elem.clone().1))); return_chain.insert(0, Ok((last_elem.0 + interval, last_elem.clone().1)));
let diff = next_difficulty(return_chain.clone()).unwrap(); let diff = next_difficulty(return_chain.clone()).unwrap();
return_chain[0]=Ok((last_elem.0+interval, diff)); return_chain[0] = Ok((last_elem.0 + interval, diff));
return_chain return_chain
} }
// Adds another n 'blocks' to the iterator, with difficulty calculated // Adds another n 'blocks' to the iterator, with difficulty calculated
fn add_block_repeated(interval: u64, chain_sim: Vec<Result<(u64, Difficulty), TargetError>>, iterations: usize) fn add_block_repeated(
-> Vec<Result<(u64, Difficulty), TargetError>> { interval: u64,
chain_sim: Vec<Result<(u64, Difficulty), TargetError>>,
iterations: usize,
) -> Vec<Result<(u64, Difficulty), TargetError>> {
let mut return_chain = chain_sim.clone(); let mut return_chain = chain_sim.clone();
for _ in 0..iterations { for _ in 0..iterations {
return_chain = add_block(interval, return_chain.clone()); return_chain = add_block(interval, return_chain.clone());
@ -65,19 +77,23 @@ fn add_block_repeated(interval: u64, chain_sim: Vec<Result<(u64, Difficulty), Ta
return_chain return_chain
} }
// Prints the contents of the iterator and its difficulties... useful for tweaking // Prints the contents of the iterator and its difficulties... useful for
fn print_chain_sim(chain_sim: &Vec<Result<(u64, Difficulty), TargetError>>) { // tweaking
let mut chain_sim=chain_sim.clone(); fn print_chain_sim(chain_sim: &Vec<Result<(u64, Difficulty), TargetError>>) {
let mut chain_sim = chain_sim.clone();
chain_sim.reverse(); chain_sim.reverse();
let mut last_time=0; let mut last_time = 0;
chain_sim.iter() chain_sim.iter().enumerate().for_each(|(i, b)| {
.enumerate() let block = b.as_ref().unwrap();
.for_each(|(i, b)| { println!(
let block = b.as_ref().unwrap(); "Height: {}, Time: {}, Interval: {}, Next network difficulty:{}",
println!("Height: {}, Time: {}, Interval: {}, Next network difficulty:{}", i,
i, block.0, block.0-last_time, block.1); block.0,
last_time=block.0; block.0 - last_time,
}); block.1
);
last_time = block.0;
});
} }
fn repeat_offs( fn repeat_offs(
@ -86,10 +102,13 @@ fn repeat_offs(
diff: u64, diff: u64,
len: u64, len: u64,
) -> Vec<Result<(u64, Difficulty), TargetError>> { ) -> Vec<Result<(u64, Difficulty), TargetError>> {
map_vec!(repeat(interval, diff, len, Some(from)), |e| match e.clone() { map_vec!(
Err(e) => Err(e), repeat(interval, diff, len, Some(from)),
Ok((t, d)) => Ok((t, d)), |e| match e.clone() {
}) Err(e) => Err(e),
Ok((t, d)) => Ok((t, d)),
}
)
} }
/// Checks different next_target adjustments and difficulty boundaries /// Checks different next_target adjustments and difficulty boundaries
@ -123,7 +142,7 @@ fn adjustment_scenarios() {
println!("*********************************************************"); println!("*********************************************************");
let just_enough = (DIFFICULTY_ADJUST_WINDOW + MEDIAN_TIME_WINDOW) as usize; let just_enough = (DIFFICULTY_ADJUST_WINDOW + MEDIAN_TIME_WINDOW) as usize;
// Steady difficulty for a good while, then a sudden drop // Steady difficulty for a good while, then a sudden drop
let chain_sim = create_chain_sim(global::initial_block_difficulty()); let chain_sim = create_chain_sim(global::initial_block_difficulty());
let chain_sim = add_block_repeated(10, chain_sim, just_enough as usize); let chain_sim = add_block_repeated(10, chain_sim, just_enough as usize);
let chain_sim = add_block_repeated(600, chain_sim, 10); let chain_sim = add_block_repeated(600, chain_sim, 10);
@ -135,7 +154,7 @@ fn adjustment_scenarios() {
print_chain_sim(&chain_sim); print_chain_sim(&chain_sim);
println!("*********************************************************"); println!("*********************************************************");
// Sudden increase // Sudden increase
let chain_sim = create_chain_sim(global::initial_block_difficulty()); let chain_sim = create_chain_sim(global::initial_block_difficulty());
let chain_sim = add_block_repeated(60, chain_sim, just_enough as usize); let chain_sim = add_block_repeated(60, chain_sim, just_enough as usize);
let chain_sim = add_block_repeated(10, chain_sim, 10); let chain_sim = add_block_repeated(10, chain_sim, 10);
@ -147,7 +166,7 @@ fn adjustment_scenarios() {
print_chain_sim(&chain_sim); print_chain_sim(&chain_sim);
println!("*********************************************************"); println!("*********************************************************");
// Oscillations // Oscillations
let chain_sim = create_chain_sim(global::initial_block_difficulty()); let chain_sim = create_chain_sim(global::initial_block_difficulty());
let chain_sim = add_block_repeated(60, chain_sim, just_enough as usize); let chain_sim = add_block_repeated(60, chain_sim, just_enough as usize);
let chain_sim = add_block_repeated(10, chain_sim, 10); let chain_sim = add_block_repeated(10, chain_sim, 10);
@ -166,7 +185,7 @@ fn adjustment_scenarios() {
#[test] #[test]
fn next_target_adjustment() { fn next_target_adjustment() {
global::set_mining_mode(global::ChainTypes::AutomatedTesting); global::set_mining_mode(global::ChainTypes::AutomatedTesting);
let cur_time = time::get_time().sec as u64; let cur_time = time::get_time().sec as u64;
assert_eq!( assert_eq!(
next_difficulty(vec![Ok((cur_time, Difficulty::one()))]).unwrap(), next_difficulty(vec![Ok((cur_time, Difficulty::one()))]).unwrap(),
@ -194,7 +213,12 @@ fn next_target_adjustment() {
// checking averaging works // checking averaging works
let sec = DIFFICULTY_ADJUST_WINDOW / 2 + MEDIAN_TIME_WINDOW; let sec = DIFFICULTY_ADJUST_WINDOW / 2 + MEDIAN_TIME_WINDOW;
let mut s1 = repeat(60, 500, sec, Some(cur_time)); let mut s1 = repeat(60, 500, sec, Some(cur_time));
let mut s2 = repeat_offs(cur_time+(sec * 60) as u64, 60, 1500, DIFFICULTY_ADJUST_WINDOW / 2); let mut s2 = repeat_offs(
cur_time + (sec * 60) as u64,
60,
1500,
DIFFICULTY_ADJUST_WINDOW / 2,
);
s2.append(&mut s1); s2.append(&mut s1);
assert_eq!(next_difficulty(s2).unwrap(), Difficulty::from_num(1000)); assert_eq!(next_difficulty(s2).unwrap(), Difficulty::from_num(1000));

View file

@ -15,7 +15,7 @@
use std::fs::File; use std::fs::File;
use std::net::SocketAddr; use std::net::SocketAddr;
use std::ops::Deref; use std::ops::Deref;
use std::sync::{Arc, Weak, RwLock}; use std::sync::{Arc, RwLock, Weak};
use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::atomic::{AtomicBool, Ordering};
use rand; use rand;
use rand::Rng; use rand::Rng;
@ -88,8 +88,8 @@ impl p2p::ChainAdapter for NetToChainAdapter {
b.header.height, b.header.height,
addr, addr,
); );
self.process_block(b, addr) self.process_block(b, addr)
} }
fn compact_block_received(&self, cb: core::CompactBlock, addr: SocketAddr) -> bool { fn compact_block_received(&self, cb: core::CompactBlock, addr: SocketAddr) -> bool {
let bhash = cb.hash(); let bhash = cb.hash();
@ -105,7 +105,7 @@ impl p2p::ChainAdapter for NetToChainAdapter {
let block = core::Block::hydrate_from(cb, vec![]); let block = core::Block::hydrate_from(cb, vec![]);
// push the freshly hydrated block through the chain pipeline // push the freshly hydrated block through the chain pipeline
self.process_block(block, addr) self.process_block(block, addr)
} else { } else {
// TODO - do we need to validate the header here? // TODO - do we need to validate the header here?
@ -114,11 +114,7 @@ impl p2p::ChainAdapter for NetToChainAdapter {
tx_pool.retrieve_transactions(&cb) tx_pool.retrieve_transactions(&cb)
}; };
debug!( debug!(LOGGER, "adapter: txs from tx pool - {}", txs.len(),);
LOGGER,
"adapter: txs from tx pool - {}",
txs.len(),
);
// TODO - 3 scenarios here - // TODO - 3 scenarios here -
// 1) we hydrate a valid block (good to go) // 1) we hydrate a valid block (good to go)
@ -131,7 +127,10 @@ impl p2p::ChainAdapter for NetToChainAdapter {
debug!(LOGGER, "adapter: successfully hydrated block from tx pool!"); debug!(LOGGER, "adapter: successfully hydrated block from tx pool!");
self.process_block(block, addr) self.process_block(block, addr)
} else { } else {
debug!(LOGGER, "adapter: block invalid after hydration, requesting full block"); debug!(
LOGGER,
"adapter: block invalid after hydration, requesting full block"
);
self.request_block(&cb.header, &addr); self.request_block(&cb.header, &addr);
true true
} }
@ -142,10 +141,7 @@ impl p2p::ChainAdapter for NetToChainAdapter {
let bhash = bh.hash(); let bhash = bh.hash();
debug!( debug!(
LOGGER, LOGGER,
"Received block header {} at {} from {}, going to process.", "Received block header {} at {} from {}, going to process.", bhash, bh.height, addr,
bhash,
bh.height,
addr,
); );
// pushing the new block header through the header chain pipeline // pushing the new block header through the header chain pipeline
@ -155,7 +151,10 @@ impl p2p::ChainAdapter for NetToChainAdapter {
if let &Err(ref e) = &res { if let &Err(ref e) = &res {
debug!(LOGGER, "Block header {} refused by chain: {:?}", bhash, e); debug!(LOGGER, "Block header {} refused by chain: {:?}", bhash, e);
if e.is_bad_data() { if e.is_bad_data() {
debug!(LOGGER, "header_received: {} is a bad header, resetting header head", bhash); debug!(
LOGGER,
"header_received: {} is a bad header, resetting header head", bhash
);
let _ = w(&self.chain).reset_head(); let _ = w(&self.chain).reset_head();
return false; return false;
} else { } else {
@ -226,22 +225,14 @@ impl p2p::ChainAdapter for NetToChainAdapter {
} }
fn locate_headers(&self, locator: Vec<Hash>) -> Vec<core::BlockHeader> { fn locate_headers(&self, locator: Vec<Hash>) -> Vec<core::BlockHeader> {
debug!( debug!(LOGGER, "locate_headers: {:?}", locator,);
LOGGER,
"locate_headers: {:?}",
locator,
);
let header = match self.find_common_header(locator) { let header = match self.find_common_header(locator) {
Some(header) => header, Some(header) => header,
None => return vec![], None => return vec![],
}; };
debug!( debug!(LOGGER, "locate_headers: common header: {:?}", header.hash(),);
LOGGER,
"locate_headers: common header: {:?}",
header.hash(),
);
// looks like we know one, getting as many following headers as allowed // looks like we know one, getting as many following headers as allowed
let hh = header.height; let hh = header.height;
@ -287,8 +278,10 @@ impl p2p::ChainAdapter for NetToChainAdapter {
reader: read, reader: read,
}), }),
Err(e) => { Err(e) => {
warn!(LOGGER, "Couldn't produce sumtrees data for block {}: {:?}", warn!(
h, e); LOGGER,
"Couldn't produce sumtrees data for block {}: {:?}", h, e
);
None None
} }
} }
@ -307,9 +300,9 @@ impl p2p::ChainAdapter for NetToChainAdapter {
_peer_addr: SocketAddr, _peer_addr: SocketAddr,
) -> bool { ) -> bool {
// TODO check whether we should accept any sumtree now // TODO check whether we should accept any sumtree now
if let Err(e) = w(&self.chain). if let Err(e) =
sumtrees_write(h, rewind_to_output, rewind_to_kernel, sumtree_data) { w(&self.chain).sumtrees_write(h, rewind_to_output, rewind_to_kernel, sumtree_data)
{
error!(LOGGER, "Failed to save sumtree archive: {:?}", e); error!(LOGGER, "Failed to save sumtree archive: {:?}", e);
!e.is_bad_data() !e.is_bad_data()
} else { } else {
@ -362,10 +355,10 @@ impl NetToChainAdapter {
} else { } else {
self.find_common_header(locator[1..].to_vec()) self.find_common_header(locator[1..].to_vec())
} }
}, }
Err(chain::Error::StoreErr(store::Error::NotFoundErr, _)) => { Err(chain::Error::StoreErr(store::Error::NotFoundErr, _)) => {
self.find_common_header(locator[1..].to_vec()) self.find_common_header(locator[1..].to_vec())
}, }
Err(e) => { Err(e) => {
error!(LOGGER, "Could not build header locator: {:?}", e); error!(LOGGER, "Could not build header locator: {:?}", e);
None None
@ -375,31 +368,37 @@ impl NetToChainAdapter {
// pushing the new block through the chain pipeline // pushing the new block through the chain pipeline
// remembering to reset the head if we have a bad block // remembering to reset the head if we have a bad block
fn process_block(&self, b: core::Block, addr: SocketAddr) -> bool { fn process_block(&self, b: core::Block, addr: SocketAddr) -> bool {
let prev_hash = b.header.previous; let prev_hash = b.header.previous;
let bhash = b.hash(); let bhash = b.hash();
let chain = w(&self.chain); let chain = w(&self.chain);
match chain.process_block(b, self.chain_opts()) { match chain.process_block(b, self.chain_opts()) {
Ok(_) => true, Ok(_) => true,
Err(chain::Error::Orphan) => { Err(chain::Error::Orphan) => {
// make sure we did not miss the parent block // make sure we did not miss the parent block
if !self.currently_syncing.load(Ordering::Relaxed) && !chain.is_orphan(&prev_hash) { if !self.currently_syncing.load(Ordering::Relaxed) && !chain.is_orphan(&prev_hash) {
debug!(LOGGER, "adapter: process_block: received an orphan block, checking the parent: {:}", prev_hash); debug!(LOGGER, "adapter: process_block: received an orphan block, checking the parent: {:}", prev_hash);
self.request_block_by_hash(prev_hash, &addr) self.request_block_by_hash(prev_hash, &addr)
} }
true true
} }
Err(ref e) if e.is_bad_data() => { Err(ref e) if e.is_bad_data() => {
debug!(LOGGER, "adapter: process_block: {} is a bad block, resetting head", bhash); debug!(
let _ = chain.reset_head(); LOGGER,
false "adapter: process_block: {} is a bad block, resetting head", bhash
} );
Err(e) => { let _ = chain.reset_head();
debug!(LOGGER, "adapter: process_block :block {} refused by chain: {:?}", bhash, e); false
true }
} Err(e) => {
} debug!(
} LOGGER,
"adapter: process_block :block {} refused by chain: {:?}", bhash, e
);
true
}
}
}
// After receiving a compact block if we cannot successfully hydrate // After receiving a compact block if we cannot successfully hydrate
// it into a full block then fall back to requesting the full block // it into a full block then fall back to requesting the full block
@ -408,12 +407,12 @@ impl NetToChainAdapter {
// TODO - currently only request block from a single peer // TODO - currently only request block from a single peer
// consider additional peers for redundancy? // consider additional peers for redundancy?
fn request_block(&self, bh: &BlockHeader, addr: &SocketAddr) { fn request_block(&self, bh: &BlockHeader, addr: &SocketAddr) {
self.request_block_by_hash(bh.hash(), addr) self.request_block_by_hash(bh.hash(), addr)
} }
fn request_block_by_hash(&self, h: Hash, addr: &SocketAddr) { fn request_block_by_hash(&self, h: Hash, addr: &SocketAddr) {
self.send_block_request_to_peer(h, addr, |peer, h| peer.send_block_request(h)) self.send_block_request_to_peer(h, addr, |peer, h| peer.send_block_request(h))
} }
// After we have received a block header in "header first" propagation // After we have received a block header in "header first" propagation
// we need to go request the block (compact representation) from the // we need to go request the block (compact representation) from the
@ -422,12 +421,16 @@ impl NetToChainAdapter {
// TODO - currently only request block from a single peer // TODO - currently only request block from a single peer
// consider additional peers for redundancy? // consider additional peers for redundancy?
fn request_compact_block(&self, bh: &BlockHeader, addr: &SocketAddr) { fn request_compact_block(&self, bh: &BlockHeader, addr: &SocketAddr) {
self.send_block_request_to_peer(bh.hash(), addr, |peer, h| peer.send_compact_block_request(h)) self.send_block_request_to_peer(bh.hash(), addr, |peer, h| {
peer.send_compact_block_request(h)
})
} }
fn send_block_request_to_peer<F>(&self, h: Hash, addr: &SocketAddr, f: F) fn send_block_request_to_peer<F>(&self, h: Hash, addr: &SocketAddr, f: F)
where F: Fn(&p2p::Peer, Hash) -> Result<(), p2p::Error> { where
match w(&self.chain).block_exists(h) { F: Fn(&p2p::Peer, Hash) -> Result<(), p2p::Error>,
{
match w(&self.chain).block_exists(h) {
Ok(false) => { Ok(false) => {
match wo(&self.peers).get_connected_peer(addr) { match wo(&self.peers).get_connected_peer(addr) {
None => debug!(LOGGER, "send_block_request_to_peer: can't send request to peer {:?}, not connected", addr), None => debug!(LOGGER, "send_block_request_to_peer: can't send request to peer {:?}, not connected", addr),
@ -446,7 +449,7 @@ impl NetToChainAdapter {
Ok(true) => debug!(LOGGER, "send_block_request_to_peer: block {} already known", h), Ok(true) => debug!(LOGGER, "send_block_request_to_peer: block {} already known", h),
Err(e) => error!(LOGGER, "send_block_request_to_peer: failed to check block exists: {:?}", e) Err(e) => error!(LOGGER, "send_block_request_to_peer: failed to check block exists: {:?}", e)
} }
} }
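send_block_request_to_peer threads the actual send through a closure so the block-exists and peer-connected checks live in one place for both request kinds. A stand-alone sketch of the pattern with stand-in types (not the real p2p API):

struct Peer {
    addr: String,
}

impl Peer {
    fn send_block_request(&self, h: u64) -> Result<(), String> {
        println!("{}: full block request for {}", self.addr, h);
        Ok(())
    }

    fn send_compact_block_request(&self, h: u64) -> Result<(), String> {
        println!("{}: compact block request for {}", self.addr, h);
        Ok(())
    }
}

fn send_request_to_peer<F>(peer: &Peer, h: u64, f: F)
where
    F: Fn(&Peer, u64) -> Result<(), String>,
{
    // The shared checks ("do we already have this block?", "is the peer
    // connected?") would live here, once, for both request kinds.
    if let Err(e) = f(peer, h) {
        eprintln!("request to {} failed: {}", peer.addr, e);
    }
}

fn main() {
    let peer = Peer { addr: "10.0.0.1:13414".to_string() };
    send_request_to_peer(&peer, 42, |p, h| p.send_block_request(h));
    send_request_to_peer(&peer, 42, |p, h| p.send_compact_block_request(h));
}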
/// Prepare options for the chain pipeline /// Prepare options for the chain pipeline
fn chain_opts(&self) -> chain::Options { fn chain_opts(&self) -> chain::Options {
@ -490,7 +493,6 @@ impl ChainAdapter for ChainToPoolAndNetAdapter {
// but broadcast full block if we have no txs // but broadcast full block if we have no txs
let cb = b.as_compact_block(); let cb = b.as_compact_block();
if cb.kern_ids.is_empty() { if cb.kern_ids.is_empty() {
// in the interest of testing all code paths // in the interest of testing all code paths
// randomly decide how we send an empty block out // randomly decide how we send an empty block out
// TODO - lock this down once we are comfortable it works... // TODO - lock this down once we are comfortable it works...
@ -574,13 +576,11 @@ impl PoolToChainAdapter {
impl pool::BlockChain for PoolToChainAdapter { impl pool::BlockChain for PoolToChainAdapter {
fn is_unspent(&self, output_ref: &OutputIdentifier) -> Result<Hash, pool::PoolError> { fn is_unspent(&self, output_ref: &OutputIdentifier) -> Result<Hash, pool::PoolError> {
wo(&self.chain) wo(&self.chain).is_unspent(output_ref).map_err(|e| match e {
.is_unspent(output_ref) chain::types::Error::OutputNotFound => pool::PoolError::OutputNotFound,
.map_err(|e| match e { chain::types::Error::OutputSpent => pool::PoolError::OutputSpent,
chain::types::Error::OutputNotFound => pool::PoolError::OutputNotFound, _ => pool::PoolError::GenericPoolError,
chain::types::Error::OutputSpent => pool::PoolError::OutputSpent, })
_ => pool::PoolError::GenericPoolError,
})
} }
fn is_matured(&self, input: &Input, height: u64) -> Result<(), pool::PoolError> { fn is_matured(&self, input: &Input, height: u64) -> Result<(), pool::PoolError> {
@ -590,7 +590,7 @@ impl pool::BlockChain for PoolToChainAdapter {
chain::types::Error::OutputNotFound => pool::PoolError::OutputNotFound, chain::types::Error::OutputNotFound => pool::PoolError::OutputNotFound,
_ => pool::PoolError::GenericPoolError, _ => pool::PoolError::GenericPoolError,
}) })
} }
fn head_header(&self) -> Result<BlockHeader, pool::PoolError> { fn head_header(&self) -> Result<BlockHeader, pool::PoolError> {
wo(&self.chain) wo(&self.chain)

View file

@ -48,7 +48,6 @@ use pow::plugin::PluginMiner;
use itertools::Itertools; use itertools::Itertools;
// Max number of transactions this miner will assemble in a block // Max number of transactions this miner will assemble in a block
const MAX_TX: u32 = 5000; const MAX_TX: u32 = 5000;
@ -202,10 +201,13 @@ impl Miner {
if let Some(s) = job_handle.get_solution() { if let Some(s) = job_handle.get_solution() {
let proof = Proof::new(s.solution_nonces.to_vec()); let proof = Proof::new(s.solution_nonces.to_vec());
let proof_diff = proof.clone().to_difficulty(); let proof_diff = proof.clone().to_difficulty();
trace!(LOGGER, "Found cuckoo solution for nonce {} of difficulty {} (difficulty target {})", trace!(
LOGGER,
"Found cuckoo solution for nonce {} of difficulty {} (difficulty target {})",
s.get_nonce_as_u64(), s.get_nonce_as_u64(),
proof_diff.into_num(), proof_diff.into_num(),
difficulty.into_num()); difficulty.into_num()
);
if proof_diff >= b.header.difficulty { if proof_diff >= b.header.difficulty {
sol = Some(proof); sol = Some(proof);
b.header.nonce = s.get_nonce_as_u64(); b.header.nonce = s.get_nonce_as_u64();
@ -218,8 +220,11 @@ impl Miner {
let stats = job_handle.get_stats(i); let stats = job_handle.get_stats(i);
if let Ok(stat_vec) = stats { if let Ok(stat_vec) = stats {
for s in stat_vec { for s in stat_vec {
if s.in_use == 0 {continue;} if s.in_use == 0 {
let last_solution_time_secs = s.last_solution_time as f64 / 1000000000.0; continue;
}
let last_solution_time_secs =
s.last_solution_time as f64 / 1000000000.0;
let last_hashes_per_sec = 1.0 / last_solution_time_secs; let last_hashes_per_sec = 1.0 / last_solution_time_secs;
let status = match s.has_errored { let status = match s.has_errored {
0 => "OK", 0 => "OK",
@ -274,8 +279,8 @@ impl Miner {
latest_hash: &mut Hash, latest_hash: &mut Hash,
) -> Option<Proof> { ) -> Option<Proof> {
// look for a pow for at most attempt_time_per_block sec on the same block (to // look for a pow for at most attempt_time_per_block sec on the same block (to
// give a chance to new // give a chance to new
// transactions) and as long as the head hasn't changed // transactions) and as long as the head hasn't changed
let deadline = time::get_time().sec + attempt_time_per_block as i64; let deadline = time::get_time().sec + attempt_time_per_block as i64;
let stat_check_interval = 3; let stat_check_interval = 3;
let mut next_stat_check = time::get_time().sec + stat_check_interval; let mut next_stat_check = time::get_time().sec + stat_check_interval;
@ -306,10 +311,13 @@ impl Miner {
let pow_hash = b.hash(); let pow_hash = b.hash();
if let Ok(proof) = plugin_miner.mine(&pow_hash[..]) { if let Ok(proof) = plugin_miner.mine(&pow_hash[..]) {
let proof_diff = proof.clone().to_difficulty(); let proof_diff = proof.clone().to_difficulty();
trace!(LOGGER, "Found cuckoo solution for nonce {} of difficulty {} (difficulty target {})", trace!(
LOGGER,
"Found cuckoo solution for nonce {} of difficulty {} (difficulty target {})",
b.header.nonce, b.header.nonce,
proof_diff.into_num(), proof_diff.into_num(),
b.header.difficulty.into_num()); b.header.difficulty.into_num()
);
if proof_diff >= b.header.difficulty { if proof_diff >= b.header.difficulty {
sol = Some(proof); sol = Some(proof);
break; break;
@ -319,7 +327,9 @@ impl Miner {
if time::get_time().sec >= next_stat_check { if time::get_time().sec >= next_stat_check {
let stats_vec = plugin_miner.get_stats(0).unwrap(); let stats_vec = plugin_miner.get_stats(0).unwrap();
for s in stats_vec.into_iter() { for s in stats_vec.into_iter() {
if s.in_use == 0 {continue;} if s.in_use == 0 {
continue;
}
let last_solution_time_secs = s.last_solution_time as f64 / 1000000000.0; let last_solution_time_secs = s.last_solution_time as f64 / 1000000000.0;
let last_hashes_per_sec = 1.0 / last_solution_time_secs; let last_hashes_per_sec = 1.0 / last_solution_time_secs;
let status = match s.has_errored { let status = match s.has_errored {
@ -336,7 +346,10 @@ impl Miner {
3, 3,
last_hashes_per_sec last_hashes_per_sec
); );
info!(LOGGER, "Mining at {} graphs per second", last_hashes_per_sec); info!(
LOGGER,
"Mining at {} graphs per second", last_hashes_per_sec
);
} }
next_stat_check = time::get_time().sec + stat_check_interval; next_stat_check = time::get_time().sec + stat_check_interval;
} }
@ -443,8 +456,7 @@ impl Miner {
pub fn run_loop(&self, miner_config: MinerConfig, cuckoo_size: u32, proof_size: usize) { pub fn run_loop(&self, miner_config: MinerConfig, cuckoo_size: u32, proof_size: usize) {
info!( info!(
LOGGER, LOGGER,
"(Server ID: {}) Starting miner loop.", "(Server ID: {}) Starting miner loop.", self.debug_output_id
self.debug_output_id
); );
let mut plugin_miner = None; let mut plugin_miner = None;
let mut miner = None; let mut miner = None;
@ -555,8 +567,7 @@ impl Miner {
} else { } else {
debug!( debug!(
LOGGER, LOGGER,
"setting pubkey in miner to pubkey from block_fees - {:?}", "setting pubkey in miner to pubkey from block_fees - {:?}", block_fees
block_fees
); );
key_id = block_fees.key_id(); key_id = block_fees.key_id();
} }
@ -574,7 +585,6 @@ impl Miner {
head: &core::BlockHeader, head: &core::BlockHeader,
key_id: Option<Identifier>, key_id: Option<Identifier>,
) -> Result<(core::Block, BlockFees), Error> { ) -> Result<(core::Block, BlockFees), Error> {
// prepare the block header timestamp // prepare the block header timestamp
let mut now_sec = time::get_time().sec; let mut now_sec = time::get_time().sec;
let head_sec = head.timestamp.to_timespec().sec; let head_sec = head.timestamp.to_timespec().sec;
@ -637,7 +647,10 @@ impl Miner {
// Some other issue, possibly duplicate kernel // Some other issue, possibly duplicate kernel
Err(e) => { Err(e) => {
error!(LOGGER, "Error setting sumtree root to build a block: {:?}", e); error!(
LOGGER,
"Error setting sumtree root to build a block: {:?}", e
);
Err(Error::Chain(chain::Error::Other(format!("{:?}", e)))) Err(Error::Chain(chain::Error::Other(format!("{:?}", e))))
} }
} }
@ -652,12 +665,9 @@ impl Miner {
) -> Result<(core::Output, core::TxKernel, BlockFees), Error> { ) -> Result<(core::Output, core::TxKernel, BlockFees), Error> {
let keychain = Keychain::from_random_seed().unwrap(); let keychain = Keychain::from_random_seed().unwrap();
let key_id = keychain.derive_key_id(1).unwrap(); let key_id = keychain.derive_key_id(1).unwrap();
let (out, kernel) = core::Block::reward_output( let (out, kernel) =
&keychain, core::Block::reward_output(&keychain, &key_id, block_fees.fees, block_fees.height)
&key_id, .unwrap();
block_fees.fees,
block_fees.height,
).unwrap();
Ok((out, kernel, block_fees)) Ok((out, kernel, block_fees))
} }

View file

@ -19,7 +19,7 @@
use std::io::Read; use std::io::Read;
use std::net::SocketAddr; use std::net::SocketAddr;
use std::str; use std::str;
use std::sync::{Arc, mpsc}; use std::sync::{mpsc, Arc};
use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::atomic::{AtomicBool, Ordering};
use std::time::Duration; use std::time::Duration;
use std::thread; use std::thread;
@ -41,7 +41,6 @@ pub fn connect_and_monitor(
seed_list: Box<Fn() -> Vec<SocketAddr> + Send>, seed_list: Box<Fn() -> Vec<SocketAddr> + Send>,
stop: Arc<AtomicBool>, stop: Arc<AtomicBool>,
) { ) {
let _ = thread::Builder::new() let _ = thread::Builder::new()
.name("seed".to_string()) .name("seed".to_string())
.spawn(move || { .spawn(move || {
@ -106,7 +105,7 @@ fn monitor_peers(
debug!( debug!(
LOGGER, LOGGER,
"monitor_peers: unbanned {} after {} seconds", x.addr, interval "monitor_peers: unbanned {} after {} seconds", x.addr, interval
); );
} else { } else {
banned_count += 1; banned_count += 1;
} }
@ -123,7 +122,7 @@ fn monitor_peers(
healthy_count, healthy_count,
banned_count, banned_count,
defunct_count, defunct_count,
); );
// maintenance step first, clean up p2p server peers // maintenance step first, clean up p2p server peers
peers.clean_peers(PEER_MAX_COUNT as usize); peers.clean_peers(PEER_MAX_COUNT as usize);
@ -160,7 +159,6 @@ fn connect_to_seeds(
tx: mpsc::Sender<SocketAddr>, tx: mpsc::Sender<SocketAddr>,
seed_list: Box<Fn() -> Vec<SocketAddr>>, seed_list: Box<Fn() -> Vec<SocketAddr>>,
) { ) {
// check if we have some peers in db // check if we have some peers in db
let peers = peers.find_peers(p2p::State::Healthy, p2p::Capabilities::FULL_HIST, 100); let peers = peers.find_peers(p2p::State::Healthy, p2p::Capabilities::FULL_HIST, 100);
@ -190,7 +188,6 @@ fn listen_for_addrs(
capab: p2p::Capabilities, capab: p2p::Capabilities,
rx: &mpsc::Receiver<SocketAddr>, rx: &mpsc::Receiver<SocketAddr>,
) { ) {
let pc = peers.peer_count(); let pc = peers.peer_count();
for addr in rx.try_iter() { for addr in rx.try_iter() {
if pc < PEER_MAX_COUNT { if pc < PEER_MAX_COUNT {
@ -201,11 +198,11 @@ fn listen_for_addrs(
if let Ok(p) = p.try_read() { if let Ok(p) = p.try_read() {
let _ = p.send_peer_request(capab); let _ = p.send_peer_request(capab);
} }
}, }
Err(e) => { Err(e) => {
debug!(LOGGER, "connect_and_req: {} is Defunct; {:?}", addr, e); debug!(LOGGER, "connect_and_req: {} is Defunct; {:?}", addr, e);
let _ = peers.update_state(addr, p2p::State::Defunct); let _ = peers.update_state(addr, p2p::State::Defunct);
}, }
} }
} }
} }
@ -219,12 +216,16 @@ pub fn web_seeds() -> Box<Fn() -> Vec<SocketAddr> + Send> {
debug!(LOGGER, "Retrieving seed nodes from {}", &SEEDS_URL); debug!(LOGGER, "Retrieving seed nodes from {}", &SEEDS_URL);
// http get, filtering out non 200 results // http get, filtering out non 200 results
let mut res = client.get(SEEDS_URL).send().expect("Failed to resolve seeds."); let mut res = client
.get(SEEDS_URL)
.send()
.expect("Failed to resolve seeds.");
if res.status != hyper::Ok { if res.status != hyper::Ok {
panic!("Failed to resolve seeds, got status {}.", res.status); panic!("Failed to resolve seeds, got status {}.", res.status);
} }
let mut buf = vec![]; let mut buf = vec![];
res.read_to_end(&mut buf).expect("Could not read seed list."); res.read_to_end(&mut buf)
.expect("Could not read seed list.");
let text = str::from_utf8(&buf[..]).expect("Corrupted seed list."); let text = str::from_utf8(&buf[..]).expect("Corrupted seed list.");
let addrs = text.split_whitespace() let addrs = text.split_whitespace()
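The tail of web_seeds (cut off by the hunk above) turns that whitespace-separated text into socket addresses. A minimal sketch of the parsing step, assuming seed entries look like "ip:port" and skipping anything malformed:

use std::net::SocketAddr;

fn parse_seeds(text: &str) -> Vec<SocketAddr> {
    text.split_whitespace()
        .filter_map(|s| s.parse().ok())
        .collect()
}

fn main() {
    let addrs = parse_seeds("10.0.0.1:13414 10.0.0.2:13414 not-an-addr");
    assert_eq!(addrs.len(), 2);
    println!("{:?}", addrs);
}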

View file

@ -25,7 +25,7 @@ use std::time;
use adapters::*; use adapters::*;
use api; use api;
use chain; use chain;
use core::{global, genesis}; use core::{genesis, global};
use miner; use miner;
use p2p; use p2p;
use pool; use pool;
@ -85,11 +85,7 @@ impl Server {
//global::ChainTypes::Testnet2 => genesis::genesis_testnet2(), //global::ChainTypes::Testnet2 => genesis::genesis_testnet2(),
_ => pow::mine_genesis_block(config.mining_config.clone())?, _ => pow::mine_genesis_block(config.mining_config.clone())?,
}; };
info!( info!(LOGGER, "Starting server, genesis block: {}", genesis.hash(),);
LOGGER,
"Starting server, genesis block: {}",
genesis.hash(),
);
let shared_chain = Arc::new(chain::Chain::init( let shared_chain = Arc::new(chain::Chain::init(
config.db_root.clone(), config.db_root.clone(),
@ -122,22 +118,24 @@ impl Server {
net_adapter.init(Arc::downgrade(&p2p_server.peers)); net_adapter.init(Arc::downgrade(&p2p_server.peers));
if config.seeding_type.clone() != Seeding::Programmatic { if config.seeding_type.clone() != Seeding::Programmatic {
let seeder = match config.seeding_type.clone() { let seeder = match config.seeding_type.clone() {
Seeding::None => { Seeding::None => {
warn!(LOGGER, "No seed configured, will stay solo until connected to"); warn!(
LOGGER,
"No seed configured, will stay solo until connected to"
);
seed::predefined_seeds(vec![]) seed::predefined_seeds(vec![])
} }
Seeding::List => { Seeding::List => seed::predefined_seeds(config.seeds.as_mut().unwrap().clone()),
seed::predefined_seeds(config.seeds.as_mut().unwrap().clone()) Seeding::WebStatic => seed::web_seeds(),
}
Seeding::WebStatic => {
seed::web_seeds()
}
_ => unreachable!(), _ => unreachable!(),
}; };
seed::connect_and_monitor( seed::connect_and_monitor(
p2p_server.clone(), config.capabilities, seeder, stop.clone()); p2p_server.clone(),
config.capabilities,
seeder,
stop.clone(),
);
} }
// Defaults to None (optional) in config file. // Defaults to None (optional) in config file.
@ -164,9 +162,9 @@ impl Server {
); );
let p2p_inner = p2p_server.clone(); let p2p_inner = p2p_server.clone();
let _ = thread::Builder::new().name("p2p-server".to_string()).spawn(move || { let _ = thread::Builder::new()
p2p_inner.listen() .name("p2p-server".to_string())
}); .spawn(move || p2p_inner.listen());
info!(LOGGER, "Starting rest apis at: {}", &config.api_http_addr); info!(LOGGER, "Starting rest apis at: {}", &config.api_http_addr);
@ -207,7 +205,11 @@ impl Server {
let currently_syncing = self.currently_syncing.clone(); let currently_syncing = self.currently_syncing.clone();
let mut miner = miner::Miner::new( let mut miner = miner::Miner::new(
config.clone(), self.chain.clone(), self.tx_pool.clone(), self.stop.clone()); config.clone(),
self.chain.clone(),
self.tx_pool.clone(),
self.stop.clone(),
);
miner.set_debug_output_id(format!("Port {}", self.config.p2p_config.port)); miner.set_debug_output_id(format!("Port {}", self.config.p2p_config.port));
let _ = thread::Builder::new() let _ = thread::Builder::new()
.name("miner".to_string()) .name("miner".to_string())

View file

@ -63,50 +63,52 @@ pub fn run_sync(
// in archival nodes (no fast sync) we just consider we have the whole // in archival nodes (no fast sync) we just consider we have the whole
// state already // state already
let have_sumtrees = !fast_sync || head.height > 0 && let have_sumtrees = !fast_sync
header_head.height.saturating_sub(head.height) <= horizon; || head.height > 0 && header_head.height.saturating_sub(head.height) <= horizon;
let syncing = needs_syncing( let syncing = needs_syncing(
currently_syncing.clone(), peers.clone(), chain.clone(), !have_sumtrees); currently_syncing.clone(),
peers.clone(),
chain.clone(),
!have_sumtrees,
);
let current_time = time::now_utc(); let current_time = time::now_utc();
if syncing { if syncing {
// run the header sync every 10s // run the header sync every 10s
if current_time - prev_header_sync > time::Duration::seconds(10) { if current_time - prev_header_sync > time::Duration::seconds(10) {
header_sync( header_sync(peers.clone(), chain.clone());
peers.clone(),
chain.clone(),
);
prev_header_sync = current_time; prev_header_sync = current_time;
} }
// run the body_sync every 5s // run the body_sync every 5s
if have_sumtrees && current_time - prev_body_sync > time::Duration::seconds(5) { if have_sumtrees && current_time - prev_body_sync > time::Duration::seconds(5) {
body_sync( body_sync(peers.clone(), chain.clone());
peers.clone(),
chain.clone(),
);
prev_body_sync = current_time; prev_body_sync = current_time;
} }
} else if !have_sumtrees
} else if !have_sumtrees && && current_time - prev_state_sync > time::Duration::seconds(5 * 60)
current_time - prev_state_sync > time::Duration::seconds(5*60) { {
if let Some(peer) = peers.most_work_peer() { if let Some(peer) = peers.most_work_peer() {
if let Ok(p) = peer.try_read() { if let Ok(p) = peer.try_read() {
debug!(LOGGER, "Header head before sumtree request: {} / {}", debug!(
header_head.height, header_head.last_block_h); LOGGER,
"Header head before sumtree request: {} / {}",
header_head.height,
header_head.last_block_h
);
// just to handle corner case of a too early start // just to handle corner case of a too early start
if header_head.height > horizon { if header_head.height > horizon {
// ask for sumtree at horizon // ask for sumtree at horizon
let mut sumtree_head = chain.get_block_header(&header_head.prev_block_h).unwrap(); let mut sumtree_head =
for _ in 0..horizon-2 { chain.get_block_header(&header_head.prev_block_h).unwrap();
sumtree_head = chain.get_block_header(&sumtree_head.previous).unwrap(); for _ in 0..horizon - 2 {
sumtree_head =
chain.get_block_header(&sumtree_head.previous).unwrap();
} }
p.send_sumtrees_request(sumtree_head.height, sumtree_head.hash()).unwrap(); p.send_sumtrees_request(sumtree_head.height, sumtree_head.hash())
.unwrap();
prev_state_sync = current_time; prev_state_sync = current_time;
} }
} }
@ -122,7 +124,6 @@ pub fn run_sync(
} }
fn body_sync(peers: Arc<Peers>, chain: Arc<chain::Chain>) { fn body_sync(peers: Arc<Peers>, chain: Arc<chain::Chain>) {
let body_head: chain::Tip = chain.head().unwrap(); let body_head: chain::Tip = chain.head().unwrap();
let header_head: chain::Tip = chain.get_header_head().unwrap(); let header_head: chain::Tip = chain.get_header_head().unwrap();
let sync_head: chain::Tip = chain.get_sync_head().unwrap(); let sync_head: chain::Tip = chain.get_sync_head().unwrap();
@ -143,7 +144,6 @@ fn body_sync(peers: Arc<Peers>, chain: Arc<chain::Chain>) {
if header_head.total_difficulty > body_head.total_difficulty { if header_head.total_difficulty > body_head.total_difficulty {
let mut current = chain.get_block_header(&header_head.last_block_h); let mut current = chain.get_block_header(&header_head.last_block_h);
while let Ok(header) = current { while let Ok(header) = current {
// break out of the while loop when we find a header common // break out of the while loop when we find a header common
// between this chain and the current chain // between this chain and the current chain
if let Ok(_) = chain.is_on_current_chain(&header) { if let Ok(_) = chain.is_on_current_chain(&header) {
@ -156,8 +156,8 @@ fn body_sync(peers: Arc<Peers>, chain: Arc<chain::Chain>) {
} }
hashes.reverse(); hashes.reverse();
// if we have 5 peers to sync from then ask for 50 blocks total (peer_count * 10) // if we have 5 peers to sync from then ask for 50 blocks total (peer_count *
// max will be 80 if all 8 peers are advertising more work // 10) max will be 80 if all 8 peers are advertising more work
let peer_count = cmp::min(peers.more_work_peers().len(), 10); let peer_count = cmp::min(peers.more_work_peers().len(), 10);
let block_count = peer_count * 10; let block_count = peer_count * 10;
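A quick check of the request-budget arithmetic in the comment above, as a runnable snippet:

// body_sync's budget: peer count capped at 10, ten blocks per peer.
fn block_request_budget(more_work_peers: usize) -> usize {
    let peer_count = std::cmp::min(more_work_peers, 10);
    peer_count * 10
}

fn main() {
    assert_eq!(block_request_budget(5), 50); // 5 peers -> 50 blocks
    assert_eq!(block_request_budget(8), 80); // all 8 peers -> 80 blocks
    assert_eq!(block_request_budget(25), 100); // capped at 10 peers
}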
@ -180,10 +180,11 @@ fn body_sync(peers: Arc<Peers>, chain: Arc<chain::Chain>) {
header_head.height, header_head.height,
hashes_to_get, hashes_to_get,
peer_count, peer_count,
); );
for hash in hashes_to_get.clone() { for hash in hashes_to_get.clone() {
// TODO - Is there a threshold where we sync from most_work_peer (not more_work_peer)? // TODO - Is there a threshold where we sync from most_work_peer (not
// more_work_peer)?
let peer = peers.more_work_peer(); let peer = peers.more_work_peer();
if let Some(peer) = peer { if let Some(peer) = peer {
if let Ok(peer) = peer.try_read() { if let Ok(peer) = peer.try_read() {
@ -202,10 +203,7 @@ pub fn header_sync(peers: Arc<Peers>, chain: Arc<chain::Chain>) {
if let Ok(p) = peer.try_read() { if let Ok(p) = peer.try_read() {
let peer_difficulty = p.info.total_difficulty.clone(); let peer_difficulty = p.info.total_difficulty.clone();
if peer_difficulty > difficulty { if peer_difficulty > difficulty {
let _ = request_headers( let _ = request_headers(peer.clone(), chain.clone());
peer.clone(),
chain.clone(),
);
} }
} }
} }
@ -213,17 +211,12 @@ pub fn header_sync(peers: Arc<Peers>, chain: Arc<chain::Chain>) {
} }
/// Request some block headers from a peer to advance us. /// Request some block headers from a peer to advance us.
fn request_headers( fn request_headers(peer: Arc<RwLock<Peer>>, chain: Arc<chain::Chain>) -> Result<(), Error> {
peer: Arc<RwLock<Peer>>,
chain: Arc<chain::Chain>,
) -> Result<(), Error> {
let locator = get_locator(chain)?; let locator = get_locator(chain)?;
if let Ok(peer) = peer.try_read() { if let Ok(peer) = peer.try_read() {
debug!( debug!(
LOGGER, LOGGER,
"sync: request_headers: asking {} for headers, {:?}", "sync: request_headers: asking {} for headers, {:?}", peer.info.addr, locator,
peer.info.addr,
locator,
); );
let _ = peer.send_header_request(locator); let _ = peer.send_header_request(locator);
} else { } else {
@ -236,15 +229,14 @@ fn request_headers(
Ok(()) Ok(())
} }
/// Whether we're currently syncing the chain or we're fully caught up and /// Whether we're currently syncing the chain or we're fully caught up and
/// just receiving blocks through gossip. /// just receiving blocks through gossip.
pub fn needs_syncing( pub fn needs_syncing(
currently_syncing: Arc<AtomicBool>, currently_syncing: Arc<AtomicBool>,
peers: Arc<Peers>, peers: Arc<Peers>,
chain: Arc<chain::Chain>, chain: Arc<chain::Chain>,
header_only: bool) -> bool { header_only: bool,
) -> bool {
let local_diff = if header_only { let local_diff = if header_only {
chain.total_header_difficulty().unwrap() chain.total_header_difficulty().unwrap()
} else { } else {
@ -252,15 +244,22 @@ pub fn needs_syncing(
}; };
let peer = peers.most_work_peer(); let peer = peers.most_work_peer();
// if we're already syncing, we're caught up if no peer has a higher // if we're already syncing, we're caught up if no peer has a higher
// difficulty than us // difficulty than us
if currently_syncing.load(Ordering::Relaxed) { if currently_syncing.load(Ordering::Relaxed) {
if let Some(peer) = peer { if let Some(peer) = peer {
if let Ok(peer) = peer.try_read() { if let Ok(peer) = peer.try_read() {
debug!(LOGGER, "needs_syncing {} {} {}", local_diff, peer.info.total_difficulty, header_only); debug!(
LOGGER,
"needs_syncing {} {} {}", local_diff, peer.info.total_difficulty, header_only
);
if peer.info.total_difficulty <= local_diff { if peer.info.total_difficulty <= local_diff {
info!(LOGGER, "synchronized at {:?} @ {:?}", local_diff, chain.head().unwrap().height); info!(
LOGGER,
"synchronized at {:?} @ {:?}",
local_diff,
chain.head().unwrap().height
);
currently_syncing.store(false, Ordering::Relaxed); currently_syncing.store(false, Ordering::Relaxed);
if !header_only { if !header_only {
let _ = chain.reset_head(); let _ = chain.reset_head();
@ -327,11 +326,7 @@ fn get_locator_heights(height: u64) -> Vec<u64> {
while current > 0 { while current > 0 {
heights.push(current); heights.push(current);
let next = 2u64.pow(heights.len() as u32); let next = 2u64.pow(heights.len() as u32);
current = if current > next { current = if current > next { current - next } else { 0 }
current - next
} else {
0
}
} }
heights.push(0); heights.push(0);
heights heights
@ -353,10 +348,13 @@ mod test {
get_locator_heights(1000), get_locator_heights(1000),
vec![1000, 998, 994, 986, 970, 938, 874, 746, 490, 0] vec![1000, 998, 994, 986, 970, 938, 874, 746, 490, 0]
); );
// check the locator is still a manageable length, even for large numbers of headers // check the locator is still a manageable length, even for large numbers of
// headers
assert_eq!( assert_eq!(
get_locator_heights(10000), get_locator_heights(10000),
vec![10000, 9998, 9994, 9986, 9970, 9938, 9874, 9746, 9490, 8978, 7954, 5906, 1810, 0] vec![
10000, 9998, 9994, 9986, 9970, 9938, 9874, 9746, 9490, 8978, 7954, 5906, 1810, 0
]
); );
} }
} }
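The back-off in get_locator_heights doubles the step each time (2, 4, 8, ...), so the locator stays roughly logarithmic in chain height. The function extracted verbatim from the hunk above, with the test vector checked in main:

fn get_locator_heights(height: u64) -> Vec<u64> {
    let mut current = height;
    let mut heights = vec![];
    while current > 0 {
        heights.push(current);
        // Step doubles with every entry pushed: 2^1, 2^2, 2^3, ...
        let next = 2u64.pow(heights.len() as u32);
        current = if current > next { current - next } else { 0 }
    }
    heights.push(0); // always anchor the locator at genesis
    heights
}

fn main() {
    assert_eq!(
        get_locator_heights(1000),
        vec![1000, 998, 994, 986, 970, 938, 874, 746, 490, 0]
    );
    // ~log2(height) entries even for long chains.
    println!("{:?}", get_locator_heights(10000));
}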

View file

@ -17,13 +17,13 @@ extern crate slog;
extern crate grin_api as api; extern crate grin_api as api;
extern crate grin_chain as chain; extern crate grin_chain as chain;
extern crate grin_config as config;
extern crate grin_core as core; extern crate grin_core as core;
extern crate grin_grin as grin; extern crate grin_grin as grin;
extern crate grin_p2p as p2p; extern crate grin_p2p as p2p;
extern crate grin_pow as pow; extern crate grin_pow as pow;
extern crate grin_util as util; extern crate grin_util as util;
extern crate grin_wallet as wallet; extern crate grin_wallet as wallet;
extern crate grin_config as config;
mod framework; mod framework;
@ -33,7 +33,7 @@ use std::sync::{Arc, Mutex};
use core::global; use core::global;
use core::global::ChainTypes; use core::global::ChainTypes;
use framework::{LocalServerContainer,LocalServerContainerConfig}; use framework::{LocalServerContainer, LocalServerContainerConfig};
use util::{init_test_logger, LOGGER}; use util::{init_test_logger, LOGGER};
#[test] #[test]
@ -47,9 +47,11 @@ fn simple_server_wallet() {
// Run a separate coinbase wallet for coinbase transactions // Run a separate coinbase wallet for coinbase transactions
let mut coinbase_config = LocalServerContainerConfig::default(); let mut coinbase_config = LocalServerContainerConfig::default();
coinbase_config.name = String::from("coinbase_wallet_api"); coinbase_config.name = String::from("coinbase_wallet_api");
coinbase_config.wallet_validating_node_url=String::from("http://127.0.0.1:40001"); coinbase_config.wallet_validating_node_url = String::from("http://127.0.0.1:40001");
coinbase_config.wallet_port = 50002; coinbase_config.wallet_port = 50002;
let coinbase_wallet = Arc::new(Mutex::new(LocalServerContainer::new(coinbase_config).unwrap())); let coinbase_wallet = Arc::new(Mutex::new(
LocalServerContainer::new(coinbase_config).unwrap(),
));
let _ = thread::spawn(move || { let _ = thread::spawn(move || {
let mut w = coinbase_wallet.lock().unwrap(); let mut w = coinbase_wallet.lock().unwrap();
@ -62,11 +64,8 @@ fn simple_server_wallet() {
server_config.api_server_port = 40001; server_config.api_server_port = 40001;
server_config.start_miner = true; server_config.start_miner = true;
server_config.start_wallet = false; server_config.start_wallet = false;
server_config.coinbase_wallet_address = String::from(format!( server_config.coinbase_wallet_address =
"http://{}:{}", String::from(format!("http://{}:{}", server_config.base_addr, 50002));
server_config.base_addr,
50002
));
let mut server_one = LocalServerContainer::new(server_config.clone()).unwrap(); let mut server_one = LocalServerContainer::new(server_config.clone()).unwrap();
// Spawn server and let it run for a bit // Spawn server and let it run for a bit
@ -90,26 +89,29 @@ fn simple_server_wallet() {
// Be sure that at least a block is mined by Travis // Be sure that at least a block is mined by Travis
let mut current_tip = get_tip(&base_addr, api_server_port).unwrap(); let mut current_tip = get_tip(&base_addr, api_server_port).unwrap();
while current_tip.height == 0 { while current_tip.height == 0 {
thread::sleep(time::Duration::from_millis(1000)); thread::sleep(time::Duration::from_millis(1000));
current_tip = get_tip(&base_addr, api_server_port).unwrap(); current_tip = get_tip(&base_addr, api_server_port).unwrap();
} }
warn!(LOGGER, "Testing block handler"); warn!(LOGGER, "Testing block handler");
let last_block_by_height = get_block_by_height(&base_addr, api_server_port, current_tip.height); let last_block_by_height = get_block_by_height(&base_addr, api_server_port, current_tip.height);
assert!(last_block_by_height.is_ok()); assert!(last_block_by_height.is_ok());
let last_block_by_height_compact = get_block_by_height_compact(&base_addr, api_server_port, current_tip.height); let last_block_by_height_compact =
get_block_by_height_compact(&base_addr, api_server_port, current_tip.height);
assert!(last_block_by_height_compact.is_ok()); assert!(last_block_by_height_compact.is_ok());
let block_hash = current_tip.last_block_pushed; let block_hash = current_tip.last_block_pushed;
let last_block_by_hash = get_block_by_hash(&base_addr, api_server_port, &block_hash); let last_block_by_hash = get_block_by_hash(&base_addr, api_server_port, &block_hash);
assert!(last_block_by_hash.is_ok()); assert!(last_block_by_hash.is_ok());
let last_block_by_hash_compact = get_block_by_hash_compact(&base_addr, api_server_port, &block_hash); let last_block_by_hash_compact =
get_block_by_hash_compact(&base_addr, api_server_port, &block_hash);
assert!(last_block_by_hash_compact.is_ok()); assert!(last_block_by_hash_compact.is_ok());
warn!(LOGGER, "Testing chain utxo handler"); warn!(LOGGER, "Testing chain utxo handler");
let start_height = 0; let start_height = 0;
let end_height = current_tip.height; let end_height = current_tip.height;
let utxos_by_height = get_utxos_by_height(&base_addr, api_server_port, start_height, end_height); let utxos_by_height =
get_utxos_by_height(&base_addr, api_server_port, start_height, end_height);
assert!(utxos_by_height.is_ok()); assert!(utxos_by_height.is_ok());
let ids = get_ids_from_block_outputs(utxos_by_height.unwrap()); let ids = get_ids_from_block_outputs(utxos_by_height.unwrap());
let utxos_by_ids1 = get_utxos_by_ids1(&base_addr, api_server_port, ids.clone()); let utxos_by_ids1 = get_utxos_by_ids1(&base_addr, api_server_port, ids.clone());
@ -169,7 +171,10 @@ fn test_p2p() {
server_config_two.start_wallet = false; server_config_two.start_wallet = false;
server_config_two.is_seeding = false; server_config_two.is_seeding = false;
let mut server_two = LocalServerContainer::new(server_config_two.clone()).unwrap(); let mut server_two = LocalServerContainer::new(server_config_two.clone()).unwrap();
server_two.add_peer(format!("{}:{}", server_config_one.base_addr, server_config_one.p2p_server_port)); server_two.add_peer(format!(
"{}:{}",
server_config_one.base_addr, server_config_one.p2p_server_port
));
let _ = thread::spawn(move || server_two.run_server(120)); let _ = thread::spawn(move || server_two.run_server(120));
// Let them do the handshake // Let them do the handshake
@ -191,7 +196,10 @@ fn test_p2p() {
assert_eq!(peers_all.unwrap().len(), 1); assert_eq!(peers_all.unwrap().len(), 1);
// Check that the peer status is Healthy // Check that the peer status is Healthy
let addr = format!("{}:{}", server_config_two.base_addr, server_config_two.p2p_server_port); let addr = format!(
"{}:{}",
server_config_two.base_addr, server_config_two.p2p_server_port
);
let peer = get_peer(&base_addr, api_server_port, &addr); let peer = get_peer(&base_addr, api_server_port, &addr);
assert!(peer.is_ok()); assert!(peer.is_ok());
assert_eq!(peer.unwrap().flags, p2p::State::Healthy); assert_eq!(peer.unwrap().flags, p2p::State::Healthy);
@ -239,85 +247,171 @@ fn get_status(base_addr: &String, api_server_port: u16) -> Result<api::Status, E
} }
// Block handler functions // Block handler functions
fn get_block_by_height(base_addr: &String, api_server_port: u16, height: u64) -> Result<api::BlockPrintable, Error> { fn get_block_by_height(
let url = format!("http://{}:{}/v1/blocks/{}", base_addr, api_server_port, height); base_addr: &String,
api_server_port: u16,
height: u64,
) -> Result<api::BlockPrintable, Error> {
let url = format!(
"http://{}:{}/v1/blocks/{}",
base_addr, api_server_port, height
);
api::client::get::<api::BlockPrintable>(url.as_str()).map_err(|e| Error::API(e)) api::client::get::<api::BlockPrintable>(url.as_str()).map_err(|e| Error::API(e))
} }
fn get_block_by_height_compact(base_addr: &String, api_server_port: u16, height: u64) -> Result<api::CompactBlockPrintable, Error> { fn get_block_by_height_compact(
let url = format!("http://{}:{}/v1/blocks/{}?compact", base_addr, api_server_port, height); base_addr: &String,
api_server_port: u16,
height: u64,
) -> Result<api::CompactBlockPrintable, Error> {
let url = format!(
"http://{}:{}/v1/blocks/{}?compact",
base_addr, api_server_port, height
);
api::client::get::<api::CompactBlockPrintable>(url.as_str()).map_err(|e| Error::API(e)) api::client::get::<api::CompactBlockPrintable>(url.as_str()).map_err(|e| Error::API(e))
} }
fn get_block_by_hash(base_addr: &String, api_server_port: u16, block_hash: &String) -> Result<api::BlockPrintable, Error> { fn get_block_by_hash(
let url = format!("http://{}:{}/v1/blocks/{}", base_addr, api_server_port, block_hash); base_addr: &String,
api_server_port: u16,
block_hash: &String,
) -> Result<api::BlockPrintable, Error> {
let url = format!(
"http://{}:{}/v1/blocks/{}",
base_addr, api_server_port, block_hash
);
api::client::get::<api::BlockPrintable>(url.as_str()).map_err(|e| Error::API(e)) api::client::get::<api::BlockPrintable>(url.as_str()).map_err(|e| Error::API(e))
} }
fn get_block_by_hash_compact(base_addr: &String, api_server_port: u16, block_hash: &String) -> Result<api::CompactBlockPrintable, Error> { fn get_block_by_hash_compact(
let url = format!("http://{}:{}/v1/blocks/{}?compact", base_addr, api_server_port, block_hash); base_addr: &String,
api_server_port: u16,
block_hash: &String,
) -> Result<api::CompactBlockPrintable, Error> {
let url = format!(
"http://{}:{}/v1/blocks/{}?compact",
base_addr, api_server_port, block_hash
);
api::client::get::<api::CompactBlockPrintable>(url.as_str()).map_err(|e| Error::API(e)) api::client::get::<api::CompactBlockPrintable>(url.as_str()).map_err(|e| Error::API(e))
} }
// Chain utxo handler functions // Chain utxo handler functions
fn get_utxos_by_ids1(base_addr: &String, api_server_port: u16, ids: Vec<String>) -> Result<Vec<api::Utxo>, Error> { fn get_utxos_by_ids1(
let url = format!("http://{}:{}/v1/chain/utxos/byids?id={}", base_addr, api_server_port, ids.join(",")); base_addr: &String,
api_server_port: u16,
ids: Vec<String>,
) -> Result<Vec<api::Utxo>, Error> {
let url = format!(
"http://{}:{}/v1/chain/utxos/byids?id={}",
base_addr,
api_server_port,
ids.join(",")
);
api::client::get::<Vec<api::Utxo>>(url.as_str()).map_err(|e| Error::API(e)) api::client::get::<Vec<api::Utxo>>(url.as_str()).map_err(|e| Error::API(e))
} }
fn get_utxos_by_ids2(base_addr: &String, api_server_port: u16, ids: Vec<String>) -> Result<Vec<api::Utxo>, Error> { fn get_utxos_by_ids2(
base_addr: &String,
api_server_port: u16,
ids: Vec<String>,
) -> Result<Vec<api::Utxo>, Error> {
let mut ids_string: String = String::from(""); let mut ids_string: String = String::from("");
for id in ids { for id in ids {
ids_string = ids_string + "?id=" + &id; ids_string = ids_string + "?id=" + &id;
} }
let ids_string = String::from(&ids_string[1..ids_string.len()]); let ids_string = String::from(&ids_string[1..ids_string.len()]);
println!("{}", ids_string); println!("{}", ids_string);
let url = format!("http://{}:{}/v1/chain/utxos/byids?{}", base_addr, api_server_port, ids_string); let url = format!(
"http://{}:{}/v1/chain/utxos/byids?{}",
base_addr, api_server_port, ids_string
);
api::client::get::<Vec<api::Utxo>>(url.as_str()).map_err(|e| Error::API(e)) api::client::get::<Vec<api::Utxo>>(url.as_str()).map_err(|e| Error::API(e))
} }
fn get_utxos_by_height(base_addr: &String, api_server_port: u16, start_height: u64, end_height: u64) -> Result<Vec<api::BlockOutputs>, Error> { fn get_utxos_by_height(
let url = format!("http://{}:{}/v1/chain/utxos/byheight?start_height={}&end_height={}", base_addr, api_server_port, start_height, end_height); base_addr: &String,
api_server_port: u16,
start_height: u64,
end_height: u64,
) -> Result<Vec<api::BlockOutputs>, Error> {
let url = format!(
"http://{}:{}/v1/chain/utxos/byheight?start_height={}&end_height={}",
base_addr, api_server_port, start_height, end_height
);
api::client::get::<Vec<api::BlockOutputs>>(url.as_str()).map_err(|e| Error::API(e)) api::client::get::<Vec<api::BlockOutputs>>(url.as_str()).map_err(|e| Error::API(e))
} }
// Sumtree handler functions // Sumtree handler functions
fn get_sumtree_roots(base_addr: &String, api_server_port: u16) -> Result<api::SumTrees, Error> { fn get_sumtree_roots(base_addr: &String, api_server_port: u16) -> Result<api::SumTrees, Error> {
let url = format!("http://{}:{}/v1/pmmrtrees/roots", base_addr, api_server_port); let url = format!(
"http://{}:{}/v1/pmmrtrees/roots",
base_addr, api_server_port
);
api::client::get::<api::SumTrees>(url.as_str()).map_err(|e| Error::API(e)) api::client::get::<api::SumTrees>(url.as_str()).map_err(|e| Error::API(e))
} }
fn get_sumtree_lastutxos(base_addr: &String, api_server_port: u16, n: u64) -> Result<Vec<api::PmmrTreeNode>, Error> { fn get_sumtree_lastutxos(
base_addr: &String,
api_server_port: u16,
n: u64,
) -> Result<Vec<api::PmmrTreeNode>, Error> {
let url: String; let url: String;
if n == 0 { if n == 0 {
url = format!("http://{}:{}/v1/pmmrtrees/lastutxos", base_addr, api_server_port); url = format!(
"http://{}:{}/v1/pmmrtrees/lastutxos",
base_addr, api_server_port
);
} else { } else {
url = format!("http://{}:{}/v1/pmmrtrees/lastutxos?n={}", base_addr, api_server_port, n); url = format!(
"http://{}:{}/v1/pmmrtrees/lastutxos?n={}",
base_addr, api_server_port, n
);
} }
api::client::get::<Vec<api::PmmrTreeNode>>(url.as_str()).map_err(|e| Error::API(e)) api::client::get::<Vec<api::PmmrTreeNode>>(url.as_str()).map_err(|e| Error::API(e))
} }
fn get_sumtree_lastrangeproofs(base_addr: &String, api_server_port: u16, n: u64) -> Result<Vec<api::PmmrTreeNode>, Error> { fn get_sumtree_lastrangeproofs(
base_addr: &String,
api_server_port: u16,
n: u64,
) -> Result<Vec<api::PmmrTreeNode>, Error> {
let url: String; let url: String;
if n == 0 { if n == 0 {
url = format!("http://{}:{}/v1/pmmrtrees/lastrangeproofs", base_addr, api_server_port); url = format!(
"http://{}:{}/v1/pmmrtrees/lastrangeproofs",
base_addr, api_server_port
);
} else { } else {
url = format!("http://{}:{}/v1/pmmrtrees/lastrangeproofs?n={}", base_addr, api_server_port, n); url = format!(
"http://{}:{}/v1/pmmrtrees/lastrangeproofs?n={}",
base_addr, api_server_port, n
);
} }
api::client::get::<Vec<api::PmmrTreeNode>>(url.as_str()).map_err(|e| Error::API(e)) api::client::get::<Vec<api::PmmrTreeNode>>(url.as_str()).map_err(|e| Error::API(e))
} }
fn getsumtree_lastkernels(base_addr: &String, api_server_port: u16, n: u64) -> Result<Vec<api::PmmrTreeNode>, Error> { fn getsumtree_lastkernels(
base_addr: &String,
api_server_port: u16,
n: u64,
) -> Result<Vec<api::PmmrTreeNode>, Error> {
let url: String; let url: String;
if n == 0 { if n == 0 {
url = format!("http://{}:{}/v1/pmmrtrees/lastkernels", base_addr, api_server_port); url = format!(
"http://{}:{}/v1/pmmrtrees/lastkernels",
base_addr, api_server_port
);
} else { } else {
url = format!("http://{}:{}/v1/pmmrtrees/lastkernels?n={}", base_addr, api_server_port, n); url = format!(
"http://{}:{}/v1/pmmrtrees/lastkernels?n={}",
base_addr, api_server_port, n
);
} }
api::client::get::<Vec<api::PmmrTreeNode>>(url.as_str()).map_err(|e| Error::API(e)) api::client::get::<Vec<api::PmmrTreeNode>>(url.as_str()).map_err(|e| Error::API(e))
} }
// Helper function to get a vec of commitment output ids from a vec of block outputs // Helper function to get a vec of commitment output ids from a vec of block
// outputs
fn get_ids_from_block_outputs(block_outputs: Vec<api::BlockOutputs>) -> Vec<String> { fn get_ids_from_block_outputs(block_outputs: Vec<api::BlockOutputs>) -> Vec<String> {
let mut ids: Vec<String> = Vec::new(); let mut ids: Vec<String> = Vec::new();
for block_output in block_outputs { for block_output in block_outputs {
@ -331,32 +425,51 @@ fn get_ids_from_block_outputs(block_outputs: Vec<api::BlockOutputs>) -> Vec<Stri
pub fn ban_peer(base_addr: &String, api_server_port: u16, peer_addr: &String) -> Result<(), Error> { pub fn ban_peer(base_addr: &String, api_server_port: u16, peer_addr: &String) -> Result<(), Error> {
let url = format!( let url = format!(
"http://{}:{}/v1/peers/{}/ban", base_addr, api_server_port, peer_addr "http://{}:{}/v1/peers/{}/ban",
base_addr, api_server_port, peer_addr
); );
api::client::post(url.as_str(), &"").map_err(|e| Error::API(e)) api::client::post(url.as_str(), &"").map_err(|e| Error::API(e))
} }
pub fn unban_peer(base_addr: &String, api_server_port: u16, peer_addr: &String) -> Result<(), Error> { pub fn unban_peer(
base_addr: &String,
api_server_port: u16,
peer_addr: &String,
) -> Result<(), Error> {
let url = format!( let url = format!(
"http://{}:{}/v1/peers/{}/unban", "http://{}:{}/v1/peers/{}/unban",
base_addr, base_addr, api_server_port, peer_addr
api_server_port,
peer_addr
); );
api::client::post(url.as_str(), &"").map_err(|e| Error::API(e)) api::client::post(url.as_str(), &"").map_err(|e| Error::API(e))
} }
pub fn get_peer(base_addr: &String, api_server_port: u16, peer_addr: &String) -> Result<p2p::PeerData, Error> { pub fn get_peer(
let url = format!("http://{}:{}/v1/peers/{}", base_addr, api_server_port, peer_addr); base_addr: &String,
api_server_port: u16,
peer_addr: &String,
) -> Result<p2p::PeerData, Error> {
let url = format!(
"http://{}:{}/v1/peers/{}",
base_addr, api_server_port, peer_addr
);
api::client::get::<p2p::PeerData>(url.as_str()).map_err(|e| Error::API(e)) api::client::get::<p2p::PeerData>(url.as_str()).map_err(|e| Error::API(e))
} }
pub fn get_connected_peers(base_addr: &String, api_server_port: u16) -> Result<Vec<p2p::PeerInfo>, Error> { pub fn get_connected_peers(
let url = format!("http://{}:{}/v1/peers/connected", base_addr, api_server_port); base_addr: &String,
api_server_port: u16,
) -> Result<Vec<p2p::PeerInfo>, Error> {
let url = format!(
"http://{}:{}/v1/peers/connected",
base_addr, api_server_port
);
api::client::get::<Vec<p2p::PeerInfo>>(url.as_str()).map_err(|e| Error::API(e)) api::client::get::<Vec<p2p::PeerInfo>>(url.as_str()).map_err(|e| Error::API(e))
} }
pub fn get_all_peers(base_addr: &String, api_server_port: u16) -> Result<Vec<p2p::PeerData>, Error> { pub fn get_all_peers(
base_addr: &String,
api_server_port: u16,
) -> Result<Vec<p2p::PeerData>, Error> {
let url = format!("http://{}:{}/v1/peers/all", base_addr, api_server_port); let url = format!("http://{}:{}/v1/peers/all", base_addr, api_server_port);
api::client::get::<Vec<p2p::PeerData>>(url.as_str()).map_err(|e| Error::API(e)) api::client::get::<Vec<p2p::PeerData>>(url.as_str()).map_err(|e| Error::API(e))
} }
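Taken together, the helpers above are thin wrappers over the node's REST endpoints. A hypothetical composition (the peer address is illustrative, not taken from the tests):

// Ban, unban, then list peers via the wrappers defined above.
fn peer_roundtrip(base_addr: &String, api_server_port: u16) -> Result<(), Error> {
    let peer_addr = String::from("127.0.0.1:40002"); // illustrative address
    ban_peer(base_addr, api_server_port, &peer_addr)?; // POST /v1/peers/{addr}/ban
    unban_peer(base_addr, api_server_port, &peer_addr)?; // POST /v1/peers/{addr}/unban
    let peers = get_all_peers(base_addr, api_server_port)?; // GET /v1/peers/all
    assert!(!peers.is_empty());
    Ok(())
}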

View file

@ -119,7 +119,6 @@ impl Default for LocalServerContainerConfig {
} }
} }
/// A top-level container to hold everything that might be running /// A top-level container to hold everything that might be running
/// on a server, i.e. server, wallet in send or receive mode /// on a server, i.e. server, wallet in send or receive mode
@ -151,7 +150,7 @@ pub struct LocalServerContainer {
pub working_dir: String, pub working_dir: String,
// Wallet configuration // Wallet configuration
pub wallet_config:WalletConfig, pub wallet_config: WalletConfig,
} }
impl LocalServerContainer { impl LocalServerContainer {
@ -166,19 +165,17 @@ impl LocalServerContainer {
wallet_config.api_listen_port = format!("{}", config.wallet_port); wallet_config.api_listen_port = format!("{}", config.wallet_port);
wallet_config.check_node_api_http_addr = config.wallet_validating_node_url.clone(); wallet_config.check_node_api_http_addr = config.wallet_validating_node_url.clone();
wallet_config.data_file_dir = working_dir.clone(); wallet_config.data_file_dir = working_dir.clone();
Ok( Ok(LocalServerContainer {
LocalServerContainer { config: config,
config: config, p2p_server_stats: None,
p2p_server_stats: None, api_server: None,
api_server: None, server_is_running: false,
server_is_running: false, server_is_mining: false,
server_is_mining: false, wallet_is_running: false,
wallet_is_running: false, working_dir: working_dir,
working_dir: working_dir, peer_list: Vec::new(),
peer_list: Vec::new(), wallet_config: wallet_config,
wallet_config:wallet_config, })
},
)
} }
pub fn run_server(&mut self, duration_in_seconds: u64) -> grin::ServerStats { pub fn run_server(&mut self, duration_in_seconds: u64) -> grin::ServerStats {
@ -192,21 +189,19 @@ impl LocalServerContainer {
seeds = vec![self.config.seed_addr.to_string()]; seeds = vec![self.config.seed_addr.to_string()];
} }
let s = grin::Server::new( let s = grin::Server::new(grin::ServerConfig {
grin::ServerConfig { api_http_addr: api_addr,
api_http_addr: api_addr, db_root: format!("{}/.grin", self.working_dir),
db_root: format!("{}/.grin", self.working_dir), p2p_config: p2p::P2PConfig {
p2p_config: p2p::P2PConfig { port: self.config.p2p_server_port,
port: self.config.p2p_server_port, ..p2p::P2PConfig::default()
..p2p::P2PConfig::default()
},
seeds: Some(seeds),
seeding_type: seeding_type,
chain_type: core::global::ChainTypes::AutomatedTesting,
skip_sync_wait:Some(true),
..Default::default()
}, },
).unwrap(); seeds: Some(seeds),
seeding_type: seeding_type,
chain_type: core::global::ChainTypes::AutomatedTesting,
skip_sync_wait: Some(true),
..Default::default()
}).unwrap();
self.p2p_server_stats = Some(s.get_server_stats().unwrap()); self.p2p_server_stats = Some(s.get_server_stats().unwrap());
@ -262,7 +257,6 @@ impl LocalServerContainer {
let _seed = blake2::blake2b::blake2b(32, &[], seed.as_bytes()); let _seed = blake2::blake2b::blake2b(32, &[], seed.as_bytes());
println!( println!(
"Starting the Grin wallet receiving daemon on {} ", "Starting the Grin wallet receiving daemon on {} ",
self.config.wallet_port self.config.wallet_port
@ -271,57 +265,59 @@ impl LocalServerContainer {
self.wallet_config = WalletConfig::default(); self.wallet_config = WalletConfig::default();
self.wallet_config.api_listen_port = format!("{}", self.config.wallet_port); self.wallet_config.api_listen_port = format!("{}", self.config.wallet_port);
self.wallet_config.check_node_api_http_addr = self.config.wallet_validating_node_url.clone(); self.wallet_config.check_node_api_http_addr =
self.config.wallet_validating_node_url.clone();
self.wallet_config.data_file_dir = self.working_dir.clone(); self.wallet_config.data_file_dir = self.working_dir.clone();
let _=fs::create_dir_all(self.wallet_config.clone().data_file_dir); let _ = fs::create_dir_all(self.wallet_config.clone().data_file_dir);
wallet::WalletSeed::init_file(&self.wallet_config); wallet::WalletSeed::init_file(&self.wallet_config);
let wallet_seed = let wallet_seed = wallet::WalletSeed::from_file(&self.wallet_config)
wallet::WalletSeed::from_file(&self.wallet_config).expect("Failed to read wallet seed file."); .expect("Failed to read wallet seed file.");
let keychain = wallet_seed.derive_keychain("grin_test").expect( let keychain = wallet_seed
"Failed to derive keychain from seed file and passphrase.", .derive_keychain("grin_test")
); .expect("Failed to derive keychain from seed file and passphrase.");
wallet::server::start_rest_apis(self.wallet_config.clone(), keychain); wallet::server::start_rest_apis(self.wallet_config.clone(), keychain);
self.wallet_is_running = true; self.wallet_is_running = true;
} }
pub fn get_wallet_seed(config: &WalletConfig) -> wallet::WalletSeed { pub fn get_wallet_seed(config: &WalletConfig) -> wallet::WalletSeed {
let _=fs::create_dir_all(config.clone().data_file_dir); let _ = fs::create_dir_all(config.clone().data_file_dir);
wallet::WalletSeed::init_file(config); wallet::WalletSeed::init_file(config);
let wallet_seed = let wallet_seed =
wallet::WalletSeed::from_file(config).expect("Failed to read wallet seed file."); wallet::WalletSeed::from_file(config).expect("Failed to read wallet seed file.");
wallet_seed wallet_seed
} }
pub fn get_wallet_info(config: &WalletConfig, wallet_seed: &wallet::WalletSeed) -> wallet::WalletInfo { pub fn get_wallet_info(
let keychain = wallet_seed.derive_keychain("grin_test").expect( config: &WalletConfig,
"Failed to derive keychain from seed file and passphrase.", wallet_seed: &wallet::WalletSeed,
); ) -> wallet::WalletInfo {
let keychain = wallet_seed
.derive_keychain("grin_test")
.expect("Failed to derive keychain from seed file and passphrase.");
wallet::retrieve_info(config, &keychain) wallet::retrieve_info(config, &keychain)
} }
pub fn send_amount_to(
pub fn send_amount_to(config: &WalletConfig, config: &WalletConfig,
amount:&str, amount: &str,
minimum_confirmations: u64, minimum_confirmations: u64,
selection_strategy:&str, selection_strategy: &str,
dest: &str){ dest: &str,
) {
let amount = core::core::amount_from_hr_string(amount).expect( let amount = core::core::amount_from_hr_string(amount)
"Could not parse amount as a number with optional decimal point.", .expect("Could not parse amount as a number with optional decimal point.");
);
let wallet_seed = let wallet_seed =
wallet::WalletSeed::from_file(config).expect("Failed to read wallet seed file."); wallet::WalletSeed::from_file(config).expect("Failed to read wallet seed file.");
let mut keychain = wallet_seed.derive_keychain("grin_test").expect( let mut keychain = wallet_seed
"Failed to derive keychain from seed file and passphrase.", .derive_keychain("grin_test")
); .expect("Failed to derive keychain from seed file and passphrase.");
let max_outputs = 500; let max_outputs = 500;
let result = wallet::issue_send_tx( let result = wallet::issue_send_tx(
config, config,
@ -331,27 +327,25 @@ impl LocalServerContainer {
dest.to_string(), dest.to_string(),
max_outputs, max_outputs,
selection_strategy == "all", selection_strategy == "all",
); );
match result { match result {
Ok(_) => { Ok(_) => println!(
println!( "Tx sent: {} grin to {} (strategy '{}')",
"Tx sent: {} grin to {} (strategy '{}')", core::core::amount_to_hr_string(amount),
core::core::amount_to_hr_string(amount), dest,
dest, selection_strategy,
selection_strategy, ),
) Err(e) => match e.kind() {
} wallet::ErrorKind::NotEnoughFunds(available) => {
Err(e) => match e.kind() { println!(
wallet::ErrorKind::NotEnoughFunds(available) => { "Tx not sent: insufficient funds (max: {})",
println!( core::core::amount_to_hr_string(available),
"Tx not sent: insufficient funds (max: {})", );
core::core::amount_to_hr_string(available), }
); _ => {
} println!("Tx not sent to {}: {:?}", dest, e);
_ => { }
println!("Tx not sent to {}: {:?}", dest, e); },
}
}
}; };
} }
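For reference, the wallet tests elsewhere in this commit call this helper with a human-readable amount and a selection-strategy string; an illustrative invocation (all values are placeholders):

// Illustrative call; config, amount and URL are placeholders.
LocalServerContainer::send_amount_to(
    &wallet_config,           // WalletConfig of the sending wallet
    "50.00",                  // parsed via core::core::amount_from_hr_string
    1,                        // minimum confirmations on inputs
    "not_all",                // any value but "all" avoids sweeping every output
    "http://127.0.0.1:20002", // listening recipient wallet
);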
@ -456,20 +450,15 @@ impl LocalServerContainerPool {
server_config.name = String::from(format!( server_config.name = String::from(format!(
"{}/{}-{}", "{}/{}-{}",
self.config.base_name, self.config.base_name, self.config.base_name, server_config.p2p_server_port
self.config.base_name,
server_config.p2p_server_port
)); ));
// Use self as coinbase wallet // Use self as coinbase wallet
server_config.coinbase_wallet_address = String::from(format!( server_config.coinbase_wallet_address = String::from(format!(
"http://{}:{}", "http://{}:{}",
server_config.base_addr, server_config.base_addr, server_config.wallet_port
server_config.wallet_port
)); ));
self.next_p2p_port += 1; self.next_p2p_port += 1;
self.next_api_port += 1; self.next_api_port += 1;
self.next_wallet_port += 1; self.next_wallet_port += 1;
@ -480,8 +469,7 @@ impl LocalServerContainerPool {
let _server_address = format!( let _server_address = format!(
"{}:{}", "{}:{}",
server_config.base_addr, server_config.base_addr, server_config.p2p_server_port
server_config.p2p_server_port
); );
let server_container = LocalServerContainer::new(server_config.clone()).unwrap(); let server_container = LocalServerContainer::new(server_config.clone()).unwrap();

View file

@ -101,8 +101,7 @@ fn simulate_seeding() {
server_config.is_seeding = false; server_config.is_seeding = false;
server_config.seed_addr = String::from(format!( server_config.seed_addr = String::from(format!(
"{}:{}", "{}:{}",
server_config.base_addr, server_config.base_addr, server_config.p2p_server_port
server_config.p2p_server_port
)); ));
for _ in 0..4 { for _ in 0..4 {
@ -153,8 +152,7 @@ fn simulate_parallel_mining() {
server_config.is_seeding = false; server_config.is_seeding = false;
server_config.seed_addr = String::from(format!( server_config.seed_addr = String::from(format!(
"{}:{}", "{}:{}",
server_config.base_addr, server_config.base_addr, server_config.p2p_server_port
server_config.p2p_server_port
)); ));
// And create 4 more, then let them run for a while // And create 4 more, then let them run for a while
@ -169,8 +167,8 @@ fn simulate_parallel_mining() {
let _ = pool.run_all_servers(); let _ = pool.run_all_servers();
// Check mining difficulty here? Though I'd think it's more valuable // Check mining difficulty here? Though I'd think it's more valuable
// to simply output it. Can at least see the evolution of the difficulty target // to simply output it. Can at least see the evolution of the difficulty target
// in the debug log output for now // in the debug log output for now
} }
// TODO: Convert these tests to newer framework format // TODO: Convert these tests to newer framework format
@ -190,20 +188,18 @@ fn a_simulate_block_propagation() {
// instantiates 5 servers on different ports // instantiates 5 servers on different ports
let mut servers = vec![]; let mut servers = vec![];
for n in 0..5 { for n in 0..5 {
let s = grin::Server::new( let s = grin::Server::new(grin::ServerConfig {
grin::ServerConfig { api_http_addr: format!("127.0.0.1:{}", 19000 + n),
api_http_addr: format!("127.0.0.1:{}", 19000 + n), db_root: format!("target/{}/grin-prop-{}", test_name_dir, n),
db_root: format!("target/{}/grin-prop-{}", test_name_dir, n), p2p_config: p2p::P2PConfig {
p2p_config: p2p::P2PConfig { port: 18000 + n,
port: 18000 + n, ..p2p::P2PConfig::default()
..p2p::P2PConfig::default()
},
seeding_type: grin::Seeding::List,
seeds: Some(vec!["127.0.0.1:18000".to_string()]),
chain_type: core::global::ChainTypes::AutomatedTesting,
..Default::default()
}, },
).unwrap(); seeding_type: grin::Seeding::List,
seeds: Some(vec!["127.0.0.1:18000".to_string()]),
chain_type: core::global::ChainTypes::AutomatedTesting,
..Default::default()
}).unwrap();
servers.push(s); servers.push(s);
} }

View file

@ -19,19 +19,19 @@ extern crate slog;
extern crate grin_api as api; extern crate grin_api as api;
extern crate grin_chain as chain; extern crate grin_chain as chain;
extern crate grin_config as config;
extern crate grin_core as core; extern crate grin_core as core;
extern crate grin_grin as grin; extern crate grin_grin as grin;
extern crate grin_p2p as p2p; extern crate grin_p2p as p2p;
extern crate grin_pow as pow; extern crate grin_pow as pow;
extern crate grin_util as util; extern crate grin_util as util;
extern crate grin_wallet as wallet; extern crate grin_wallet as wallet;
extern crate grin_config as config;
mod framework; mod framework;
use std::{thread, time}; use std::{thread, time};
use std::sync::{Arc, Mutex}; use std::sync::{Arc, Mutex};
use framework::{LocalServerContainer,LocalServerContainerConfig}; use framework::{LocalServerContainer, LocalServerContainerConfig};
use util::LOGGER; use util::LOGGER;
@ -51,12 +51,12 @@ fn basic_wallet_transactions() {
// Run a separate coinbase wallet for coinbase transactions // Run a separate coinbase wallet for coinbase transactions
let mut coinbase_config = LocalServerContainerConfig::default(); let mut coinbase_config = LocalServerContainerConfig::default();
coinbase_config.name = String::from("coinbase_wallet"); coinbase_config.name = String::from("coinbase_wallet");
coinbase_config.wallet_validating_node_url=String::from("http://127.0.0.1:30001"); coinbase_config.wallet_validating_node_url = String::from("http://127.0.0.1:30001");
coinbase_config.wallet_port = 10002; coinbase_config.wallet_port = 10002;
let coinbase_wallet = Arc::new(Mutex::new(LocalServerContainer::new(coinbase_config).unwrap())); let coinbase_wallet = Arc::new(Mutex::new(
let coinbase_wallet_config = { LocalServerContainer::new(coinbase_config).unwrap(),
coinbase_wallet.lock().unwrap().wallet_config.clone() ));
}; let coinbase_wallet_config = { coinbase_wallet.lock().unwrap().wallet_config.clone() };
let coinbase_seed = LocalServerContainer::get_wallet_seed(&coinbase_wallet_config); let coinbase_seed = LocalServerContainer::get_wallet_seed(&coinbase_wallet_config);
@ -67,13 +67,11 @@ fn basic_wallet_transactions() {
let mut recp_config = LocalServerContainerConfig::default(); let mut recp_config = LocalServerContainerConfig::default();
recp_config.name = String::from("target_wallet"); recp_config.name = String::from("target_wallet");
recp_config.wallet_validating_node_url=String::from("http://127.0.0.1:30001"); recp_config.wallet_validating_node_url = String::from("http://127.0.0.1:30001");
recp_config.wallet_port = 20002; recp_config.wallet_port = 20002;
let target_wallet = Arc::new(Mutex::new(LocalServerContainer::new(recp_config).unwrap())); let target_wallet = Arc::new(Mutex::new(LocalServerContainer::new(recp_config).unwrap()));
let target_wallet_cloned = target_wallet.clone(); let target_wallet_cloned = target_wallet.clone();
let recp_wallet_config = { let recp_wallet_config = { target_wallet.lock().unwrap().wallet_config.clone() };
target_wallet.lock().unwrap().wallet_config.clone()
};
let recp_seed = LocalServerContainer::get_wallet_seed(&recp_wallet_config); let recp_seed = LocalServerContainer::get_wallet_seed(&recp_wallet_config);
//Start up a second wallet, to receive //Start up a second wallet, to receive
@ -90,54 +88,83 @@ fn basic_wallet_transactions() {
server_config.api_server_port = 30001; server_config.api_server_port = 30001;
server_config.start_miner = true; server_config.start_miner = true;
server_config.start_wallet = false; server_config.start_wallet = false;
server_config.coinbase_wallet_address = String::from(format!( server_config.coinbase_wallet_address =
"http://{}:{}", String::from(format!("http://{}:{}", server_config.base_addr, 10002));
server_config.base_addr,
10002
));
let mut server_one = LocalServerContainer::new(server_config).unwrap(); let mut server_one = LocalServerContainer::new(server_config).unwrap();
server_one.run_server(120); server_one.run_server(120);
}); });
//Wait until we have some funds to send //Wait until we have some funds to send
let mut coinbase_info = LocalServerContainer::get_wallet_info(&coinbase_wallet_config, &coinbase_seed); let mut coinbase_info =
LocalServerContainer::get_wallet_info(&coinbase_wallet_config, &coinbase_seed);
let mut slept_time = 0; let mut slept_time = 0;
while coinbase_info.amount_currently_spendable < 100000000000{ while coinbase_info.amount_currently_spendable < 100000000000 {
thread::sleep(time::Duration::from_millis(500)); thread::sleep(time::Duration::from_millis(500));
slept_time+=500; slept_time += 500;
if slept_time > 10000 { if slept_time > 10000 {
panic!("Coinbase not confirming in time"); panic!("Coinbase not confirming in time");
} }
coinbase_info = LocalServerContainer::get_wallet_info(&coinbase_wallet_config, &coinbase_seed); coinbase_info =
LocalServerContainer::get_wallet_info(&coinbase_wallet_config, &coinbase_seed);
} }
warn!(LOGGER, "Sending 50 Grins to recipient wallet"); warn!(LOGGER, "Sending 50 Grins to recipient wallet");
LocalServerContainer::send_amount_to(&coinbase_wallet_config, "50.00", 1, "not_all", "http://127.0.0.1:20002"); LocalServerContainer::send_amount_to(
&coinbase_wallet_config,
"50.00",
1,
"not_all",
"http://127.0.0.1:20002",
);
//Wait for a confirmation //Wait for a confirmation
thread::sleep(time::Duration::from_millis(3000)); thread::sleep(time::Duration::from_millis(3000));
let coinbase_info = LocalServerContainer::get_wallet_info(&coinbase_wallet_config, &coinbase_seed); let coinbase_info =
LocalServerContainer::get_wallet_info(&coinbase_wallet_config, &coinbase_seed);
println!("Coinbase wallet info: {:?}", coinbase_info); println!("Coinbase wallet info: {:?}", coinbase_info);
let recipient_info = LocalServerContainer::get_wallet_info(&recp_wallet_config, &recp_seed); let recipient_info = LocalServerContainer::get_wallet_info(&recp_wallet_config, &recp_seed);
println!("Recipient wallet info: {:?}", recipient_info); println!("Recipient wallet info: {:?}", recipient_info);
assert!(recipient_info.data_confirmed && recipient_info.amount_currently_spendable==50000000000); assert!(
recipient_info.data_confirmed && recipient_info.amount_currently_spendable == 50000000000
);
warn!(LOGGER, "Sending many small transactions to recipient wallet"); warn!(
LOGGER,
"Sending many small transactions to recipient wallet"
);
for _ in 0..10 { for _ in 0..10 {
LocalServerContainer::send_amount_to(&coinbase_wallet_config, "1.00", 1, "not_all", "http://127.0.0.1:20002"); LocalServerContainer::send_amount_to(
&coinbase_wallet_config,
"1.00",
1,
"not_all",
"http://127.0.0.1:20002",
);
} }
thread::sleep(time::Duration::from_millis(10000)); thread::sleep(time::Duration::from_millis(10000));
let recipient_info = LocalServerContainer::get_wallet_info(&recp_wallet_config, &recp_seed); let recipient_info = LocalServerContainer::get_wallet_info(&recp_wallet_config, &recp_seed);
println!("Recipient wallet info post little sends: {:?}", recipient_info); println!(
"Recipient wallet info post little sends: {:?}",
recipient_info
);
assert!(recipient_info.data_confirmed && recipient_info.amount_currently_spendable==60000000000); assert!(
recipient_info.data_confirmed && recipient_info.amount_currently_spendable == 60000000000
);
//send some cash right back //send some cash right back
LocalServerContainer::send_amount_to(&recp_wallet_config, "25.00", 1, "all", "http://127.0.0.1:10002"); LocalServerContainer::send_amount_to(
&recp_wallet_config,
"25.00",
1,
"all",
"http://127.0.0.1:10002",
);
thread::sleep(time::Duration::from_millis(5000)); thread::sleep(time::Duration::from_millis(5000));
let coinbase_info = LocalServerContainer::get_wallet_info(&coinbase_wallet_config, &coinbase_seed); let coinbase_info =
LocalServerContainer::get_wallet_info(&coinbase_wallet_config, &coinbase_seed);
println!("Coinbase wallet info final: {:?}", coinbase_info); println!("Coinbase wallet info final: {:?}", coinbase_info);
} }
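A note on the magic numbers in the assertions above (inferred from the asserted values, not stated in the diff): amounts are kept in base units of one billionth of a grin, so "50.00" parses to 50000000000 and the ten extra 1.00 sends bring the recipient's spendable balance to 60000000000.

// Scaling implied by the assertions: 1 grin == 1_000_000_000 base units.
fn to_base_units(grins: u64) -> u64 {
    grins * 1_000_000_000
}
// to_base_units(50) == 50_000_000_000; to_base_units(60) == 60_000_000_000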

View file

@ -22,7 +22,6 @@ use util;
use util::secp::{self, Secp256k1}; use util::secp::{self, Secp256k1};
use util::secp::constants::SECRET_KEY_SIZE; use util::secp::constants::SECRET_KEY_SIZE;
#[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)] #[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)]
pub struct BlindingFactor([u8; SECRET_KEY_SIZE]); pub struct BlindingFactor([u8; SECRET_KEY_SIZE]);
@ -65,8 +64,7 @@ impl BlindingFactor {
// and secp lib checks this // and secp lib checks this
Ok(secp::key::ZERO_KEY) Ok(secp::key::ZERO_KEY)
} else { } else {
secp::key::SecretKey::from_slice(secp, &self.0) secp::key::SecretKey::from_slice(secp, &self.0).map_err(|e| Error::Secp(e))
.map_err(|e| Error::Secp(e))
} }
} }
@ -86,10 +84,7 @@ impl BlindingFactor {
let blind_1 = BlindingFactor::from_secret_key(skey_1); let blind_1 = BlindingFactor::from_secret_key(skey_1);
let blind_2 = BlindingFactor::from_secret_key(skey_2); let blind_2 = BlindingFactor::from_secret_key(skey_2);
Ok(SplitBlindingFactor { Ok(SplitBlindingFactor { blind_1, blind_2 })
blind_1,
blind_2,
})
} }
} }
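For context (not part of the diff): the split above is plain additive secret sharing. Assuming the usual construction, skey_1 is drawn at random and skey_2 = k - skey_1 (mod the group order), so the two halves recombine to the original factor:

    blind_1 + blind_2 = k  (mod n)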

View file

@ -218,8 +218,8 @@ impl ExtendedKey {
let derived = blake2b(64, b"Grin/MW Seed", seed); let derived = blake2b(64, b"Grin/MW Seed", seed);
let slice = derived.as_bytes(); let slice = derived.as_bytes();
let key = SecretKey::from_slice(&secp, &slice[0..32]) let key =
.expect("Error deriving key (from_slice)"); SecretKey::from_slice(&secp, &slice[0..32]).expect("Error deriving key (from_slice)");
let mut chain_code: [u8; 32] = Default::default(); let mut chain_code: [u8; 32] = Default::default();
(&mut chain_code).copy_from_slice(&slice[32..64]); (&mut chain_code).copy_from_slice(&slice[32..64]);
@ -295,7 +295,6 @@ impl ExtendedKey {
} }
} }
#[cfg(test)] #[cfg(test)]
mod test { mod test {
use serde_json; use serde_json;
@ -341,10 +340,7 @@ mod test {
let identifier = from_hex("6f7c1a053ca54592e783"); let identifier = from_hex("6f7c1a053ca54592e783");
let n_child = 0; let n_child = 0;
assert_eq!(extk.key, secret_key); assert_eq!(extk.key, secret_key);
assert_eq!( assert_eq!(extk.key_id, Identifier::from_bytes(identifier.as_slice()));
extk.key_id,
Identifier::from_bytes(identifier.as_slice())
);
assert_eq!( assert_eq!(
extk.root_key_id, extk.root_key_id,
Identifier::from_bytes(identifier.as_slice()) Identifier::from_bytes(identifier.as_slice())

View file

@ -19,8 +19,8 @@ use std::{error, fmt};
use util::secp; use util::secp;
use util::secp::{Message, Secp256k1, Signature}; use util::secp::{Message, Secp256k1, Signature};
use util::secp::key::{SecretKey, PublicKey}; use util::secp::key::{PublicKey, SecretKey};
use util::secp::pedersen::{Commitment, ProofMessage, ProofInfo, RangeProof}; use util::secp::pedersen::{Commitment, ProofInfo, ProofMessage, RangeProof};
use util::secp::aggsig; use util::secp::aggsig;
use util::logger::LOGGER; use util::logger::LOGGER;
use util::kernel_sig_msg; use util::kernel_sig_msg;
@ -82,7 +82,7 @@ pub struct AggSigTxContext {
pub struct Keychain { pub struct Keychain {
secp: Secp256k1, secp: Secp256k1,
extkey: extkey::ExtendedKey, extkey: extkey::ExtendedKey,
pub aggsig_contexts: Arc<RwLock<Option<HashMap<Uuid,AggSigTxContext>>>>, pub aggsig_contexts: Arc<RwLock<Option<HashMap<Uuid, AggSigTxContext>>>>,
key_overrides: HashMap<Identifier, SecretKey>, key_overrides: HashMap<Identifier, SecretKey>,
key_derivation_cache: Arc<RwLock<HashMap<Identifier, u32>>>, key_derivation_cache: Arc<RwLock<HashMap<Identifier, u32>>>,
} }
@ -133,7 +133,11 @@ impl Keychain {
fn derived_key(&self, key_id: &Identifier) -> Result<SecretKey, Error> { fn derived_key(&self, key_id: &Identifier) -> Result<SecretKey, Error> {
// first check our overrides and just return the key if we have one in there // first check our overrides and just return the key if we have one in there
if let Some(key) = self.key_overrides.get(key_id) { if let Some(key) = self.key_overrides.get(key_id) {
trace!(LOGGER, "... Derived Key (using override) key_id: {}", key_id); trace!(
LOGGER,
"... Derived Key (using override) key_id: {}",
key_id
);
return Ok(*key); return Ok(*key);
} }
@ -149,8 +153,13 @@ impl Keychain {
{ {
let cache = self.key_derivation_cache.read().unwrap(); let cache = self.key_derivation_cache.read().unwrap();
if let Some(derivation) = cache.get(key_id) { if let Some(derivation) = cache.get(key_id) {
trace!(LOGGER, "... Derived Key (cache hit) key_id: {}, derivation: {}", key_id, derivation); trace!(
return Ok(self.derived_key_from_index(*derivation)?) LOGGER,
"... Derived Key (cache hit) key_id: {}, derivation: {}",
key_id,
derivation
);
return Ok(self.derived_key_from_index(*derivation)?);
} }
} }
@ -180,19 +189,17 @@ impl Keychain {
} }
} }
Err(Error::KeyDerivation( Err(Error::KeyDerivation(format!(
format!("failed to derive child_key for {:?}", key_id), "failed to derive child_key for {:?}",
)) key_id
)))
} }
// if we know the derivation index we can go straight to deriving the key // if we know the derivation index we can go straight to deriving the key
fn derived_key_from_index( fn derived_key_from_index(&self, derivation: u32) -> Result<extkey::ChildKey, Error> {
&self,
derivation: u32,
) -> Result<extkey::ChildKey, Error> {
trace!(LOGGER, "Derived Key (fast) by derivation: {}", derivation); trace!(LOGGER, "Derived Key (fast) by derivation: {}", derivation);
let child_key = self.extkey.derive(&self.secp, derivation)?; let child_key = self.extkey.derive(&self.secp, derivation)?;
return Ok(child_key) return Ok(child_key);
} }
pub fn commit(&self, amount: u64, key_id: &Identifier) -> Result<Commitment, Error> { pub fn commit(&self, amount: u64, key_id: &Identifier) -> Result<Commitment, Error> {
@ -201,11 +208,7 @@ impl Keychain {
Ok(commit) Ok(commit)
} }
pub fn commit_with_key_index( pub fn commit_with_key_index(&self, amount: u64, derivation: u32) -> Result<Commitment, Error> {
&self,
amount: u64,
derivation: u32,
) -> Result<Commitment, Error> {
let child_key = self.derived_key_from_index(derivation)?; let child_key = self.derived_key_from_index(derivation)?;
let commit = self.secp.commit(amount, child_key.key)?; let commit = self.secp.commit(amount, child_key.key)?;
Ok(commit) Ok(commit)
@ -217,7 +220,7 @@ impl Keychain {
Ok(commit) Ok(commit)
} }
pub fn switch_commit_from_index(&self, index:u32) -> Result<Commitment, Error> { pub fn switch_commit_from_index(&self, index: u32) -> Result<Commitment, Error> {
// just do this directly, because cache seems really slow for wallet reconstruct // just do this directly, because cache seems really slow for wallet reconstruct
let skey = self.extkey.derive(&self.secp, index)?; let skey = self.extkey.derive(&self.secp, index)?;
let skey = skey.key; let skey = skey.key;
@ -252,7 +255,9 @@ impl Keychain {
} else { } else {
if msg.len() != 64 { if msg.len() != 64 {
error!(LOGGER, "Bullet proof message must be 64 bytes."); error!(LOGGER, "Bullet proof message must be 64 bytes.");
return Err(Error::RangeProof("Bullet proof message must be 64 bytes".to_string())); return Err(Error::RangeProof(
"Bullet proof message must be 64 bytes".to_string(),
));
} }
} }
return Ok(self.secp.bullet_proof(amount, skey, extra_data, Some(msg))); return Ok(self.secp.bullet_proof(amount, skey, extra_data, Some(msg)));
@ -262,14 +267,14 @@ impl Keychain {
secp: &Secp256k1, secp: &Secp256k1,
commit: Commitment, commit: Commitment,
proof: RangeProof, proof: RangeProof,
extra_data: Option<Vec<u8>>) extra_data: Option<Vec<u8>>,
-> Result<(), secp::Error> { ) -> Result<(), secp::Error> {
let result = secp.verify_bullet_proof(commit, proof, extra_data); let result = secp.verify_bullet_proof(commit, proof, extra_data);
match result { match result {
Ok(_) => Ok(()), Ok(_) => Ok(()),
Err(e) => Err(e), Err(e) => Err(e),
}
} }
}
pub fn rewind_range_proof( pub fn rewind_range_proof(
&self, &self,
@ -279,9 +284,10 @@ impl Keychain {
proof: RangeProof, proof: RangeProof,
) -> Result<ProofInfo, Error> { ) -> Result<ProofInfo, Error> {
let nonce = self.derived_key(key_id)?; let nonce = self.derived_key(key_id)?;
let proof_message = self.secp.unwind_bullet_proof(commit, nonce, extra_data, proof); let proof_message = self.secp
.unwind_bullet_proof(commit, nonce, extra_data, proof);
let proof_info = match proof_message { let proof_info = match proof_message {
Ok(p) => ProofInfo { Ok(p) => ProofInfo {
success: true, success: true,
value: 0, value: 0,
message: p, message: p,
@ -300,7 +306,7 @@ impl Keychain {
max: 0, max: 0,
exp: 0, exp: 0,
mantissa: 0, mantissa: 0,
} },
}; };
return Ok(proof_info); return Ok(proof_info);
} }
@ -334,26 +340,34 @@ impl Keychain {
Ok(BlindingFactor::from_secret_key(sum)) Ok(BlindingFactor::from_secret_key(sum))
} }
pub fn aggsig_create_context(&self, transaction_id: &Uuid, sec_key:SecretKey) pub fn aggsig_create_context(
-> Result<(), Error>{ &self,
transaction_id: &Uuid,
sec_key: SecretKey,
) -> Result<(), Error> {
let mut contexts = self.aggsig_contexts.write().unwrap(); let mut contexts = self.aggsig_contexts.write().unwrap();
if contexts.is_none() { if contexts.is_none() {
*contexts = Some(HashMap::new()) *contexts = Some(HashMap::new())
} }
if contexts.as_mut().unwrap().contains_key(transaction_id) { if contexts.as_mut().unwrap().contains_key(transaction_id) {
return Err(Error::Transaction(String::from("Duplication transaction id"))); return Err(Error::Transaction(String::from(
"Duplication transaction id",
)));
} }
contexts.as_mut().unwrap().insert(transaction_id.clone(), AggSigTxContext{ contexts.as_mut().unwrap().insert(
sec_key: sec_key, transaction_id.clone(),
sec_nonce: aggsig::export_secnonce_single(&self.secp).unwrap(), AggSigTxContext {
output_ids: vec![], sec_key: sec_key,
}); sec_nonce: aggsig::export_secnonce_single(&self.secp).unwrap(),
output_ids: vec![],
},
);
Ok(()) Ok(())
} }
/// Tracks an output contributing to my excess value (if it needs to /// Tracks an output contributing to my excess value (if it needs to
/// be kept between invocations) /// be kept between invocations)
pub fn aggsig_add_output(&self, transaction_id: &Uuid, output_id:&Identifier){ pub fn aggsig_add_output(&self, transaction_id: &Uuid, output_id: &Identifier) {
let mut agg_contexts = self.aggsig_contexts.write().unwrap(); let mut agg_contexts = self.aggsig_contexts.write().unwrap();
let mut agg_contexts_local = agg_contexts.as_mut().unwrap().clone(); let mut agg_contexts_local = agg_contexts.as_mut().unwrap().clone();
let mut agg_context = agg_contexts_local.get(transaction_id).unwrap().clone(); let mut agg_context = agg_contexts_local.get(transaction_id).unwrap().clone();
@ -374,68 +388,87 @@ impl Keychain {
/// Returns private key, private nonce /// Returns private key, private nonce
pub fn aggsig_get_private_keys(&self, transaction_id: &Uuid) -> (SecretKey, SecretKey) { pub fn aggsig_get_private_keys(&self, transaction_id: &Uuid) -> (SecretKey, SecretKey) {
let contexts = self.aggsig_contexts.clone(); let contexts = self.aggsig_contexts.clone();
let contexts_read=contexts.read().unwrap(); let contexts_read = contexts.read().unwrap();
let agg_context = contexts_read.as_ref().unwrap(); let agg_context = contexts_read.as_ref().unwrap();
let agg_context_return = agg_context.get(transaction_id); let agg_context_return = agg_context.get(transaction_id);
(agg_context_return.unwrap().sec_key.clone(), (
agg_context_return.unwrap().sec_nonce.clone()) agg_context_return.unwrap().sec_key.clone(),
agg_context_return.unwrap().sec_nonce.clone(),
)
} }
/// Returns public key, public nonce /// Returns public key, public nonce
pub fn aggsig_get_public_keys(&self, transaction_id: &Uuid) -> (PublicKey, PublicKey) { pub fn aggsig_get_public_keys(&self, transaction_id: &Uuid) -> (PublicKey, PublicKey) {
let contexts = self.aggsig_contexts.clone(); let contexts = self.aggsig_contexts.clone();
let contexts_read=contexts.read().unwrap(); let contexts_read = contexts.read().unwrap();
let agg_context = contexts_read.as_ref().unwrap(); let agg_context = contexts_read.as_ref().unwrap();
let agg_context_return = agg_context.get(transaction_id); let agg_context_return = agg_context.get(transaction_id);
(PublicKey::from_secret_key(&self.secp, &agg_context_return.unwrap().sec_key).unwrap(), (
PublicKey::from_secret_key(&self.secp, &agg_context_return.unwrap().sec_nonce).unwrap()) PublicKey::from_secret_key(&self.secp, &agg_context_return.unwrap().sec_key).unwrap(),
PublicKey::from_secret_key(&self.secp, &agg_context_return.unwrap().sec_nonce).unwrap(),
)
} }
/// Note 'secnonce' here is used to perform the signature, while 'pubnonce' just allows you to /// Note 'secnonce' here is used to perform the signature, while 'pubnonce' just allows you to
/// provide a custom public nonce to include while calculating e /// provide a custom public nonce to include while calculating e
/// nonce_sum is the sum used to decide whether secnonce should be inverted during sig time /// nonce_sum is the sum used to decide whether secnonce should be inverted during sig time
pub fn aggsig_sign_single(&self, pub fn aggsig_sign_single(
&self,
transaction_id: &Uuid, transaction_id: &Uuid,
msg: &Message, msg: &Message,
secnonce:Option<&SecretKey>, secnonce: Option<&SecretKey>,
pubnonce: Option<&PublicKey>, pubnonce: Option<&PublicKey>,
nonce_sum: Option<&PublicKey>) -> Result<Signature, Error> { nonce_sum: Option<&PublicKey>,
) -> Result<Signature, Error> {
let contexts = self.aggsig_contexts.clone(); let contexts = self.aggsig_contexts.clone();
let contexts_read=contexts.read().unwrap(); let contexts_read = contexts.read().unwrap();
let agg_context = contexts_read.as_ref().unwrap(); let agg_context = contexts_read.as_ref().unwrap();
let agg_context_return = agg_context.get(transaction_id); let agg_context_return = agg_context.get(transaction_id);
let sig = aggsig::sign_single(&self.secp, msg, &agg_context_return.unwrap().sec_key, secnonce, pubnonce, nonce_sum)?; let sig = aggsig::sign_single(
&self.secp,
msg,
&agg_context_return.unwrap().sec_key,
secnonce,
pubnonce,
nonce_sum,
)?;
Ok(sig) Ok(sig)
} }
//Verifies an aggsig signature //Verifies an aggsig signature
pub fn aggsig_verify_single(&self, pub fn aggsig_verify_single(
&self,
sig: &Signature, sig: &Signature,
msg: &Message, msg: &Message,
pubnonce:Option<&PublicKey>, pubnonce: Option<&PublicKey>,
pubkey:&PublicKey, pubkey: &PublicKey,
is_partial:bool) -> bool { is_partial: bool,
) -> bool {
aggsig::verify_single(&self.secp, sig, msg, pubnonce, pubkey, is_partial) aggsig::verify_single(&self.secp, sig, msg, pubnonce, pubkey, is_partial)
} }
//Verifies other final sig corresponds with what we're expecting //Verifies other final sig corresponds with what we're expecting
pub fn aggsig_verify_final_sig_build_msg(&self, pub fn aggsig_verify_final_sig_build_msg(
&self,
sig: &Signature, sig: &Signature,
pubkey: &PublicKey, pubkey: &PublicKey,
fee: u64, fee: u64,
lock_height:u64) -> bool { lock_height: u64,
) -> bool {
let msg = secp::Message::from_slice(&kernel_sig_msg(fee, lock_height)).unwrap(); let msg = secp::Message::from_slice(&kernel_sig_msg(fee, lock_height)).unwrap();
self.aggsig_verify_single(sig, &msg, None, pubkey, true) self.aggsig_verify_single(sig, &msg, None, pubkey, true)
} }
//Verifies other party's sig corresponds with what we're expecting //Verifies other party's sig corresponds with what we're expecting
pub fn aggsig_verify_partial_sig(&self, pub fn aggsig_verify_partial_sig(
&self,
transaction_id: &Uuid, transaction_id: &Uuid,
sig: &Signature, sig: &Signature,
other_pub_nonce:&PublicKey, other_pub_nonce: &PublicKey,
pubkey:&PublicKey, pubkey: &PublicKey,
fee: u64, fee: u64,
lock_height:u64) -> bool { lock_height: u64,
) -> bool {
let (_, sec_nonce) = self.aggsig_get_private_keys(transaction_id); let (_, sec_nonce) = self.aggsig_get_private_keys(transaction_id);
let mut nonce_sum = other_pub_nonce.clone(); let mut nonce_sum = other_pub_nonce.clone();
let _ = nonce_sum.add_exp_assign(&self.secp, &sec_nonce); let _ = nonce_sum.add_exp_assign(&self.secp, &sec_nonce);
@ -449,7 +482,8 @@ impl Keychain {
transaction_id: &Uuid, transaction_id: &Uuid,
other_pub_nonce: &PublicKey, other_pub_nonce: &PublicKey,
fee: u64, fee: u64,
lock_height: u64) -> Result<Signature, Error>{ lock_height: u64,
) -> Result<Signature, Error> {
// Add public nonces kR*G + kS*G // Add public nonces kR*G + kS*G
let (_, sec_nonce) = self.aggsig_get_private_keys(transaction_id); let (_, sec_nonce) = self.aggsig_get_private_keys(transaction_id);
let mut nonce_sum = other_pub_nonce.clone(); let mut nonce_sum = other_pub_nonce.clone();
@ -457,7 +491,13 @@ impl Keychain {
let msg = secp::Message::from_slice(&kernel_sig_msg(fee, lock_height))?; let msg = secp::Message::from_slice(&kernel_sig_msg(fee, lock_height))?;
//Now calculate signature using message M=fee, nonce in e=nonce_sum //Now calculate signature using message M=fee, nonce in e=nonce_sum
self.aggsig_sign_single(transaction_id, &msg, Some(&sec_nonce), Some(&nonce_sum), Some(&nonce_sum)) self.aggsig_sign_single(
transaction_id,
&msg,
Some(&sec_nonce),
Some(&nonce_sum),
Some(&nonce_sum),
)
} }
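Schematically, and glossing over the secp256k1-zkp specifics, this is the two-party Schnorr flow the comments describe: both sides build the same nonce sum, each produces a partial signature against it, and the partials add into the final signature:

    R = kR*G + kS*G
    e = H(R || M)          // M commits to fee and lock_height
    s_i = k_i + e * x_i    // each party's partial signature
    s = sR + sS            // combined by the final-signature helper below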
/// Helper function to calculate final signature /// Helper function to calculate final signature
@ -466,7 +506,8 @@ impl Keychain {
transaction_id: &Uuid, transaction_id: &Uuid,
their_sig: &Signature, their_sig: &Signature,
our_sig: &Signature, our_sig: &Signature,
their_pub_nonce: &PublicKey) -> Result<Signature, Error> { their_pub_nonce: &PublicKey,
) -> Result<Signature, Error> {
// Add public nonces kR*G + kS*G // Add public nonces kR*G + kS*G
let (_, sec_nonce) = self.aggsig_get_private_keys(transaction_id); let (_, sec_nonce) = self.aggsig_get_private_keys(transaction_id);
let mut nonce_sum = their_pub_nonce.clone(); let mut nonce_sum = their_pub_nonce.clone();
@ -500,13 +541,13 @@ impl Keychain {
/// Verifies a sig given a commitment /// Verifies a sig given a commitment
pub fn aggsig_verify_single_from_commit( pub fn aggsig_verify_single_from_commit(
secp:&Secp256k1, secp: &Secp256k1,
sig: &Signature, sig: &Signature,
msg: &Message, msg: &Message,
commit: &Commitment, commit: &Commitment,
) -> bool { ) -> bool {
// Extract the pubkey, unfortunately we need this hack for now, (we just hope one is valid) // Extract the pubkey, unfortunately we need this hack for now, (we just hope
// TODO: Create better secp256k1 API to do this // one is valid) TODO: Create better secp256k1 API to do this
let pubkeys = commit.to_two_pubkeys(secp); let pubkeys = commit.to_two_pubkeys(secp);
let mut valid = false; let mut valid = false;
for i in 0..pubkeys.len() { for i in 0..pubkeys.len() {
@ -562,7 +603,6 @@ mod test {
use util::secp::pedersen::ProofMessage; use util::secp::pedersen::ProofMessage;
use util::secp::key::SecretKey; use util::secp::key::SecretKey;
#[test] #[test]
fn test_key_derivation() { fn test_key_derivation() {
let keychain = Keychain::from_random_seed().unwrap(); let keychain = Keychain::from_random_seed().unwrap();
@ -591,8 +631,12 @@ mod test {
let mut msg = ProofMessage::from_bytes(&[0u8; 64]); let mut msg = ProofMessage::from_bytes(&[0u8; 64]);
let extra_data = [99u8; 64]; let extra_data = [99u8; 64];
let proof = keychain.range_proof(5, &key_id, commit, Some(extra_data.to_vec().clone()), msg).unwrap(); let proof = keychain
let proof_info = keychain.rewind_range_proof(&key_id, commit, Some(extra_data.to_vec().clone()), proof).unwrap(); .range_proof(5, &key_id, commit, Some(extra_data.to_vec().clone()), msg)
.unwrap();
let proof_info = keychain
.rewind_range_proof(&key_id, commit, Some(extra_data.to_vec().clone()), proof)
.unwrap();
assert_eq!(proof_info.success, true); assert_eq!(proof_info.success, true);
@ -610,8 +654,8 @@ mod test {
let proof_info = keychain let proof_info = keychain
.rewind_range_proof(&key_id2, commit, Some(extra_data.to_vec().clone()), proof) .rewind_range_proof(&key_id2, commit, Some(extra_data.to_vec().clone()), proof)
.unwrap(); .unwrap();
// With bullet proofs, if you provide the wrong nonce you'll get gibberish back as opposed // With bullet proofs, if you provide the wrong nonce you'll get gibberish back
// to a failure to recover the message // as opposed to a failure to recover the message
assert_ne!( assert_ne!(
proof_info.message, proof_info.message,
secp::pedersen::ProofMessage::from_bytes(&[0; secp::constants::BULLET_PROOF_MSG_SIZE]) secp::pedersen::ProofMessage::from_bytes(&[0; secp::constants::BULLET_PROOF_MSG_SIZE])
@ -638,7 +682,12 @@ mod test {
let commit3 = keychain.commit(4, &key_id).unwrap(); let commit3 = keychain.commit(4, &key_id).unwrap();
let wrong_extra_data = [98u8; 64]; let wrong_extra_data = [98u8; 64];
let should_err = keychain let should_err = keychain
.rewind_range_proof(&key_id, commit3, Some(wrong_extra_data.to_vec().clone()), proof) .rewind_range_proof(
&key_id,
commit3,
Some(wrong_extra_data.to_vec().clone()),
proof,
)
.unwrap(); .unwrap();
assert_eq!(proof_info.success, false); assert_eq!(proof_info.success, false);
@ -656,20 +705,16 @@ mod test {
let skey1 = SecretKey::from_slice( let skey1 = SecretKey::from_slice(
&keychain.secp, &keychain.secp,
&[ &[
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 1,
], ],
).unwrap(); ).unwrap();
let skey2 = SecretKey::from_slice( let skey2 = SecretKey::from_slice(
&keychain.secp, &keychain.secp,
&[ &[
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 2,
], ],
).unwrap(); ).unwrap();
@ -684,10 +729,10 @@ mod test {
let commit_3 = keychain.secp.commit(0, skey3).unwrap(); let commit_3 = keychain.secp.commit(0, skey3).unwrap();
// now sum commitments for keys 1 and 2 // now sum commitments for keys 1 and 2
let sum = keychain.secp.commit_sum( let sum = keychain
vec![commit_1.clone(), commit_2.clone()], .secp
vec![], .commit_sum(vec![commit_1.clone(), commit_2.clone()], vec![])
).unwrap(); .unwrap();
// confirm the commitment for key 3 matches the sum of the commitments 1 and 2 // confirm the commitment for key 3 matches the sum of the commitments 1 and 2
assert_eq!(sum, commit_3); assert_eq!(sum, commit_3);
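Why the sum matches: Pedersen commitments are additively homomorphic. With commit(v, k) = k*G + v*H, we have commit(0, skey1) + commit(0, skey2) = (skey1 + skey2)*G = commit(0, skey3), provided skey3 = skey1 + skey2 (skey3's bytes are elided above, but that construction is what the assertion relies on).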
@ -695,10 +740,11 @@ mod test {
// now check we can sum keys up using keychain.blind_sum() // now check we can sum keys up using keychain.blind_sum()
// in the same way (convenience function) // in the same way (convenience function)
assert_eq!( assert_eq!(
keychain.blind_sum(&BlindSum::new() keychain
.add_blinding_factor(BlindingFactor::from_secret_key(skey1)) .blind_sum(&BlindSum::new()
.add_blinding_factor(BlindingFactor::from_secret_key(skey2)) .add_blinding_factor(BlindingFactor::from_secret_key(skey1))
).unwrap(), .add_blinding_factor(BlindingFactor::from_secret_key(skey2)))
.unwrap(),
BlindingFactor::from_secret_key(skey3), BlindingFactor::from_secret_key(skey3),
); );
} }
@ -714,41 +760,41 @@ mod test {
// Calculate the kernel excess here for convenience. // Calculate the kernel excess here for convenience.
// Normally this would happen during transaction building. // Normally this would happen during transaction building.
let kernel_excess = { let kernel_excess = {
let skey1 = sender_keychain.derived_key( let skey1 = sender_keychain
&sender_keychain.derive_key_id(1).unwrap(), .derived_key(&sender_keychain.derive_key_id(1).unwrap())
).unwrap(); .unwrap();
let skey2 = receiver_keychain.derived_key( let skey2 = receiver_keychain
&receiver_keychain.derive_key_id(1).unwrap(), .derived_key(&receiver_keychain.derive_key_id(1).unwrap())
).unwrap(); .unwrap();
let keychain = Keychain::from_random_seed().unwrap(); let keychain = Keychain::from_random_seed().unwrap();
let blinding_factor = keychain.blind_sum( let blinding_factor = keychain
&BlindSum::new() .blind_sum(&BlindSum::new()
.sub_blinding_factor(BlindingFactor::from_secret_key(skey1)) .sub_blinding_factor(BlindingFactor::from_secret_key(skey1))
.add_blinding_factor(BlindingFactor::from_secret_key(skey2)) .add_blinding_factor(BlindingFactor::from_secret_key(skey2)))
).unwrap(); .unwrap();
keychain.secp.commit( keychain
0, .secp
blinding_factor.secret_key(&keychain.secp).unwrap(), .commit(0, blinding_factor.secret_key(&keychain.secp).unwrap())
).unwrap() .unwrap()
}; };
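In equation form, what this block computes: with input blinding factor skey1 and output blinding factor skey2, kernel_excess = commit(0, skey2 - skey1) = (skey2 - skey1)*G, i.e. a commitment to zero value under the transaction's net blinding factor.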
// sender starts the tx interaction // sender starts the tx interaction
let (sender_pub_excess, sender_pub_nonce) = { let (sender_pub_excess, sender_pub_nonce) = {
let keychain = sender_keychain.clone(); let keychain = sender_keychain.clone();
let skey = keychain.derived_key( let skey = keychain
&keychain.derive_key_id(1).unwrap(), .derived_key(&keychain.derive_key_id(1).unwrap())
).unwrap(); .unwrap();
// dealing with an input here so we need to negate the blinding_factor // dealing with an input here so we need to negate the blinding_factor
// rather than use it as is // rather than use it as is
let blinding_factor = keychain.blind_sum( let blinding_factor = keychain
&BlindSum::new() .blind_sum(&BlindSum::new()
.sub_blinding_factor(BlindingFactor::from_secret_key(skey)) .sub_blinding_factor(BlindingFactor::from_secret_key(skey)))
).unwrap(); .unwrap();
let blind = blinding_factor.secret_key(&keychain.secp()).unwrap(); let blind = blinding_factor.secret_key(&keychain.secp()).unwrap();
@ -768,12 +814,9 @@ mod test {
let (pub_excess, pub_nonce) = keychain.aggsig_get_public_keys(&tx_id); let (pub_excess, pub_nonce) = keychain.aggsig_get_public_keys(&tx_id);
keychain.aggsig_add_output(&tx_id, &key_id); keychain.aggsig_add_output(&tx_id, &key_id);
let sig_part = keychain.aggsig_calculate_partial_sig( let sig_part = keychain
&tx_id, .aggsig_calculate_partial_sig(&tx_id, &sender_pub_nonce, 0, 0)
&sender_pub_nonce, .unwrap();
0,
0,
).unwrap();
(pub_excess, pub_nonce, sig_part) (pub_excess, pub_nonce, sig_part)
}; };
@ -795,12 +838,9 @@ mod test {
// now sender signs with their key // now sender signs with their key
let sender_sig_part = { let sender_sig_part = {
let keychain = sender_keychain.clone(); let keychain = sender_keychain.clone();
keychain.aggsig_calculate_partial_sig( keychain
&tx_id, .aggsig_calculate_partial_sig(&tx_id, &receiver_pub_nonce, 0, 0)
&receiver_pub_nonce, .unwrap()
0,
0,
).unwrap()
}; };
// check the receiver can verify the partial signature // check the receiver can verify the partial signature
@ -823,23 +863,24 @@ mod test {
let keychain = receiver_keychain.clone(); let keychain = receiver_keychain.clone();
// Receiver recreates their partial sig (we do not maintain state from earlier) // Receiver recreates their partial sig (we do not maintain state from earlier)
let our_sig_part = keychain.aggsig_calculate_partial_sig( let our_sig_part = keychain
&tx_id, .aggsig_calculate_partial_sig(&tx_id, &sender_pub_nonce, 0, 0)
&sender_pub_nonce, .unwrap();
0,
0,
).unwrap();
// Receiver now generates final signature from the two parts // Receiver now generates final signature from the two parts
let final_sig = keychain.aggsig_calculate_final_sig( let final_sig = keychain
&tx_id, .aggsig_calculate_final_sig(
&sender_sig_part, &tx_id,
&our_sig_part, &sender_sig_part,
&sender_pub_nonce, &our_sig_part,
).unwrap(); &sender_pub_nonce,
)
.unwrap();
// Receiver calculates the final public key (to verify sig later) // Receiver calculates the final public key (to verify sig later)
let final_pubkey = keychain.aggsig_calculate_final_pubkey(&tx_id, &sender_pub_excess).unwrap(); let final_pubkey = keychain
.aggsig_calculate_final_pubkey(&tx_id, &sender_pub_excess)
.unwrap();
(final_sig, final_pubkey) (final_sig, final_pubkey)
}; };
@ -849,12 +890,8 @@ mod test {
let keychain = receiver_keychain.clone(); let keychain = receiver_keychain.clone();
// Receiver checks that the final signature verifies // Receiver checks that the final signature verifies
let sig_verifies = keychain.aggsig_verify_final_sig_build_msg( let sig_verifies =
&final_sig, keychain.aggsig_verify_final_sig_build_msg(&final_sig, &final_pubkey, 0, 0);
&final_pubkey,
0,
0,
);
assert!(sig_verifies); assert!(sig_verifies);
} }
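In equation form, the flow this test exercises (standard two-party Schnorr aggregation; the symbols below are illustrative, not identifiers from this crate): party i holds excess x_i and nonce k_i with public parts P_i = x_i*G and R_i = k_i*G; each computes the challenge e over the message and the aggregate nonce, then a partial signature s_i = k_i + e*x_i; the final signature (s_1 + s_2, R_1 + R_2) verifies against the aggregate public key P_1 + P_2, which is presumably what aggsig_calculate_final_pubkey produces.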
@ -862,12 +899,7 @@ mod test {
{ {
let keychain = Keychain::from_random_seed().unwrap(); let keychain = Keychain::from_random_seed().unwrap();
let msg = secp::Message::from_slice( let msg = secp::Message::from_slice(&kernel_sig_msg(0, 0)).unwrap();
&kernel_sig_msg(
0,
0,
),
).unwrap();
let sig_verifies = Keychain::aggsig_verify_single_from_commit( let sig_verifies = Keychain::aggsig_verify_single_from_commit(
&keychain.secp, &keychain.secp,
@ -896,47 +928,47 @@ mod test {
// Calculate the kernel excess here for convenience. // Calculate the kernel excess here for convenience.
// Normally this would happen during transaction building. // Normally this would happen during transaction building.
let kernel_excess = { let kernel_excess = {
let skey1 = sender_keychain.derived_key( let skey1 = sender_keychain
&sender_keychain.derive_key_id(1).unwrap(), .derived_key(&sender_keychain.derive_key_id(1).unwrap())
).unwrap(); .unwrap();
let skey2 = receiver_keychain.derived_key( let skey2 = receiver_keychain
&receiver_keychain.derive_key_id(1).unwrap(), .derived_key(&receiver_keychain.derive_key_id(1).unwrap())
).unwrap(); .unwrap();
let keychain = Keychain::from_random_seed().unwrap(); let keychain = Keychain::from_random_seed().unwrap();
let blinding_factor = keychain.blind_sum( let blinding_factor = keychain
&BlindSum::new() .blind_sum(&BlindSum::new()
.sub_blinding_factor(BlindingFactor::from_secret_key(skey1)) .sub_blinding_factor(BlindingFactor::from_secret_key(skey1))
.add_blinding_factor(BlindingFactor::from_secret_key(skey2)) .add_blinding_factor(BlindingFactor::from_secret_key(skey2))
// subtract the kernel offset here as we would when // subtract the kernel offset here as we would when
// verifying a kernel signature // verifying a kernel signature
.sub_blinding_factor(BlindingFactor::from_secret_key(kernel_offset)) .sub_blinding_factor(BlindingFactor::from_secret_key(kernel_offset)))
).unwrap(); .unwrap();
keychain.secp.commit( keychain
0, .secp
blinding_factor.secret_key(&keychain.secp).unwrap(), .commit(0, blinding_factor.secret_key(&keychain.secp).unwrap())
).unwrap() .unwrap()
}; };
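The difference from the previous test, in equation form: the excess is now kernel_excess = (skey2 - skey1 - kernel_offset)*G, a commitment to zero under the net blinding factor minus the offset. The offset must then be supplied separately at verification time (elsewhere in Grin it travels with the block rather than inside the excess), which is why it is subtracted here as it would be when verifying a kernel signature.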
// sender starts the tx interaction // sender starts the tx interaction
let (sender_pub_excess, sender_pub_nonce) = { let (sender_pub_excess, sender_pub_nonce) = {
let keychain = sender_keychain.clone(); let keychain = sender_keychain.clone();
let skey = keychain.derived_key( let skey = keychain
&keychain.derive_key_id(1).unwrap(), .derived_key(&keychain.derive_key_id(1).unwrap())
).unwrap(); .unwrap();
// dealing with an input here so we need to negate the blinding_factor // dealing with an input here so we need to negate the blinding_factor
// rather than use it as is // rather than use it as is
let blinding_factor = keychain.blind_sum( let blinding_factor = keychain
&BlindSum::new() .blind_sum(&BlindSum::new()
.sub_blinding_factor(BlindingFactor::from_secret_key(skey)) .sub_blinding_factor(BlindingFactor::from_secret_key(skey))
// subtract the kernel offset to create an aggsig context // subtract the kernel offset to create an aggsig context
// with our "split" key // with our "split" key
.sub_blinding_factor(BlindingFactor::from_secret_key(kernel_offset)) .sub_blinding_factor(BlindingFactor::from_secret_key(kernel_offset)))
).unwrap(); .unwrap();
let blind = blinding_factor.secret_key(&keychain.secp()).unwrap(); let blind = blinding_factor.secret_key(&keychain.secp()).unwrap();
@ -955,12 +987,9 @@ mod test {
let (pub_excess, pub_nonce) = keychain.aggsig_get_public_keys(&tx_id); let (pub_excess, pub_nonce) = keychain.aggsig_get_public_keys(&tx_id);
keychain.aggsig_add_output(&tx_id, &key_id); keychain.aggsig_add_output(&tx_id, &key_id);
let sig_part = keychain.aggsig_calculate_partial_sig( let sig_part = keychain
&tx_id, .aggsig_calculate_partial_sig(&tx_id, &sender_pub_nonce, 0, 0)
&sender_pub_nonce, .unwrap();
0,
0,
).unwrap();
(pub_excess, pub_nonce, sig_part) (pub_excess, pub_nonce, sig_part)
}; };
@ -982,12 +1011,9 @@ mod test {
// now sender signs with their key // now sender signs with their key
let sender_sig_part = { let sender_sig_part = {
let keychain = sender_keychain.clone(); let keychain = sender_keychain.clone();
keychain.aggsig_calculate_partial_sig( keychain
&tx_id, .aggsig_calculate_partial_sig(&tx_id, &receiver_pub_nonce, 0, 0)
&receiver_pub_nonce, .unwrap()
0,
0,
).unwrap()
}; };
// check the receiver can verify the partial signature // check the receiver can verify the partial signature
@ -1010,23 +1036,24 @@ mod test {
let keychain = receiver_keychain.clone(); let keychain = receiver_keychain.clone();
// Receiver recreates their partial sig (we do not maintain state from earlier) // Receiver recreates their partial sig (we do not maintain state from earlier)
let our_sig_part = keychain.aggsig_calculate_partial_sig( let our_sig_part = keychain
&tx_id, .aggsig_calculate_partial_sig(&tx_id, &sender_pub_nonce, 0, 0)
&sender_pub_nonce, .unwrap();
0,
0,
).unwrap();
// Receiver now generates final signature from the two parts // Receiver now generates final signature from the two parts
let final_sig = keychain.aggsig_calculate_final_sig( let final_sig = keychain
&tx_id, .aggsig_calculate_final_sig(
&sender_sig_part, &tx_id,
&our_sig_part, &sender_sig_part,
&sender_pub_nonce, &our_sig_part,
).unwrap(); &sender_pub_nonce,
)
.unwrap();
// Receiver calculates the final public key (to verify sig later) // Receiver calculates the final public key (to verify sig later)
let final_pubkey = keychain.aggsig_calculate_final_pubkey(&tx_id, &sender_pub_excess).unwrap(); let final_pubkey = keychain
.aggsig_calculate_final_pubkey(&tx_id, &sender_pub_excess)
.unwrap();
(final_sig, final_pubkey) (final_sig, final_pubkey)
}; };
@ -1036,12 +1063,8 @@ mod test {
let keychain = receiver_keychain.clone(); let keychain = receiver_keychain.clone();
// Receiver checks that the final signature verifies // Receiver checks that the final signature verifies
let sig_verifies = keychain.aggsig_verify_final_sig_build_msg( let sig_verifies =
&final_sig, keychain.aggsig_verify_final_sig_build_msg(&final_sig, &final_pubkey, 0, 0);
&final_pubkey,
0,
0,
);
assert!(sig_verifies); assert!(sig_verifies);
} }
@ -1049,12 +1072,7 @@ mod test {
{ {
let keychain = Keychain::from_random_seed().unwrap(); let keychain = Keychain::from_random_seed().unwrap();
let msg = secp::Message::from_slice( let msg = secp::Message::from_slice(&kernel_sig_msg(0, 0)).unwrap();
&kernel_sig_msg(
0,
0,
),
).unwrap();
let sig_verifies = Keychain::aggsig_verify_single_from_commit( let sig_verifies = Keychain::aggsig_verify_single_from_commit(
&keychain.secp, &keychain.secp,

View file

@ -18,13 +18,13 @@ extern crate blake2_rfc as blake2;
extern crate byteorder; extern crate byteorder;
extern crate grin_util as util; extern crate grin_util as util;
extern crate rand; extern crate rand;
extern crate uuid;
extern crate serde; extern crate serde;
#[macro_use] #[macro_use]
extern crate serde_derive; extern crate serde_derive;
extern crate serde_json; extern crate serde_json;
#[macro_use] #[macro_use]
extern crate slog; extern crate slog;
extern crate uuid;
mod blind; mod blind;
mod extkey; mod extkey;
@ -32,4 +32,4 @@ mod extkey;
pub use blind::{BlindSum, BlindingFactor}; pub use blind::{BlindSum, BlindingFactor};
pub use extkey::{ExtendedKey, Identifier, IDENTIFIER_SIZE}; pub use extkey::{ExtendedKey, Identifier, IDENTIFIER_SIZE};
pub mod keychain; pub mod keychain;
pub use keychain::{Error, Keychain, AggSigTxContext}; pub use keychain::{AggSigTxContext, Error, Keychain};

View file

@ -23,7 +23,7 @@
use std::cmp; use std::cmp;
use std::fs::File; use std::fs::File;
use std::io::{self, Read, Write}; use std::io::{self, Read, Write};
use std::sync::{Arc, Mutex, mpsc}; use std::sync::{mpsc, Arc, Mutex};
use std::net::TcpStream; use std::net::TcpStream;
use std::thread; use std::thread;
use std::time; use std::time;
@ -64,13 +64,15 @@ pub struct Message<'a> {
} }
impl<'a> Message<'a> { impl<'a> Message<'a> {
fn from_header(header: MsgHeader, conn: &'a mut TcpStream) -> Message<'a> { fn from_header(header: MsgHeader, conn: &'a mut TcpStream) -> Message<'a> {
Message{header, conn} Message { header, conn }
} }
/// Read the message body from the underlying connection /// Read the message body from the underlying connection
pub fn body<T>(&mut self) -> Result<T, Error> where T: ser::Readable { pub fn body<T>(&mut self) -> Result<T, Error>
where
T: ser::Readable,
{
read_body(&self.header, self.conn) read_body(&self.header, self.conn)
} }
@ -89,10 +91,10 @@ impl<'a> Message<'a> {
/// Respond to the message with the provided message type and body /// Respond to the message with the provided message type and body
pub fn respond<T>(self, resp_type: Type, body: T) -> Response<'a> pub fn respond<T>(self, resp_type: Type, body: T) -> Response<'a>
where where
T: ser::Writeable T: ser::Writeable,
{ {
let body = ser::ser_vec(&body).unwrap(); let body = ser::ser_vec(&body).unwrap();
Response{ Response {
resp_type: resp_type, resp_type: resp_type,
body: body, body: body,
conn: self.conn, conn: self.conn,
@ -111,7 +113,8 @@ pub struct Response<'a> {
impl<'a> Response<'a> { impl<'a> Response<'a> {
fn write(mut self) -> Result<(), Error> { fn write(mut self) -> Result<(), Error> {
let mut msg = ser::ser_vec(&MsgHeader::new(self.resp_type, self.body.len() as u64)).unwrap(); let mut msg =
ser::ser_vec(&MsgHeader::new(self.resp_type, self.body.len() as u64)).unwrap();
msg.append(&mut self.body); msg.append(&mut self.body);
write_all(&mut self.conn, &msg[..], 10000)?; write_all(&mut self.conn, &msg[..], 10000)?;
if let Some(mut file) = self.attachment { if let Some(mut file) = self.attachment {
@ -149,7 +152,7 @@ pub struct Tracker {
impl Tracker { impl Tracker {
pub fn send<T>(&self, body: T, msg_type: Type) -> Result<(), Error> pub fn send<T>(&self, body: T, msg_type: Type) -> Result<(), Error>
where where
T: ser::Writeable T: ser::Writeable,
{ {
let buf = write_to_buf(body, msg_type); let buf = write_to_buf(body, msg_type);
self.send_channel.send(buf)?; self.send_channel.send(buf)?;
@ -168,7 +171,9 @@ where
let (close_tx, close_rx) = mpsc::channel(); let (close_tx, close_rx) = mpsc::channel();
let (error_tx, error_rx) = mpsc::channel(); let (error_tx, error_rx) = mpsc::channel();
stream.set_nonblocking(true).expect("Non-blocking IO not available."); stream
.set_nonblocking(true)
.expect("Non-blocking IO not available.");
poll(stream, handler, send_rx, error_tx, close_rx); poll(stream, handler, send_rx, error_tx, close_rx);
Tracker { Tracker {
@ -185,54 +190,67 @@ fn poll<H>(
handler: H, handler: H,
send_rx: mpsc::Receiver<Vec<u8>>, send_rx: mpsc::Receiver<Vec<u8>>,
error_tx: mpsc::Sender<Error>, error_tx: mpsc::Sender<Error>,
close_rx: mpsc::Receiver<()> close_rx: mpsc::Receiver<()>,
) ) where
where
H: MessageHandler, H: MessageHandler,
{ {
let mut conn = conn; let mut conn = conn;
let _ = thread::Builder::new().name("peer".to_string()).spawn(move || { let _ = thread::Builder::new()
let sleep_time = time::Duration::from_millis(1); .name("peer".to_string())
.spawn(move || {
let sleep_time = time::Duration::from_millis(1);
let conn = &mut conn; let conn = &mut conn;
let mut retry_send = Err(()); let mut retry_send = Err(());
loop { loop {
// check the read end // check the read end
if let Some(h) = try_break!(error_tx, read_header(conn)) { if let Some(h) = try_break!(error_tx, read_header(conn)) {
let msg = Message::from_header(h, conn); let msg = Message::from_header(h, conn);
debug!(LOGGER, "Received message header, type {:?}, len {}.", msg.header.msg_type, msg.header.msg_len); debug!(
if let Some(Some(resp)) = try_break!(error_tx, handler.consume(msg)) { LOGGER,
try_break!(error_tx, resp.write()); "Received message header, type {:?}, len {}.",
msg.header.msg_type,
msg.header.msg_len
);
if let Some(Some(resp)) = try_break!(error_tx, handler.consume(msg)) {
try_break!(error_tx, resp.write());
}
} }
}
// check the write end // check the write end
if let Ok::<Vec<u8>, ()>(data) = retry_send { if let Ok::<Vec<u8>, ()>(data) = retry_send {
if let None = try_break!(error_tx, conn.write_all(&data[..]).map_err(&From::from)) { if let None =
retry_send = Ok(data); try_break!(error_tx, conn.write_all(&data[..]).map_err(&From::from))
{
retry_send = Ok(data);
} else {
retry_send = Err(());
}
} else if let Ok(data) = send_rx.try_recv() {
if let None =
try_break!(error_tx, conn.write_all(&data[..]).map_err(&From::from))
{
retry_send = Ok(data);
} else {
retry_send = Err(());
}
} else { } else {
retry_send = Err(()); retry_send = Err(());
} }
} else if let Ok(data) = send_rx.try_recv() {
if let None = try_break!(error_tx, conn.write_all(&data[..]).map_err(&From::from)) { // check the close channel
retry_send = Ok(data); if let Ok(_) = close_rx.try_recv() {
} else { debug!(
retry_send = Err(()); LOGGER,
"Connection close with {} initiated by us",
conn.peer_addr()
.map(|a| a.to_string())
.unwrap_or("?".to_owned())
);
break;
} }
} else {
retry_send = Err(());
}
// check the close channel thread::sleep(sleep_time);
if let Ok(_) = close_rx.try_recv() {
debug!(LOGGER,
"Connection close with {} initiated by us",
conn.peer_addr().map(|a| a.to_string()).unwrap_or("?".to_owned()));
break;
} }
});
thread::sleep(sleep_time);
}
});
} }

View file

@ -13,7 +13,7 @@
// limitations under the License. // limitations under the License.
use std::collections::VecDeque; use std::collections::VecDeque;
use std::net::{TcpStream, SocketAddr}; use std::net::{SocketAddr, TcpStream};
use std::sync::{Arc, RwLock}; use std::sync::{Arc, RwLock};
use rand::Rng; use rand::Rng;
@ -60,7 +60,6 @@ impl Handshake {
self_addr: SocketAddr, self_addr: SocketAddr,
conn: &mut TcpStream, conn: &mut TcpStream,
) -> Result<PeerInfo, Error> { ) -> Result<PeerInfo, Error> {
// prepare the first part of the handshake // prepare the first part of the handshake
let nonce = self.next_nonce(); let nonce = self.next_nonce();
let peer_addr = match conn.peer_addr() { let peer_addr = match conn.peer_addr() {
@ -115,7 +114,7 @@ impl Handshake {
peer_info.addr, peer_info.addr,
peer_info.user_agent, peer_info.user_agent,
peer_info.capabilities peer_info.capabilities
); );
// when more than one protocol version is supported, choosing should go here // when more than one protocol version is supported, choosing should go here
Ok(peer_info) Ok(peer_info)
} }
@ -126,7 +125,6 @@ impl Handshake {
total_difficulty: Difficulty, total_difficulty: Difficulty,
conn: &mut TcpStream, conn: &mut TcpStream,
) -> Result<PeerInfo, Error> { ) -> Result<PeerInfo, Error> {
let hand: Hand = read_message(conn, Type::Hand)?; let hand: Hand = read_message(conn, Type::Hand)?;
// all the reasons we could refuse this connection // all the reasons we could refuse this connection
@ -201,23 +199,23 @@ impl Handshake {
// port reported by the connection is always incorrect for receiving // port reported by the connection is always incorrect for receiving
// connections as it's dynamically allocated by the server. // connections as it's dynamically allocated by the server.
fn extract_ip(advertised: &SocketAddr, conn: &TcpStream) -> SocketAddr { fn extract_ip(advertised: &SocketAddr, conn: &TcpStream) -> SocketAddr {
match advertised { match advertised {
&SocketAddr::V4(v4sock) => { &SocketAddr::V4(v4sock) => {
let ip = v4sock.ip(); let ip = v4sock.ip();
if ip.is_loopback() || ip.is_unspecified() { if ip.is_loopback() || ip.is_unspecified() {
if let Ok(addr) = conn.peer_addr() { if let Ok(addr) = conn.peer_addr() {
return SocketAddr::new(addr.ip(), advertised.port()); return SocketAddr::new(addr.ip(), advertised.port());
} }
} }
} }
&SocketAddr::V6(v6sock) => { &SocketAddr::V6(v6sock) => {
let ip = v6sock.ip(); let ip = v6sock.ip();
if ip.is_loopback() || ip.is_unspecified() { if ip.is_loopback() || ip.is_unspecified() {
if let Ok(addr) = conn.peer_addr() { if let Ok(addr) = conn.peer_addr() {
return SocketAddr::new(addr.ip(), advertised.port()); return SocketAddr::new(addr.ip(), advertised.port());
} }
} }
} }
} }
advertised.clone() advertised.clone()
} }
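A usage sketch of the rule extract_ip implements, using only std (the addresses are illustrative):

    use std::net::{IpAddr, Ipv4Addr, SocketAddr};

    fn main() {
        // the peer advertised an unspecified address; keep its advertised
        // port but substitute the IP actually observed on the connection
        let advertised: SocketAddr = "0.0.0.0:13414".parse().unwrap();
        let observed_ip = IpAddr::V4(Ipv4Addr::new(203, 0, 113, 7)); // conn.peer_addr().ip()
        let resolved = SocketAddr::new(observed_ip, advertised.port());
        assert_eq!(resolved.to_string(), "203.0.113.7:13414");
    }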

View file

@ -49,9 +49,9 @@ mod serv;
mod store; mod store;
mod types; mod types;
pub use serv::{Server, DummyAdapter}; pub use serv::{DummyAdapter, Server};
pub use peers::Peers; pub use peers::Peers;
pub use peer::Peer; pub use peer::Peer;
pub use types::{Capabilities, Error, ChainAdapter, SumtreesRead, P2PConfig, pub use types::{Capabilities, ChainAdapter, Error, P2PConfig, PeerInfo, SumtreesRead,
PeerInfo, MAX_BLOCK_HEADERS, MAX_PEER_ADDRS}; MAX_BLOCK_HEADERS, MAX_PEER_ADDRS};
pub use store::{PeerData, State}; pub use store::{PeerData, State};

View file

@ -15,7 +15,7 @@
//! Message types that transit over the network and related serialization code. //! Message types that transit over the network and related serialization code.
use std::io::{self, Read, Write}; use std::io::{self, Read, Write};
use std::net::{TcpStream, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6}; use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6, TcpStream};
use std::thread; use std::thread;
use std::time; use std::time;
use num::FromPrimitive; use num::FromPrimitive;
@ -81,14 +81,13 @@ enum_from_primitive! {
/// time is not guaranteed to be exact. To support cases where we want to poll /// time is not guaranteed to be exact. To support cases where we want to poll
/// instead of blocking, a `block_on_empty` boolean, when false, ensures /// instead of blocking, a `block_on_empty` boolean, when false, ensures
/// `read_exact` returns early with a `io::ErrorKind::WouldBlock` if nothing /// `read_exact` returns early with a `io::ErrorKind::WouldBlock` if nothing
/// has been read from the socket. /// has been read from the socket.
pub fn read_exact( pub fn read_exact(
conn: &mut TcpStream, conn: &mut TcpStream,
mut buf: &mut [u8], mut buf: &mut [u8],
timeout: u32, timeout: u32,
block_on_empty: bool, block_on_empty: bool,
) -> io::Result<()> { ) -> io::Result<()> {
let sleep_time = time::Duration::from_millis(1); let sleep_time = time::Duration::from_millis(1);
let mut count = 0; let mut count = 0;
@ -116,7 +115,10 @@ pub fn read_exact(
break; break;
} }
if count > timeout { if count > timeout {
return Err(io::Error::new(io::ErrorKind::TimedOut, "reading from tcp stream")); return Err(io::Error::new(
io::ErrorKind::TimedOut,
"reading from tcp stream",
));
} }
} }
Ok(()) Ok(())
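The non-blocking loop boils down to the following std-only pattern (a sketch with a hypothetical poll_read helper, not the exact function above):

    use std::io::{self, Read};
    use std::thread;
    use std::time::Duration;

    // retry a read until data arrives or ~timeout_ms 1ms sleeps have elapsed
    fn poll_read<R: Read>(conn: &mut R, buf: &mut [u8], timeout_ms: u32) -> io::Result<usize> {
        let mut count = 0;
        loop {
            match conn.read(buf) {
                Ok(n) => return Ok(n),
                // WouldBlock just means "nothing yet" on a non-blocking socket
                Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {}
                Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {}
                Err(e) => return Err(e),
            }
            thread::sleep(Duration::from_millis(1));
            count += 1;
            if count > timeout_ms {
                return Err(io::Error::new(io::ErrorKind::TimedOut, "poll_read"));
            }
        }
    }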
@ -124,14 +126,17 @@ pub fn read_exact(
/// Same as `read_exact` but for writing. /// Same as `read_exact` but for writing.
pub fn write_all(conn: &mut Write, mut buf: &[u8], timeout: u32) -> io::Result<()> { pub fn write_all(conn: &mut Write, mut buf: &[u8], timeout: u32) -> io::Result<()> {
let sleep_time = time::Duration::from_millis(1); let sleep_time = time::Duration::from_millis(1);
let mut count = 0; let mut count = 0;
while !buf.is_empty() { while !buf.is_empty() {
match conn.write(buf) { match conn.write(buf) {
Ok(0) => return Err(io::Error::new(io::ErrorKind::WriteZero, Ok(0) => {
"failed to write whole buffer")), return Err(io::Error::new(
io::ErrorKind::WriteZero,
"failed to write whole buffer",
))
}
Ok(n) => buf = &buf[n..], Ok(n) => buf = &buf[n..],
Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {} Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {}
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {} Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {}
@ -144,7 +149,10 @@ pub fn write_all(conn: &mut Write, mut buf: &[u8], timeout: u32) -> io::Result<(
break; break;
} }
if count > timeout { if count > timeout {
return Err(io::Error::new(io::ErrorKind::TimedOut, "reading from tcp stream")); return Err(io::Error::new(
io::ErrorKind::TimedOut,
"reading from tcp stream",
));
} }
} }
Ok(()) Ok(())
@ -154,7 +162,6 @@ pub fn write_all(conn: &mut Write, mut buf: &[u8], timeout: u32) -> io::Result<(
/// underlying stream is async. Typically headers will be polled for, so /// underlying stream is async. Typically headers will be polled for, so
/// we do not want to block. /// we do not want to block.
pub fn read_header(conn: &mut TcpStream) -> Result<MsgHeader, Error> { pub fn read_header(conn: &mut TcpStream) -> Result<MsgHeader, Error> {
let mut head = vec![0u8; HEADER_LEN as usize]; let mut head = vec![0u8; HEADER_LEN as usize];
read_exact(conn, &mut head, 10000, false)?; read_exact(conn, &mut head, 10000, false)?;
let header = ser::deserialize::<MsgHeader>(&mut &head[..])?; let header = ser::deserialize::<MsgHeader>(&mut &head[..])?;
@ -188,10 +195,7 @@ where
read_body(&header, conn) read_body(&header, conn)
} }
pub fn write_to_buf<T>( pub fn write_to_buf<T>(msg: T, msg_type: Type) -> Vec<u8>
msg: T,
msg_type: Type,
) -> Vec<u8>
where where
T: Writeable, T: Writeable,
{ {
@ -208,11 +212,7 @@ where
msg_buf msg_buf
} }
pub fn write_message<T>( pub fn write_message<T>(conn: &mut TcpStream, msg: T, msg_type: Type) -> Result<(), Error>
conn: &mut TcpStream,
msg: T,
msg_type: Type,
) -> Result<(), Error>
where where
T: Writeable + 'static, T: Writeable + 'static,
{ {
@ -597,11 +597,14 @@ impl Readable for Ping {
Ok(diff) => diff, Ok(diff) => diff,
Err(_) => Difficulty::zero(), Err(_) => Difficulty::zero(),
}; };
let height = match reader.read_u64(){ let height = match reader.read_u64() {
Ok(h) => h, Ok(h) => h,
Err(_) => 0, Err(_) => 0,
}; };
Ok(Ping { total_difficulty, height }) Ok(Ping {
total_difficulty,
height,
})
} }
} }
@ -610,7 +613,7 @@ pub struct Pong {
/// may be needed /// may be needed
pub total_difficulty: Difficulty, pub total_difficulty: Difficulty,
/// height accumulated by sender /// height accumulated by sender
pub height: u64 pub height: u64,
} }
impl Writeable for Pong { impl Writeable for Pong {
@ -632,7 +635,10 @@ impl Readable for Pong {
Ok(h) => h, Ok(h) => h,
Err(_) => 0, Err(_) => 0,
}; };
Ok(Pong { total_difficulty, height }) Ok(Pong {
total_difficulty,
height,
})
} }
} }
@ -641,8 +647,8 @@ impl Readable for Pong {
pub struct SumtreesRequest { pub struct SumtreesRequest {
/// Hash of the block for which the sumtrees should be provided /// Hash of the block for which the sumtrees should be provided
pub hash: Hash, pub hash: Hash,
/// Height of the corresponding block /// Height of the corresponding block
pub height: u64 pub height: u64,
} }
impl Writeable for SumtreesRequest { impl Writeable for SumtreesRequest {
@ -667,7 +673,7 @@ impl Readable for SumtreesRequest {
pub struct SumtreesArchive { pub struct SumtreesArchive {
/// Hash of the block for which the sumtrees are provided /// Hash of the block for which the sumtrees are provided
pub hash: Hash, pub hash: Hash,
/// Height of the corresponding block /// Height of the corresponding block
pub height: u64, pub height: u64,
/// Output tree index the receiver should rewind to /// Output tree index the receiver should rewind to
pub rewind_to_output: u64, pub rewind_to_output: u64,
@ -680,10 +686,13 @@ pub struct SumtreesArchive {
impl Writeable for SumtreesArchive { impl Writeable for SumtreesArchive {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> { fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
self.hash.write(writer)?; self.hash.write(writer)?;
ser_multiwrite!(writer, [write_u64, self.height], ser_multiwrite!(
[write_u64, self.rewind_to_output], writer,
[write_u64, self.rewind_to_kernel], [write_u64, self.height],
[write_u64, self.bytes]); [write_u64, self.rewind_to_output],
[write_u64, self.rewind_to_kernel],
[write_u64, self.bytes]
);
Ok(()) Ok(())
} }
} }
@ -694,6 +703,12 @@ impl Readable for SumtreesArchive {
let (height, rewind_to_output, rewind_to_kernel, bytes) = let (height, rewind_to_output, rewind_to_kernel, bytes) =
ser_multiread!(reader, read_u64, read_u64, read_u64, read_u64); ser_multiread!(reader, read_u64, read_u64, read_u64, read_u64);
Ok(SumtreesArchive {hash, height, rewind_to_output, rewind_to_kernel, bytes}) Ok(SumtreesArchive {
hash,
height,
rewind_to_output,
rewind_to_kernel,
bytes,
})
} }
} }
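For reference, a rough sketch of what these helper macros expand to (illustrative, not the literal expansion):

    // ser_multiwrite!(writer, [write_u64, self.height], ...) becomes roughly:
    writer.write_u64(self.height)?;
    writer.write_u64(self.rewind_to_output)?;
    writer.write_u64(self.rewind_to_kernel)?;
    writer.write_u64(self.bytes)?;
    // while ser_multiread!(reader, read_u64, read_u64, read_u64, read_u64)
    // yields a tuple, hence the four-way destructuring above:
    let (height, rewind_to_output, rewind_to_kernel, bytes) =
        (reader.read_u64()?, reader.read_u64()?, reader.read_u64()?, reader.read_u64()?);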

View file

@ -41,7 +41,7 @@ pub struct Peer {
state: Arc<RwLock<State>>, state: Arc<RwLock<State>>,
// set of all hashes known to this peer (so no need to send) // set of all hashes known to this peer (so no need to send)
tracking_adapter: TrackingAdapter, tracking_adapter: TrackingAdapter,
connection: Option<conn::Tracker> connection: Option<conn::Tracker>,
} }
unsafe impl Sync for Peer {} unsafe impl Sync for Peer {}
@ -65,7 +65,6 @@ impl Peer {
hs: &Handshake, hs: &Handshake,
na: Arc<NetAdapter>, na: Arc<NetAdapter>,
) -> Result<Peer, Error> { ) -> Result<Peer, Error> {
let info = hs.accept(capab, total_difficulty, conn)?; let info = hs.accept(capab, total_difficulty, conn)?;
Ok(Peer::new(info, na)) Ok(Peer::new(info, na))
} }
@ -78,7 +77,6 @@ impl Peer {
hs: &Handshake, hs: &Handshake,
na: Arc<NetAdapter>, na: Arc<NetAdapter>,
) -> Result<Peer, Error> { ) -> Result<Peer, Error> {
let info = hs.initiate(capab, total_difficulty, self_addr, conn)?; let info = hs.initiate(capab, total_difficulty, self_addr, conn)?;
Ok(Peer::new(info, na)) Ok(Peer::new(info, na))
} }
@ -96,31 +94,41 @@ impl Peer {
let peer = format!("{}:{}", peer_addr.ip(), peer_addr.port()); let peer = format!("{}:{}", peer_addr.ip(), peer_addr.port());
if let Some(ref denied) = config.peers_deny { if let Some(ref denied) = config.peers_deny {
if denied.contains(&peer) { if denied.contains(&peer) {
debug!(LOGGER, "checking peer allowed/denied: {:?} explicitly denied", peer_addr); debug!(
LOGGER,
"checking peer allowed/denied: {:?} explicitly denied", peer_addr
);
return true; return true;
} }
} }
if let Some(ref allowed) = config.peers_allow { if let Some(ref allowed) = config.peers_allow {
if allowed.contains(&peer) { if allowed.contains(&peer) {
debug!(LOGGER, "checking peer allowed/denied: {:?} explicitly allowed", peer_addr); debug!(
LOGGER,
"checking peer allowed/denied: {:?} explicitly allowed", peer_addr
);
return false; return false;
} else { } else {
debug!(LOGGER, "checking peer allowed/denied: {:?} not explicitly allowed, denying", peer_addr); debug!(
LOGGER,
"checking peer allowed/denied: {:?} not explicitly allowed, denying", peer_addr
);
return true; return true;
} }
} }
// default to allowing peer connection if we do not explicitly allow or deny the peer // default to allowing peer connection if we do not explicitly allow or deny
// the peer
false false
} }
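In short: an explicit deny always wins; when an allow-list is configured, only listed peers get through; with neither list set, the default below is to allow the connection.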
/// Whether this peer is still connected. /// Whether this peer is still connected.
pub fn is_connected(&self) -> bool { pub fn is_connected(&self) -> bool {
if !self.check_connection() { if !self.check_connection() {
return false return false;
} }
let state = self.state.read().unwrap(); let state = self.state.read().unwrap();
*state == State::Connected *state == State::Connected
} }
/// Whether this peer has been banned. /// Whether this peer has been banned.
@ -136,10 +144,17 @@ impl Peer {
*state = State::Banned; *state = State::Banned;
} }
/// Send a ping to the remote peer, providing our local difficulty and height /// Send a ping to the remote peer, providing our local difficulty and
/// height
pub fn send_ping(&self, total_difficulty: Difficulty, height: u64) -> Result<(), Error> { pub fn send_ping(&self, total_difficulty: Difficulty, height: u64) -> Result<(), Error> {
let ping_msg = Ping{total_difficulty, height}; let ping_msg = Ping {
self.connection.as_ref().unwrap().send(ping_msg, msg::Type::Ping) total_difficulty,
height,
};
self.connection
.as_ref()
.unwrap()
.send(ping_msg, msg::Type::Ping)
} }
/// Sends the provided block to the remote peer. The request may be dropped /// Sends the provided block to the remote peer. The request may be dropped
@ -161,8 +176,16 @@ impl Peer {
pub fn send_compact_block(&self, b: &core::CompactBlock) -> Result<(), Error> { pub fn send_compact_block(&self, b: &core::CompactBlock) -> Result<(), Error> {
if !self.tracking_adapter.has(b.hash()) { if !self.tracking_adapter.has(b.hash()) {
debug!(LOGGER, "Send compact block {} to {}", b.hash(), self.info.addr); debug!(
self.connection.as_ref().unwrap().send(b, msg::Type::CompactBlock) LOGGER,
"Send compact block {} to {}",
b.hash(),
self.info.addr
);
self.connection
.as_ref()
.unwrap()
.send(b, msg::Type::CompactBlock)
} else { } else {
debug!( debug!(
LOGGER, LOGGER,
@ -177,7 +200,10 @@ impl Peer {
pub fn send_header(&self, bh: &core::BlockHeader) -> Result<(), Error> { pub fn send_header(&self, bh: &core::BlockHeader) -> Result<(), Error> {
if !self.tracking_adapter.has(bh.hash()) { if !self.tracking_adapter.has(bh.hash()) {
debug!(LOGGER, "Send header {} to {}", bh.hash(), self.info.addr); debug!(LOGGER, "Send header {} to {}", bh.hash(), self.info.addr);
self.connection.as_ref().unwrap().send(bh, msg::Type::Header) self.connection
.as_ref()
.unwrap()
.send(bh, msg::Type::Header)
} else { } else {
debug!( debug!(
LOGGER, LOGGER,
@ -194,32 +220,51 @@ impl Peer {
pub fn send_transaction(&self, tx: &core::Transaction) -> Result<(), Error> { pub fn send_transaction(&self, tx: &core::Transaction) -> Result<(), Error> {
if !self.tracking_adapter.has(tx.hash()) { if !self.tracking_adapter.has(tx.hash()) {
debug!(LOGGER, "Send tx {} to {}", tx.hash(), self.info.addr); debug!(LOGGER, "Send tx {} to {}", tx.hash(), self.info.addr);
self.connection.as_ref().unwrap().send(tx, msg::Type::Transaction) self.connection
.as_ref()
.unwrap()
.send(tx, msg::Type::Transaction)
} else { } else {
debug!(LOGGER, "Not sending tx {} to {} (already seen)", tx.hash(), self.info.addr); debug!(
LOGGER,
"Not sending tx {} to {} (already seen)",
tx.hash(),
self.info.addr
);
Ok(()) Ok(())
} }
} }
/// Sends a request for block headers from the provided block locator /// Sends a request for block headers from the provided block locator
pub fn send_header_request(&self, locator: Vec<Hash>) -> Result<(), Error> { pub fn send_header_request(&self, locator: Vec<Hash>) -> Result<(), Error> {
self.connection.as_ref().unwrap().send( self.connection
&Locator { .as_ref()
hashes: locator, .unwrap()
}, .send(&Locator { hashes: locator }, msg::Type::GetHeaders)
msg::Type::GetHeaders)
} }
/// Sends a request for a specific block by hash /// Sends a request for a specific block by hash
pub fn send_block_request(&self, h: Hash) -> Result<(), Error> { pub fn send_block_request(&self, h: Hash) -> Result<(), Error> {
debug!(LOGGER, "Requesting block {} from peer {}.", h, self.info.addr); debug!(
self.connection.as_ref().unwrap().send(&h, msg::Type::GetBlock) LOGGER,
"Requesting block {} from peer {}.", h, self.info.addr
);
self.connection
.as_ref()
.unwrap()
.send(&h, msg::Type::GetBlock)
} }
/// Sends a request for a specific compact block by hash /// Sends a request for a specific compact block by hash
pub fn send_compact_block_request(&self, h: Hash) -> Result<(), Error> { pub fn send_compact_block_request(&self, h: Hash) -> Result<(), Error> {
debug!(LOGGER, "Requesting compact block {} from {}", h, self.info.addr); debug!(
self.connection.as_ref().unwrap().send(&h, msg::Type::GetCompactBlock) LOGGER,
"Requesting compact block {} from {}", h, self.info.addr
);
self.connection
.as_ref()
.unwrap()
.send(&h, msg::Type::GetCompactBlock)
} }
pub fn send_peer_request(&self, capab: Capabilities) -> Result<(), Error> { pub fn send_peer_request(&self, capab: Capabilities) -> Result<(), Error> {
@ -228,14 +273,19 @@ impl Peer {
&GetPeerAddrs { &GetPeerAddrs {
capabilities: capab, capabilities: capab,
}, },
msg::Type::GetPeerAddrs) msg::Type::GetPeerAddrs,
)
} }
pub fn send_sumtrees_request(&self, height: u64, hash: Hash) -> Result<(), Error> { pub fn send_sumtrees_request(&self, height: u64, hash: Hash) -> Result<(), Error> {
debug!(LOGGER, "Asking {} for sumtree archive at {} {}.", debug!(
self.info.addr, height, hash); LOGGER,
"Asking {} for sumtree archive at {} {}.", self.info.addr, height, hash
);
self.connection.as_ref().unwrap().send( self.connection.as_ref().unwrap().send(
&SumtreesRequest {hash, height }, msg::Type::SumtreesRequest) &SumtreesRequest { hash, height },
msg::Type::SumtreesRequest,
)
} }
/// Stops the peer, closing its connection /// Stops the peer, closing its connection
@ -248,7 +298,10 @@ impl Peer {
Ok(Error::Serialization(e)) => { Ok(Error::Serialization(e)) => {
let mut state = self.state.write().unwrap(); let mut state = self.state.write().unwrap();
*state = State::Banned; *state = State::Banned;
info!(LOGGER, "Client {} corrupted, ban ({:?}).", self.info.addr, e); info!(
LOGGER,
"Client {} corrupted, ban ({:?}).", self.info.addr, e
);
false false
} }
Ok(e) => { Ok(e) => {
@ -339,11 +392,21 @@ impl ChainAdapter for TrackingAdapter {
self.adapter.sumtrees_read(h) self.adapter.sumtrees_read(h)
} }
fn sumtrees_write(&self, h: Hash, fn sumtrees_write(
rewind_to_output: u64, rewind_to_kernel: u64, &self,
sumtree_data: File, peer_addr: SocketAddr) -> bool { h: Hash,
self.adapter.sumtrees_write(h, rewind_to_output, rewind_to_kernel, rewind_to_output: u64,
sumtree_data, peer_addr) rewind_to_kernel: u64,
sumtree_data: File,
peer_addr: SocketAddr,
) -> bool {
self.adapter.sumtrees_write(
h,
rewind_to_output,
rewind_to_kernel,
sumtree_data,
peer_addr,
)
} }
} }
@ -356,7 +419,7 @@ impl NetAdapter for TrackingAdapter {
self.adapter.peer_addrs_received(addrs) self.adapter.peer_addrs_received(addrs)
} }
fn peer_difficulty(&self, addr: SocketAddr, diff: Difficulty, height:u64) { fn peer_difficulty(&self, addr: SocketAddr, diff: Difficulty, height: u64) {
self.adapter.peer_difficulty(addr, diff, height) self.adapter.peer_difficulty(addr, diff, height)
} }
} }

View file

@ -26,7 +26,7 @@ use util::LOGGER;
use time; use time;
use peer::Peer; use peer::Peer;
use store::{PeerStore, PeerData, State}; use store::{PeerData, PeerStore, State};
use types::*; use types::*;
pub struct Peers { pub struct Peers {
@ -446,7 +446,10 @@ impl ChainAdapter for Peers {
if !self.adapter.block_received(b, peer_addr) { if !self.adapter.block_received(b, peer_addr) {
// if the peer sent us a block that's intrinsically bad // if the peer sent us a block that's intrinsically bad
// they are either mistaken or malevolent, both of which require a ban // they are either mistaken or malevolent, both of which require a ban
debug!(LOGGER, "Received a bad block {} from {}, the peer will be banned", hash, peer_addr); debug!(
LOGGER,
"Received a bad block {} from {}, the peer will be banned", hash, peer_addr
);
self.ban_peer(&peer_addr); self.ban_peer(&peer_addr);
false false
} else { } else {
@ -458,7 +461,12 @@ impl ChainAdapter for Peers {
if !self.adapter.compact_block_received(cb, peer_addr) { if !self.adapter.compact_block_received(cb, peer_addr) {
// if the peer sent us a block that's intrinsically bad // if the peer sent us a block that's intrinsically bad
// they are either mistaken or malevolent, both of which require a ban // they are either mistaken or malevolent, both of which require a ban
debug!(LOGGER, "Received a bad compact block {} from {}, the peer will be banned", hash, &peer_addr); debug!(
LOGGER,
"Received a bad compact block {} from {}, the peer will be banned",
hash,
&peer_addr
);
self.ban_peer(&peer_addr); self.ban_peer(&peer_addr);
false false
} else { } else {
@ -495,9 +503,17 @@ impl ChainAdapter for Peers {
sumtree_data: File, sumtree_data: File,
peer_addr: SocketAddr, peer_addr: SocketAddr,
) -> bool { ) -> bool {
if !self.adapter.sumtrees_write(h, rewind_to_output, rewind_to_kernel, if !self.adapter.sumtrees_write(
sumtree_data, peer_addr) { h,
debug!(LOGGER, "Received a bad sumtree data from {}, the peer will be banned", &peer_addr); rewind_to_output,
rewind_to_kernel,
sumtree_data,
peer_addr,
) {
debug!(
LOGGER,
"Received a bad sumtree data from {}, the peer will be banned", &peer_addr
);
self.ban_peer(&peer_addr); self.ban_peer(&peer_addr);
false false
} else { } else {

View file

@ -33,7 +33,7 @@ pub struct Protocol {
impl Protocol { impl Protocol {
pub fn new(adapter: Arc<NetAdapter>, addr: SocketAddr) -> Protocol { pub fn new(adapter: Arc<NetAdapter>, addr: SocketAddr) -> Protocol {
Protocol{adapter, addr} Protocol { adapter, addr }
} }
} }
@ -42,26 +42,24 @@ impl MessageHandler for Protocol {
let adapter = &self.adapter; let adapter = &self.adapter;
match msg.header.msg_type { match msg.header.msg_type {
Type::Ping => { Type::Ping => {
let ping: Ping = msg.body()?; let ping: Ping = msg.body()?;
adapter.peer_difficulty(self.addr, ping.total_difficulty, ping.height); adapter.peer_difficulty(self.addr, ping.total_difficulty, ping.height);
Ok(Some( Ok(Some(msg.respond(
msg.respond( Type::Pong,
Type::Pong, Pong {
Pong { total_difficulty: adapter.total_difficulty(),
total_difficulty: adapter.total_difficulty(), height: adapter.total_height(),
height: adapter.total_height(), },
}) )))
))
} }
Type::Pong => { Type::Pong => {
let pong: Pong = msg.body()?; let pong: Pong = msg.body()?;
adapter.peer_difficulty(self.addr, pong.total_difficulty, pong.height); adapter.peer_difficulty(self.addr, pong.total_difficulty, pong.height);
Ok(None) Ok(None)
}, }
Type::Transaction => { Type::Transaction => {
let tx: core::Transaction = msg.body()?; let tx: core::Transaction = msg.body()?;
@ -90,7 +88,6 @@ impl MessageHandler for Protocol {
Ok(None) Ok(None)
} }
Type::GetCompactBlock => { Type::GetCompactBlock => {
let h: Hash = msg.body()?; let h: Hash = msg.body()?;
debug!(LOGGER, "handle_payload: GetCompactBlock: {}", h); debug!(LOGGER, "handle_payload: GetCompactBlock: {}", h);
@ -110,7 +107,7 @@ impl MessageHandler for Protocol {
debug!( debug!(
LOGGER, LOGGER,
"handle_payload: GetCompactBlock: empty block, sending full block", "handle_payload: GetCompactBlock: empty block, sending full block",
); );
Ok(Some(msg.respond(Type::Block, b))) Ok(Some(msg.respond(Type::Block, b)))
} else { } else {
@ -136,7 +133,10 @@ impl MessageHandler for Protocol {
let headers = adapter.locate_headers(loc.hashes); let headers = adapter.locate_headers(loc.hashes);
// serialize and send all the headers over // serialize and send all the headers over
Ok(Some(msg.respond(Type::Headers, Headers { headers: headers }))) Ok(Some(msg.respond(
Type::Headers,
Headers { headers: headers },
)))
} }
// "header first" block propagation - if we have not yet seen this block // "header first" block propagation - if we have not yet seen this block
@ -160,13 +160,12 @@ impl MessageHandler for Protocol {
Type::GetPeerAddrs => { Type::GetPeerAddrs => {
let get_peers: GetPeerAddrs = msg.body()?; let get_peers: GetPeerAddrs = msg.body()?;
let peer_addrs = adapter.find_peer_addrs(get_peers.capabilities); let peer_addrs = adapter.find_peer_addrs(get_peers.capabilities);
Ok(Some( Ok(Some(msg.respond(
msg.respond( Type::PeerAddrs,
Type::PeerAddrs, PeerAddrs {
PeerAddrs { peers: peer_addrs.iter().map(|sa| SockAddr(*sa)).collect(),
peers: peer_addrs.iter().map(|sa| SockAddr(*sa)).collect(), },
}) )))
))
} }
Type::PeerAddrs => { Type::PeerAddrs => {
@ -177,8 +176,10 @@ impl MessageHandler for Protocol {
Type::SumtreesRequest => { Type::SumtreesRequest => {
let sm_req: SumtreesRequest = msg.body()?; let sm_req: SumtreesRequest = msg.body()?;
debug!(LOGGER, "handle_payload: sumtree req for {} at {}", debug!(
sm_req.hash, sm_req.height); LOGGER,
"handle_payload: sumtree req for {} at {}", sm_req.hash, sm_req.height
);
let sumtrees = self.adapter.sumtrees_read(sm_req.hash); let sumtrees = self.adapter.sumtrees_read(sm_req.hash);
@ -192,7 +193,8 @@ impl MessageHandler for Protocol {
rewind_to_output: sumtrees.output_index, rewind_to_output: sumtrees.output_index,
rewind_to_kernel: sumtrees.kernel_index, rewind_to_kernel: sumtrees.kernel_index,
bytes: file_sz, bytes: file_sz,
}); },
);
resp.add_attachment(sumtrees.reader); resp.add_attachment(sumtrees.reader);
Ok(Some(resp)) Ok(Some(resp))
} else { } else {
@ -202,22 +204,31 @@ impl MessageHandler for Protocol {
Type::SumtreesArchive => { Type::SumtreesArchive => {
let sm_arch: SumtreesArchive = msg.body()?; let sm_arch: SumtreesArchive = msg.body()?;
debug!(LOGGER, "handle_payload: sumtree archive for {} at {} rewind to {}/{}", debug!(
sm_arch.hash, sm_arch.height, LOGGER,
sm_arch.rewind_to_output, sm_arch.rewind_to_kernel); "handle_payload: sumtree archive for {} at {} rewind to {}/{}",
sm_arch.hash,
sm_arch.height,
sm_arch.rewind_to_output,
sm_arch.rewind_to_kernel
);
let mut tmp = env::temp_dir(); let mut tmp = env::temp_dir();
tmp.push("sumtree.zip"); tmp.push("sumtree.zip");
{ {
let mut tmp_zip = File::create(tmp.clone())?; let mut tmp_zip = File::create(tmp.clone())?;
msg.copy_attachment(sm_arch.bytes as usize, &mut tmp_zip)?; msg.copy_attachment(sm_arch.bytes as usize, &mut tmp_zip)?;
tmp_zip.sync_all()?; tmp_zip.sync_all()?;
} }
let tmp_zip = File::open(tmp)?; let tmp_zip = File::open(tmp)?;
self.adapter.sumtrees_write( self.adapter.sumtrees_write(
sm_arch.hash, sm_arch.rewind_to_output, sm_arch.hash,
sm_arch.rewind_to_kernel, tmp_zip, self.addr); sm_arch.rewind_to_output,
sm_arch.rewind_to_kernel,
tmp_zip,
self.addr,
);
Ok(None) Ok(None)
} }

View file

@ -14,7 +14,7 @@
use std::fs::File; use std::fs::File;
use std::io; use std::io;
use std::net::{TcpListener, TcpStream, SocketAddr, Shutdown}; use std::net::{Shutdown, SocketAddr, TcpListener, TcpStream};
use std::sync::{Arc, RwLock}; use std::sync::{Arc, RwLock};
use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::atomic::{AtomicBool, Ordering};
use std::thread; use std::thread;
@ -45,7 +45,6 @@ unsafe impl Send for Server {}
// TODO TLS // TODO TLS
impl Server { impl Server {
/// Creates a new idle p2p server with no peers /// Creates a new idle p2p server with no peers
pub fn new( pub fn new(
db_root: String, db_root: String,
@ -55,7 +54,6 @@ impl Server {
genesis: Hash, genesis: Hash,
stop: Arc<AtomicBool>, stop: Arc<AtomicBool>,
) -> Result<Server, Error> { ) -> Result<Server, Error> {
Ok(Server { Ok(Server {
config: config.clone(), config: config.clone(),
capabilities: capab, capabilities: capab,
@ -71,8 +69,9 @@ impl Server {
// start peer monitoring thread // start peer monitoring thread
let peers_inner = self.peers.clone(); let peers_inner = self.peers.clone();
let stop = self.stop.clone(); let stop = self.stop.clone();
let _ = thread::Builder::new().name("p2p-monitor".to_string()).spawn(move || { let _ = thread::Builder::new()
loop { .name("p2p-monitor".to_string())
.spawn(move || loop {
let total_diff = peers_inner.total_difficulty(); let total_diff = peers_inner.total_difficulty();
let total_height = peers_inner.total_height(); let total_height = peers_inner.total_height();
peers_inner.check_all(total_diff, total_height); peers_inner.check_all(total_diff, total_height);
@ -80,8 +79,7 @@ impl Server {
if stop.load(Ordering::Relaxed) { if stop.load(Ordering::Relaxed) {
break; break;
} }
} });
});
// start TCP listener and handle incoming connections // start TCP listener and handle incoming connections
let addr = SocketAddr::new(self.config.host, self.config.port); let addr = SocketAddr::new(self.config.host, self.config.port);
@ -98,7 +96,8 @@ impl Server {
LOGGER, LOGGER,
"Error accepting peer {}: {:?}", "Error accepting peer {}: {:?}",
peer_addr.to_string(), peer_addr.to_string(),
e); e
);
} }
} }
} }
@ -207,10 +206,16 @@ impl ChainAdapter for DummyAdapter {
0 0
} }
fn transaction_received(&self, _: core::Transaction) {} fn transaction_received(&self, _: core::Transaction) {}
fn compact_block_received(&self, _cb: core::CompactBlock, _addr: SocketAddr) -> bool { true } fn compact_block_received(&self, _cb: core::CompactBlock, _addr: SocketAddr) -> bool {
fn header_received(&self, _bh: core::BlockHeader, _addr: SocketAddr) -> bool { true } true
fn block_received(&self, _: core::Block, _: SocketAddr) -> bool { true } }
fn headers_received(&self, _: Vec<core::BlockHeader>, _:SocketAddr) {} fn header_received(&self, _bh: core::BlockHeader, _addr: SocketAddr) -> bool {
true
}
fn block_received(&self, _: core::Block, _: SocketAddr) -> bool {
true
}
fn headers_received(&self, _: Vec<core::BlockHeader>, _: SocketAddr) {}
fn locate_headers(&self, _: Vec<Hash>) -> Vec<core::BlockHeader> { fn locate_headers(&self, _: Vec<Hash>) -> Vec<core::BlockHeader> {
vec![] vec![]
} }
@ -221,9 +226,14 @@ impl ChainAdapter for DummyAdapter {
unimplemented!() unimplemented!()
} }
fn sumtrees_write(&self, _h: Hash, fn sumtrees_write(
_rewind_to_output: u64, _rewind_to_kernel: u64, &self,
_sumtree_data: File, _peer_addr: SocketAddr) -> bool { _h: Hash,
_rewind_to_output: u64,
_rewind_to_kernel: u64,
_sumtree_data: File,
_peer_addr: SocketAddr,
) -> bool {
false false
} }
} }
@ -233,5 +243,5 @@ impl NetAdapter for DummyAdapter {
vec![] vec![]
} }
fn peer_addrs_received(&self, _: Vec<SocketAddr>) {} fn peer_addrs_received(&self, _: Vec<SocketAddr>) {}
fn peer_difficulty(&self, _: SocketAddr, _: Difficulty, _:u64) {} fn peer_difficulty(&self, _: SocketAddr, _: Difficulty, _: u64) {}
} }

View file

@ -155,5 +155,8 @@ impl PeerStore {
} }
fn peer_key(peer_addr: SocketAddr) -> Vec<u8> { fn peer_key(peer_addr: SocketAddr) -> Vec<u8> {
to_key(PEER_PREFIX, &mut format!("{}:{}", peer_addr.ip(), peer_addr.port()).into_bytes()) to_key(
PEER_PREFIX,
&mut format!("{}:{}", peer_addr.ip(), peer_addr.port()).into_bytes(),
)
} }

View file

@ -119,7 +119,9 @@ bitflags! {
/// Can provide a list of healthy peers /// Can provide a list of healthy peers
const PEER_LIST = 0b00000100; const PEER_LIST = 0b00000100;
const FULL_NODE = Capabilities::FULL_HIST.bits | Capabilities::UTXO_HIST.bits | Capabilities::PEER_LIST.bits; const FULL_NODE = Capabilities::FULL_HIST.bits
| Capabilities::UTXO_HIST.bits
| Capabilities::PEER_LIST.bits;
} }
} }
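A minimal sketch of the composition (the bit values of FULL_HIST and UTXO_HIST are assumptions; only PEER_LIST's 0b100 is visible above):

    const FULL_HIST: u32 = 0b001; // assumed
    const UTXO_HIST: u32 = 0b010; // assumed
    const PEER_LIST: u32 = 0b100;
    const FULL_NODE: u32 = FULL_HIST | UTXO_HIST | PEER_LIST; // == 0b111

    fn main() {
        // capability checks are then simple mask tests
        assert_ne!(FULL_NODE & PEER_LIST, 0); // a full node can serve peer lists
    }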
@ -199,9 +201,14 @@ pub trait ChainAdapter: Sync + Send {
/// If we're willing to accept that new state, the data stream will be /// If we're willing to accept that new state, the data stream will be
/// read as a zip file, unzipped and the resulting state files should be /// read as a zip file, unzipped and the resulting state files should be
/// rewound to the provided indexes. /// rewound to the provided indexes.
fn sumtrees_write(&self, h: Hash, fn sumtrees_write(
rewind_to_output: u64, rewind_to_kernel: u64, &self,
sumtree_data: File, peer_addr: SocketAddr) -> bool; h: Hash,
rewind_to_output: u64,
rewind_to_kernel: u64,
sumtree_data: File,
peer_addr: SocketAddr,
) -> bool;
} }
/// Additional methods required by the protocol that don't need to be /// Additional methods required by the protocol that don't need to be

View file

@ -47,19 +47,19 @@ fn peer_handshake() {
peers_deny: None, peers_deny: None,
}; };
let net_adapter = Arc::new(p2p::DummyAdapter {}); let net_adapter = Arc::new(p2p::DummyAdapter {});
let server = Arc::new(p2p::Server::new( let server = Arc::new(
".grin".to_owned(), p2p::Server::new(
p2p::Capabilities::UNKNOWN, ".grin".to_owned(),
p2p_conf.clone(), p2p::Capabilities::UNKNOWN,
net_adapter.clone(), p2p_conf.clone(),
Hash::from_vec(vec![]), net_adapter.clone(),
Arc::new(AtomicBool::new(false)), Hash::from_vec(vec![]),
).unwrap()); Arc::new(AtomicBool::new(false)),
).unwrap(),
);
let p2p_inner = server.clone(); let p2p_inner = server.clone();
let _ = thread::spawn(move || { let _ = thread::spawn(move || p2p_inner.listen());
p2p_inner.listen()
});
thread::sleep(time::Duration::from_secs(1)); thread::sleep(time::Duration::from_secs(1));
@ -81,7 +81,7 @@ fn peer_handshake() {
peer.send_ping(Difficulty::one(), 0).unwrap(); peer.send_ping(Difficulty::one(), 0).unwrap();
thread::sleep(time::Duration::from_secs(1)); thread::sleep(time::Duration::from_secs(1));
let server_peer = server.peers.get_connected_peer(&my_addr).unwrap(); let server_peer = server.peers.get_connected_peer(&my_addr).unwrap();
let server_peer = server_peer.read().unwrap(); let server_peer = server_peer.read().unwrap();
assert_eq!(server_peer.info.total_difficulty, Difficulty::one()); assert_eq!(server_peer.info.total_difficulty, Difficulty::one());

View file

@ -12,13 +12,12 @@ use std::clone::Clone;
use std::sync::RwLock; use std::sync::RwLock;
use core::core::{block, hash, transaction}; use core::core::{block, hash, transaction};
use core::core::{OutputFeatures, Input, OutputIdentifier}; use core::core::{Input, OutputFeatures, OutputIdentifier};
use core::global; use core::global;
use core::core::hash::Hashed; use core::core::hash::Hashed;
use types::{BlockChain, PoolError}; use types::{BlockChain, PoolError};
use util::secp::pedersen::Commitment; use util::secp::pedersen::Commitment;
/// A DummyUtxoSet for mocking up the chain /// A DummyUtxoSet for mocking up the chain
pub struct DummyUtxoSet { pub struct DummyUtxoSet {
outputs: HashMap<Commitment, transaction::Output>, outputs: HashMap<Commitment, transaction::Output>,
@ -119,10 +118,7 @@ impl BlockChain for DummyChainImpl {
} }
let block_hash = input.block_hash.expect("requires a block hash"); let block_hash = input.block_hash.expect("requires a block hash");
let headers = self.block_headers.read().unwrap(); let headers = self.block_headers.read().unwrap();
if let Some(h) = headers if let Some(h) = headers.iter().find(|x| x.hash() == block_hash) {
.iter()
.find(|x| x.hash() == block_hash)
{
if h.height + global::coinbase_maturity() < height { if h.height + global::coinbase_maturity() < height {
return Ok(()); return Ok(());
} }
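A worked example, assuming global::coinbase_maturity() returns 1000 (the actual value is configuration, not shown here): a coinbase output minted at height 500 satisfies 500 + 1000 < height only once the spending height exceeds 1500; before that, the early Ok(()) above is skipped and the input is still considered immature.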

View file

@ -130,9 +130,7 @@ impl fmt::Debug for Edge {
write!( write!(
f, f,
"Edge {{source: {:?}, destination: {:?}, commitment: {:?}}}", "Edge {{source: {:?}, destination: {:?}, commitment: {:?}}}",
self.source, self.source, self.destination, self.output
self.destination,
self.output
) )
} }
} }
@ -193,14 +191,14 @@ impl DirectedGraph {
let mut new_vertices: Vec<PoolEntry> = vec![]; let mut new_vertices: Vec<PoolEntry> = vec![];
// first find the set of all destinations from the edges in the graph // first find the set of all destinations from the edges in the graph
// a root is a vertex that is not a destination of any edge // a root is a vertex that is not a destination of any edge
let destinations = self.edges let destinations = self.edges
.values() .values()
.filter_map(|edge| edge.destination) .filter_map(|edge| edge.destination)
.collect::<HashSet<_>>(); .collect::<HashSet<_>>();
// now iterate over the current non-root vertices // now iterate over the current non-root vertices
// and check whether each one is now a root based on the set of edge destinations // and check whether each one is now a root based on the set of edge destinations
for x in &self.vertices { for x in &self.vertices {
if destinations.contains(&x.transaction_hash) { if destinations.contains(&x.transaction_hash) {
new_vertices.push(x.clone()); new_vertices.push(x.clone());
@ -309,11 +307,8 @@ mod tests {
let output_commit = keychain.commit(70, &key_id1).unwrap(); let output_commit = keychain.commit(70, &key_id1).unwrap();
let switch_commit = keychain.switch_commit(&key_id1).unwrap(); let switch_commit = keychain.switch_commit(&key_id1).unwrap();
let switch_commit_hash = SwitchCommitHash::from_switch_commit( let switch_commit_hash =
switch_commit, SwitchCommitHash::from_switch_commit(switch_commit, &keychain, &key_id1);
&keychain,
&key_id1,
);
let inputs = vec![ let inputs = vec![
core::transaction::Input::new( core::transaction::Input::new(
@ -336,7 +331,13 @@ mod tests {
commit: output_commit, commit: output_commit,
switch_commit_hash: switch_commit_hash, switch_commit_hash: switch_commit_hash,
proof: keychain proof: keychain
.range_proof(100, &key_id1, output_commit, Some(switch_commit_hash.as_ref().to_vec()), msg) .range_proof(
100,
&key_id1,
output_commit,
Some(switch_commit_hash.as_ref().to_vec()),
msg,
)
.unwrap(), .unwrap(),
}; };
@ -344,11 +345,8 @@ mod tests {
.with_fee(5) .with_fee(5)
.with_lock_height(0); .with_lock_height(0);
let test_transaction = core::transaction::Transaction::new( let test_transaction =
inputs, core::transaction::Transaction::new(inputs, vec![output], vec![kernel]);
vec![output],
vec![kernel],
);
let test_pool_entry = PoolEntry::new(&test_transaction); let test_pool_entry = PoolEntry::new(&test_transaction);

View file

@ -51,11 +51,7 @@ where
T: BlockChain, T: BlockChain,
{ {
/// Create a new transaction pool /// Create a new transaction pool
pub fn new( pub fn new(config: PoolConfig, chain: Arc<T>, adapter: Arc<PoolAdapter>) -> TransactionPool<T> {
config: PoolConfig,
chain: Arc<T>,
adapter: Arc<PoolAdapter>,
) -> TransactionPool<T> {
TransactionPool { TransactionPool {
config: config, config: config,
transactions: HashMap::new(), transactions: HashMap::new(),
@ -129,29 +125,26 @@ where
// unspent set, represented by blockchain unspents - pool spents, for an // unspent set, represented by blockchain unspents - pool spents, for an
// output designated by output_commitment. // output designated by output_commitment.
fn search_blockchain_unspents(&self, output_ref: &OutputIdentifier) -> Option<Parent> { fn search_blockchain_unspents(&self, output_ref: &OutputIdentifier) -> Option<Parent> {
self.blockchain self.blockchain.is_unspent(output_ref).ok().map(|_| {
.is_unspent(output_ref) match self.pool.get_blockchain_spent(&output_ref.commit) {
.ok() Some(x) => {
.map(|_| { let other_tx = x.destination_hash().unwrap();
match self.pool.get_blockchain_spent(&output_ref.commit) { Parent::AlreadySpent { other_tx }
Some(x) => {
let other_tx = x.destination_hash().unwrap();
Parent::AlreadySpent { other_tx }
}
None => Parent::BlockTransaction,
} }
}) None => Parent::BlockTransaction,
}
})
} }
// search_pool_spents is the second half of pool input detection, after the // search_pool_spents is the second half of pool input detection, after the
// available_outputs have been checked. This returns either a // available_outputs have been checked. This returns either a
// Parent::AlreadySpent or None. // Parent::AlreadySpent or None.
fn search_pool_spents(&self, output_commitment: &Commitment) -> Option<Parent> { fn search_pool_spents(&self, output_commitment: &Commitment) -> Option<Parent> {
self.pool.get_internal_spent(output_commitment).map(|x| { self.pool
Parent::AlreadySpent { .get_internal_spent(output_commitment)
.map(|x| Parent::AlreadySpent {
other_tx: x.destination_hash().unwrap(), other_tx: x.destination_hash().unwrap(),
} })
})
} }
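Taken together, the two lookups above implement a small precedence: a chain-unspent output resolves to `BlockTransaction` unless a pool transaction already claims it, and a pool-internal spend resolves to `AlreadySpent`. A sketch with a boolean and `Option<u64>` values standing in for the chain query and the spent-output maps:

#[derive(Debug, PartialEq)]
enum Parent {
    Unknown,
    BlockTransaction,
    AlreadySpent { other_tx: u64 },
}

fn resolve_parent(
    unspent_on_chain: bool,   // result of the blockchain is_unspent check
    chain_spend: Option<u64>, // pool tx already spending this chain output
    pool_spend: Option<u64>,  // pool tx already spending this pool output
) -> Parent {
    if unspent_on_chain {
        return match chain_spend {
            Some(other_tx) => Parent::AlreadySpent { other_tx },
            None => Parent::BlockTransaction,
        };
    }
    match pool_spend {
        Some(other_tx) => Parent::AlreadySpent { other_tx },
        None => Parent::Unknown,
    }
}

fn main() {
    assert_eq!(resolve_parent(true, None, None), Parent::BlockTransaction);
    assert_eq!(
        resolve_parent(false, None, Some(42)),
        Parent::AlreadySpent { other_tx: 42 }
    );
}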
/// Get the number of transactions in the pool /// Get the number of transactions in the pool
@ -189,14 +182,14 @@ where
tx.validate().map_err(|e| PoolError::InvalidTx(e))?; tx.validate().map_err(|e| PoolError::InvalidTx(e))?;
// The first check involves ensuring that an identical transaction is // The first check involves ensuring that an identical transaction is
// not already in the pool's transaction set. // not already in the pool's transaction set.
// A similar, non-authoritative check should be performed under the // A similar, non-authoritative check should be performed under the
// pool's read lock before we get to this point, which would catch the // pool's read lock before we get to this point, which would catch the
// majority of duplicate cases. The race condition is caught here. // majority of duplicate cases. The race condition is caught here.
// TODO: When the transaction identifier is finalized, the assumptions // TODO: When the transaction identifier is finalized, the assumptions
// here may change depending on the exact coverage of the identifier. // here may change depending on the exact coverage of the identifier.
// The current tx.hash() method, for example, does not cover changes // The current tx.hash() method, for example, does not cover changes
// to fees or other elements of the signature preimage. // to fees or other elements of the signature preimage.
let tx_hash = graph::transaction_identifier(&tx); let tx_hash = graph::transaction_identifier(&tx);
if self.transactions.contains_key(&tx_hash) { if self.transactions.contains_key(&tx_hash) {
return Err(PoolError::AlreadyInPool); return Err(PoolError::AlreadyInPool);
@ -243,11 +236,11 @@ where
let is_orphan = orphan_refs.len() > 0; let is_orphan = orphan_refs.len() > 0;
// Next we examine the outputs this transaction creates and ensure // Next we examine the outputs this transaction creates and ensure
// that they do not already exist. // that they do not already exist.
// I believe it's worth preventing duplicate outputs from being // I believe it's worth preventing duplicate outputs from being
// accepted, even though it is possible for them to be mined // accepted, even though it is possible for them to be mined
// with strict ordering. In the future, if desirable, this could // with strict ordering. In the future, if desirable, this could
// be node policy config or more intelligent. // be node policy config or more intelligent.
for output in &tx.outputs { for output in &tx.outputs {
self.check_duplicate_outputs(output, is_orphan)? self.check_duplicate_outputs(output, is_orphan)?
} }
@ -283,13 +276,13 @@ where
Ok(()) Ok(())
} else { } else {
// At this point, we're pretty sure the transaction is an orphan, // At this point, we're pretty sure the transaction is an orphan,
// but we have to explicitly check for double spends against the // but we have to explicitly check for double spends against the
// orphans set; we do not check this as part of the connectivity // orphans set; we do not check this as part of the connectivity
// checking above. // checking above.
// First, any references resolved to the pool need to be compared // First, any references resolved to the pool need to be compared
// against active orphan pool_connections. // against active orphan pool_connections.
// Note that pool_connections here also does double duty to // Note that pool_connections here also does double duty to
// account for blockchain connections. // account for blockchain connections.
for pool_ref in pool_refs.iter().chain(blockchain_refs.iter()) { for pool_ref in pool_refs.iter().chain(blockchain_refs.iter()) {
match self.orphans match self.orphans
.get_external_spent_output(&pool_ref.output_commitment()) .get_external_spent_output(&pool_ref.output_commitment())
@ -306,9 +299,9 @@ where
} }
// Next, we have to consider the possibility of double spends // Next, we have to consider the possibility of double spends
// within the orphans set. // within the orphans set.
// We also have to distinguish now between missing and internal // We also have to distinguish now between missing and internal
// references. // references.
let missing_refs = self.resolve_orphan_refs(tx_hash, &mut orphan_refs)?; let missing_refs = self.resolve_orphan_refs(tx_hash, &mut orphan_refs)?;
// We have passed all failure modes. // We have passed all failure modes.
@ -347,7 +340,6 @@ where
}); });
} }
// Check for existence of this output in the pool // Check for existence of this output in the pool
match self.pool.find_output(&output.commitment()) { match self.pool.find_output(&output.commitment()) {
Some(x) => { Some(x) => {
@ -360,9 +352,8 @@ where
None => {} None => {}
}; };
// If the transaction might go into orphans, perform the same // If the transaction might go into orphans, perform the same
// checks as above but against the orphan set instead. // checks as above but against the orphan set instead.
if is_orphan { if is_orphan {
// Checking against orphan outputs // Checking against orphan outputs
match self.orphans.find_output(&output.commitment()) { match self.orphans.find_output(&output.commitment()) {
@ -376,7 +367,7 @@ where
None => {} None => {}
}; };
// No need to check pool connections since those are covered // No need to check pool connections since those are covered
// by pool unspents and blockchain connections. // by pool unspents and blockchain connections.
} }
Ok(()) Ok(())
} }
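The lookups this hunk reformats are, in essence, membership tests against the pool's and orphans' output maps; a sketch using `HashSet<u64>` as a simplified stand-in for the commitment-indexed structures:

use std::collections::HashSet;

fn check_duplicate(
    commit: u64,
    pool_outputs: &HashSet<u64>,
    orphan_outputs: &HashSet<u64>,
    is_orphan: bool,
) -> Result<(), String> {
    if pool_outputs.contains(&commit) {
        return Err(format!("duplicate output in pool: {}", commit));
    }
    // only would-be orphans are also checked against the orphan set
    if is_orphan && orphan_outputs.contains(&commit) {
        return Err(format!("duplicate output in orphans: {}", commit));
    }
    Ok(())
}

fn main() {
    let pool: HashSet<u64> = [7].iter().cloned().collect();
    let orphans = HashSet::new();
    assert!(check_duplicate(7, &pool, &orphans, false).is_err());
    assert!(check_duplicate(8, &pool, &orphans, true).is_ok());
}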
@ -414,9 +405,9 @@ where
} }
None => { None => {
// The reference does not resolve to anything. // The reference does not resolve to anything.
// Make sure this missing_output has not already // Make sure this missing_output has not already
// been claimed, then add this entry to // been claimed, then add this entry to
// missing_refs // missing_refs
match self.orphans.get_unknown_output(&orphan_commitment) { match self.orphans.get_unknown_output(&orphan_commitment) {
Some(x) => { Some(x) => {
return Err(PoolError::DoubleSpend { return Err(PoolError::DoubleSpend {
@ -464,34 +455,34 @@ where
block: &block::Block, block: &block::Block,
) -> Result<Vec<Box<transaction::Transaction>>, PoolError> { ) -> Result<Vec<Box<transaction::Transaction>>, PoolError> {
// If this pool has been kept in sync correctly, serializing all // If this pool has been kept in sync correctly, serializing all
// updates, then the inputs must consume only members of the blockchain // updates, then the inputs must consume only members of the blockchain
// utxo set. // utxo set.
// If the block has been resolved properly and reduced fully to its // If the block has been resolved properly and reduced fully to its
// canonical form, no inputs may consume outputs generated by previous // canonical form, no inputs may consume outputs generated by previous
// transactions in the block; they would be cut-through. TODO: If this // transactions in the block; they would be cut-through. TODO: If this
// is not consensus enforced, then logic must be added here to account // is not consensus enforced, then logic must be added here to account
// for that. // for that.
// Based on this, we operate under the following algorithm: // Based on this, we operate under the following algorithm:
// For each block input, we examine the pool transaction, if any, that // For each block input, we examine the pool transaction, if any, that
// consumes the same blockchain output. // consumes the same blockchain output.
// If one exists, we mark the transaction and then examine its // If one exists, we mark the transaction and then examine its
// children. Recursively, we mark each child until a child is // children. Recursively, we mark each child until a child is
// fully satisfied by outputs in the updated utxo view (after // fully satisfied by outputs in the updated utxo view (after
// reconciliation of the block), or there are no more children. // reconciliation of the block), or there are no more children.
// //
// Additionally, to protect our invariant dictating no duplicate // Additionally, to protect our invariant dictating no duplicate
// outputs, each output generated by the new utxo set is checked // outputs, each output generated by the new utxo set is checked
// against outputs generated by the pool and the corresponding // against outputs generated by the pool and the corresponding
// transactions are also marked. // transactions are also marked.
// //
// After marking concludes, sweeping begins. In order, the marked // After marking concludes, sweeping begins. In order, the marked
// transactions are removed, the vertexes corresponding to the // transactions are removed, the vertexes corresponding to the
// transactions are removed, all the marked transactions' outputs are // transactions are removed, all the marked transactions' outputs are
// removed, and all remaining non-blockchain inputs are returned to the // removed, and all remaining non-blockchain inputs are returned to the
// unspent_outputs set. // unspent_outputs set.
// //
// After the pool has been successfully processed, an orphans // After the pool has been successfully processed, an orphans
// reconciliation job is triggered. // reconciliation job is triggered.
let mut marked_transactions: HashSet<hash::Hash> = HashSet::new(); let mut marked_transactions: HashSet<hash::Hash> = HashSet::new();
{ {
@ -504,7 +495,7 @@ where
.collect(); .collect();
// find all outputs that conflict - potential for duplicates so use a HashSet // find all outputs that conflict - potential for duplicates so use a HashSet
// here // here
let conflicting_outputs: HashSet<hash::Hash> = block let conflicting_outputs: HashSet<hash::Hash> = block
.outputs .outputs
.iter() .iter()
@ -517,7 +508,7 @@ where
.collect(); .collect();
// now iterate over all conflicting hashes from both txs and outputs // now iterate over all conflicting hashes from both txs and outputs
// we can just use the union of the two sets here to remove duplicates // we can just use the union of the two sets here to remove duplicates
for &txh in conflicting_txs.union(&conflicting_outputs) { for &txh in conflicting_txs.union(&conflicting_outputs) {
self.mark_transaction(txh, &mut marked_transactions); self.mark_transaction(txh, &mut marked_transactions);
} }
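The marking phase described above is a depth-first traversal over pool children; a sketch assuming a simple parent-to-children map keyed by transaction hash (`u64` stands in for `hash::Hash`):

use std::collections::{HashMap, HashSet};

fn mark(tx: u64, children: &HashMap<u64, Vec<u64>>, marked: &mut HashSet<u64>) {
    if marked.insert(tx) {
        // recursively mark descendants; the real pool stops early when a
        // child is fully satisfied by the post-block utxo view
        for &child in children.get(&tx).into_iter().flatten() {
            mark(child, children, marked);
        }
    }
}

fn main() {
    let mut children = HashMap::new();
    children.insert(1, vec![2]);
    children.insert(2, vec![3]);
    let mut marked = HashSet::new();
    mark(1, &children, &mut marked); // tx 1 conflicts with a block input
    assert_eq!(marked.len(), 3);     // the whole chain 1 -> 2 -> 3 gets swept
}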
@ -585,7 +576,7 @@ where
} }
// final step is to update the pool to reflect the new set of roots // final step is to update the pool to reflect the new set of roots
// a tx that was non-root may now be root based on the txs removed // a tx that was non-root may now be root based on the txs removed
self.pool.update_roots(); self.pool.update_roots();
removed_txs removed_txs
@ -617,9 +608,9 @@ where
return Err(PoolError::OverCapacity); return Err(PoolError::OverCapacity);
} }
// for a basic transaction (1 input, 2 outputs) - // for a basic transaction (1 input, 2 outputs) -
// (-1 * 1) + (4 * 2) + 1 = 8 // (-1 * 1) + (4 * 2) + 1 = 8
// 8 * 10 = 80 // 8 * 10 = 80
if self.config.accept_fee_base > 0 { if self.config.accept_fee_base > 0 {
let mut tx_weight = -1 * (tx.inputs.len() as i32) + (4 * tx.outputs.len() as i32) + 1; let mut tx_weight = -1 * (tx.inputs.len() as i32) + (4 * tx.outputs.len() as i32) + 1;
if tx_weight < 1 { if tx_weight < 1 {
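As a worked example of the threshold the comment describes, here is the weight-times-base computation on its own; the base of 10 is just the comment's illustrative figure (the configured default is `MILLI_GRIN`):

// weight = -1 * inputs + 4 * outputs + 1, clamped to at least 1,
// then multiplied by the accept fee base
fn min_fee(num_inputs: i64, num_outputs: i64, accept_fee_base: u64) -> u64 {
    let mut tx_weight = -num_inputs + 4 * num_outputs + 1;
    if tx_weight < 1 {
        tx_weight = 1; // output-poor txs still pay the base fee
    }
    tx_weight as u64 * accept_fee_base
}

fn main() {
    // the basic 1-input/2-output case from the comment: weight 8, base 10 -> 80
    assert_eq!(min_fee(1, 2, 10), 80);
}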
@ -773,7 +764,7 @@ mod tests {
}; };
// To test DoubleSpend and AlreadyInPool conditions, we need to add // To test DoubleSpend and AlreadyInPool conditions, we need to add
// a valid transaction. // a valid transaction.
let valid_transaction = test_transaction(vec![5, 6], vec![9]); let valid_transaction = test_transaction(vec![5, 6], vec![9]);
match write_pool.add_to_memory_pool(test_source(), valid_transaction.clone()) { match write_pool.add_to_memory_pool(test_source(), valid_transaction.clone()) {
@ -782,7 +773,7 @@ mod tests {
}; };
// Now, test a DoubleSpend by consuming the same blockchain unspent // Now, test a DoubleSpend by consuming the same blockchain unspent
// as valid_transaction: // as valid_transaction:
let double_spend_transaction = test_transaction(vec![6], vec![2]); let double_spend_transaction = test_transaction(vec![6], vec![2]);
match write_pool.add_to_memory_pool(test_source(), double_spend_transaction) { match write_pool.add_to_memory_pool(test_source(), double_spend_transaction) {
@ -824,7 +815,7 @@ mod tests {
assert_eq!(write_pool.total_size(), 1); assert_eq!(write_pool.total_size(), 1);
// now attempt to add a timelocked tx to the pool // now attempt to add a timelocked tx to the pool
// should fail as invalid based on current height // should fail as invalid based on current height
let timelocked_tx_1 = timelocked_transaction(vec![9], vec![5], 10); let timelocked_tx_1 = timelocked_transaction(vec![9], vec![5], 10);
match write_pool.add_to_memory_pool(test_source(), timelocked_tx_1) { match write_pool.add_to_memory_pool(test_source(), timelocked_tx_1) {
Err(PoolError::ImmatureTransaction { Err(PoolError::ImmatureTransaction {
@ -867,14 +858,10 @@ mod tests {
}; };
chain_ref.store_head_header(&head_header); chain_ref.store_head_header(&head_header);
let txn = test_transaction_with_coinbase_input( let txn = test_transaction_with_coinbase_input(15, coinbase_header.hash(), vec![10, 3]);
15,
coinbase_header.hash(),
vec![10, 3],
);
let result = write_pool.add_to_memory_pool(test_source(), txn); let result = write_pool.add_to_memory_pool(test_source(), txn);
match result { match result {
Err(InvalidTx(transaction::Error::ImmatureCoinbase)) => {}, Err(InvalidTx(transaction::Error::ImmatureCoinbase)) => {}
_ => panic!("expected ImmatureCoinbase error here"), _ => panic!("expected ImmatureCoinbase error here"),
}; };
@ -884,11 +871,7 @@ mod tests {
}; };
chain_ref.store_head_header(&head_header); chain_ref.store_head_header(&head_header);
let txn = test_transaction_with_coinbase_input( let txn = test_transaction_with_coinbase_input(15, coinbase_header.hash(), vec![10, 3]);
15,
coinbase_header.hash(),
vec![10, 3],
);
let result = write_pool.add_to_memory_pool(test_source(), txn); let result = write_pool.add_to_memory_pool(test_source(), txn);
match result { match result {
Ok(_) => {} Ok(_) => {}
@ -920,8 +903,8 @@ mod tests {
let pool = RwLock::new(test_setup(&chain_ref)); let pool = RwLock::new(test_setup(&chain_ref));
// now create two txs // now create two txs
// tx1 spends the UTXO // tx1 spends the UTXO
// tx2 spends output from tx1 // tx2 spends output from tx1
let tx1 = test_transaction(vec![100], vec![90]); let tx1 = test_transaction(vec![100], vec![90]);
let tx2 = test_transaction(vec![90], vec![80]); let tx2 = test_transaction(vec![90], vec![80]);
@ -930,7 +913,7 @@ mod tests {
assert_eq!(write_pool.total_size(), 0); assert_eq!(write_pool.total_size(), 0);
// now add both txs to the pool (tx2 spends tx1 with zero confirmations) // now add both txs to the pool (tx2 spends tx1 with zero confirmations)
// both should be accepted if tx1 added before tx2 // both should be accepted if tx1 added before tx2
write_pool.add_to_memory_pool(test_source(), tx1).unwrap(); write_pool.add_to_memory_pool(test_source(), tx1).unwrap();
write_pool.add_to_memory_pool(test_source(), tx2).unwrap(); write_pool.add_to_memory_pool(test_source(), tx2).unwrap();
@ -944,7 +927,7 @@ mod tests {
txs = mineable_txs.drain(..).map(|x| *x).collect(); txs = mineable_txs.drain(..).map(|x| *x).collect();
// confirm we can prepare both txs for mining here // confirm we can prepare both txs for mining here
// one root tx in the pool, and one non-root vertex in the pool // one root tx in the pool, and one non-root vertex in the pool
assert_eq!(txs.len(), 2); assert_eq!(txs.len(), 2);
} }
@ -964,7 +947,7 @@ mod tests {
chain_ref.apply_block(&block); chain_ref.apply_block(&block);
// now reconcile the block // now reconcile the block
// we should evict both txs here // we should evict both txs here
{ {
let mut write_pool = pool.write().unwrap(); let mut write_pool = pool.write().unwrap();
let evicted_transactions = write_pool.reconcile_block(&block).unwrap(); let evicted_transactions = write_pool.reconcile_block(&block).unwrap();
@ -972,7 +955,7 @@ mod tests {
} }
// check the pool is consistent after reconciling the block // check the pool is consistent after reconciling the block
// we should have zero txs in the pool (neither roots nor non-roots) // we should have zero txs in the pool (neither roots nor non-roots)
{ {
let read_pool = pool.write().unwrap(); let read_pool = pool.write().unwrap();
assert_eq!(read_pool.pool.len_vertices(), 0); assert_eq!(read_pool.pool.len_vertices(), 0);
@ -1003,26 +986,26 @@ mod tests {
let pool = RwLock::new(test_setup(&chain_ref)); let pool = RwLock::new(test_setup(&chain_ref));
// Preparation: We will introduce three root pool transactions. // Preparation: We will introduce three root pool transactions.
// 1. A transaction that should be invalidated because it is exactly // 1. A transaction that should be invalidated because it is exactly
// contained in the block. // contained in the block.
// 2. A transaction that should be invalidated because the input is // 2. A transaction that should be invalidated because the input is
// consumed in the block, although it is not exactly consumed. // consumed in the block, although it is not exactly consumed.
// 3. A transaction that should remain after block reconciliation. // 3. A transaction that should remain after block reconciliation.
let block_transaction = test_transaction(vec![10], vec![8]); let block_transaction = test_transaction(vec![10], vec![8]);
let conflict_transaction = test_transaction(vec![20], vec![12, 6]); let conflict_transaction = test_transaction(vec![20], vec![12, 6]);
let valid_transaction = test_transaction(vec![30], vec![13, 15]); let valid_transaction = test_transaction(vec![30], vec![13, 15]);
// We will also introduce a few children: // We will also introduce a few children:
// 4. A transaction that descends from transaction 1, that is in // 4. A transaction that descends from transaction 1, that is in
// turn exactly contained in the block. // turn exactly contained in the block.
let block_child = test_transaction(vec![8], vec![5, 1]); let block_child = test_transaction(vec![8], vec![5, 1]);
// 5. A transaction that descends from transaction 4, that is not // 5. A transaction that descends from transaction 4, that is not
// contained in the block at all and should be valid after // contained in the block at all and should be valid after
// reconciliation. // reconciliation.
let pool_child = test_transaction(vec![5], vec![3]); let pool_child = test_transaction(vec![5], vec![3]);
// 6. A transaction that descends from transaction 2 that does not // 6. A transaction that descends from transaction 2 that does not
// conflict with anything in the block in any way, but should be // conflict with anything in the block in any way, but should be
// invalidated (orphaned). // invalidated (orphaned).
let conflict_child = test_transaction(vec![12], vec![2]); let conflict_child = test_transaction(vec![12], vec![2]);
// 7. A transaction that descends from transaction 2 that should be // 7. A transaction that descends from transaction 2 that should be
// valid due to its inputs being satisfied by the block. // valid due to its inputs being satisfied by the block.
@ -1107,7 +1090,6 @@ mod tests {
// check the specific transactions that were evicted. // check the specific transactions that were evicted.
} }
// Using the pool's methods to validate a few end conditions. // Using the pool's methods to validate a few end conditions.
{ {
let read_pool = pool.read().unwrap(); let read_pool = pool.read().unwrap();
@ -1263,12 +1245,8 @@ mod tests {
) -> transaction::Transaction { ) -> transaction::Transaction {
let keychain = keychain_for_tests(); let keychain = keychain_for_tests();
let input_sum = input_values let input_sum = input_values.iter().sum::<u64>() as i64;
.iter() let output_sum = output_values.iter().sum::<u64>() as i64;
.sum::<u64>() as i64;
let output_sum = output_values
.iter()
.sum::<u64>() as i64;
let fees: i64 = input_sum - output_sum; let fees: i64 = input_sum - output_sum;
assert!(fees >= 0); assert!(fees >= 0);
@ -1296,9 +1274,7 @@ mod tests {
) -> transaction::Transaction { ) -> transaction::Transaction {
let keychain = keychain_for_tests(); let keychain = keychain_for_tests();
let output_sum = output_values let output_sum = output_values.iter().sum::<u64>() as i64;
.iter()
.sum::<u64>() as i64;
let fees: i64 = input_value as i64 - output_sum; let fees: i64 = input_value as i64 - output_sum;
assert!(fees >= 0); assert!(fees >= 0);
@ -1309,18 +1285,16 @@ mod tests {
node: Hash::zero(), node: Hash::zero(),
root: Hash::zero(), root: Hash::zero(),
peaks: vec![Hash::zero()], peaks: vec![Hash::zero()],
.. MerkleProof::default() ..MerkleProof::default()
}; };
let key_id = keychain.derive_key_id(input_value as u32).unwrap(); let key_id = keychain.derive_key_id(input_value as u32).unwrap();
tx_elements.push( tx_elements.push(build::coinbase_input(
build::coinbase_input( input_value,
input_value, input_block_hash,
input_block_hash, merkle_proof,
merkle_proof, key_id,
key_id, ));
),
);
for output_value in output_values { for output_value in output_values {
let key_id = keychain.derive_key_id(output_value as u32).unwrap(); let key_id = keychain.derive_key_id(output_value as u32).unwrap();
@ -1367,13 +1341,18 @@ mod tests {
let key_id = keychain.derive_key_id(value as u32).unwrap(); let key_id = keychain.derive_key_id(value as u32).unwrap();
let commit = keychain.commit(value, &key_id).unwrap(); let commit = keychain.commit(value, &key_id).unwrap();
let switch_commit = keychain.switch_commit(&key_id).unwrap(); let switch_commit = keychain.switch_commit(&key_id).unwrap();
let switch_commit_hash = SwitchCommitHash::from_switch_commit( let switch_commit_hash =
switch_commit, SwitchCommitHash::from_switch_commit(switch_commit, &keychain, &key_id);
&keychain,
&key_id,
);
let msg = secp::pedersen::ProofMessage::empty(); let msg = secp::pedersen::ProofMessage::empty();
let proof = keychain.range_proof(value, &key_id, commit, Some(switch_commit_hash.as_ref().to_vec()), msg).unwrap(); let proof = keychain
.range_proof(
value,
&key_id,
commit,
Some(switch_commit_hash.as_ref().to_vec()),
msg,
)
.unwrap();
transaction::Output { transaction::Output {
features: transaction::OutputFeatures::DEFAULT_OUTPUT, features: transaction::OutputFeatures::DEFAULT_OUTPUT,
@ -1389,13 +1368,18 @@ mod tests {
let key_id = keychain.derive_key_id(value as u32).unwrap(); let key_id = keychain.derive_key_id(value as u32).unwrap();
let commit = keychain.commit(value, &key_id).unwrap(); let commit = keychain.commit(value, &key_id).unwrap();
let switch_commit = keychain.switch_commit(&key_id).unwrap(); let switch_commit = keychain.switch_commit(&key_id).unwrap();
let switch_commit_hash = SwitchCommitHash::from_switch_commit( let switch_commit_hash =
switch_commit, SwitchCommitHash::from_switch_commit(switch_commit, &keychain, &key_id);
&keychain,
&key_id,
);
let msg = secp::pedersen::ProofMessage::empty(); let msg = secp::pedersen::ProofMessage::empty();
let proof = keychain.range_proof(value, &key_id, commit, Some(switch_commit_hash.as_ref().to_vec()), msg).unwrap(); let proof = keychain
.range_proof(
value,
&key_id,
commit,
Some(switch_commit_hash.as_ref().to_vec()),
msg,
)
.unwrap();
transaction::Output { transaction::Output {
features: transaction::OutputFeatures::COINBASE_OUTPUT, features: transaction::OutputFeatures::COINBASE_OUTPUT,

View file

@ -52,7 +52,7 @@ impl Default for PoolConfig {
} }
fn default_accept_fee_base() -> u64 { fn default_accept_fee_base() -> u64 {
consensus::MILLI_GRIN consensus::MILLI_GRIN
} }
fn default_max_pool_size() -> usize { fn default_max_pool_size() -> usize {
50_000 50_000
@ -86,15 +86,11 @@ impl fmt::Debug for Parent {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self { match self {
&Parent::Unknown => write!(f, "Parent: Unknown"), &Parent::Unknown => write!(f, "Parent: Unknown"),
&Parent::BlockTransaction => { &Parent::BlockTransaction => write!(f, "Parent: Block Transaction"),
write!(f, "Parent: Block Transaction")
}
&Parent::PoolTransaction { tx_ref: x } => { &Parent::PoolTransaction { tx_ref: x } => {
write!(f, "Parent: Pool Transaction ({:?})", x) write!(f, "Parent: Pool Transaction ({:?})", x)
} }
&Parent::AlreadySpent { other_tx: x } => { &Parent::AlreadySpent { other_tx: x } => write!(f, "Parent: Already Spent By {:?}", x),
write!(f, "Parent: Already Spent By {:?}", x)
}
} }
} }
} }
@ -259,7 +255,7 @@ impl Pool {
} }
// Adding the transaction to the vertices list along with internal // Adding the transaction to the vertices list along with internal
// pool edges // pool edges
self.graph.add_entry(pool_entry, pool_refs); self.graph.add_entry(pool_entry, pool_refs);
// Adding the new unspents to the unspent map // Adding the new unspents to the unspent map
@ -421,12 +417,11 @@ impl Orphans {
} }
// if missing_refs is the same length as orphan_refs, we have // if missing_refs is the same length as orphan_refs, we have
// no orphan-orphan links for this transaction and it is a // no orphan-orphan links for this transaction and it is a
// root transaction of the orphans set // root transaction of the orphans set
self.graph self.graph
.add_vertex_only(orphan_entry, is_missing.len() == orphan_refs.len()); .add_vertex_only(orphan_entry, is_missing.len() == orphan_refs.len());
// Adding the new unspents to the unspent map // Adding the new unspents to the unspent map
for unspent_output in new_unspents.drain(..) { for unspent_output in new_unspents.drain(..) {
self.available_outputs self.available_outputs

View file

@ -60,10 +60,12 @@ impl Cuckoo {
let hashed = blake2::blake2b::blake2b(32, &[], header); let hashed = blake2::blake2b::blake2b(32, &[], header);
let hashed = hashed.as_bytes(); let hashed = hashed.as_bytes();
Cuckoo { Cuckoo {
v: [u8_to_u64(hashed, 0), v: [
u8_to_u64(hashed, 8), u8_to_u64(hashed, 0),
u8_to_u64(hashed, 16), u8_to_u64(hashed, 8),
u8_to_u64(hashed, 24)], u8_to_u64(hashed, 16),
u8_to_u64(hashed, 24),
],
size: size, size: size,
mask: (1 << sizeshift) / 2 - 1, mask: (1 << sizeshift) / 2 - 1,
} }
@ -305,7 +307,6 @@ impl Miner {
} }
} }
/// Utility to transform 8 bytes of a byte array into a u64. /// Utility to transform 8 bytes of a byte array into a u64.
fn u8_to_u64(p: &[u8], i: usize) -> u64 { fn u8_to_u64(p: &[u8], i: usize) -> u64 {
(p[i] as u64) | (p[i + 1] as u64) << 8 | (p[i + 2] as u64) << 16 | (p[i + 3] as u64) << 24 (p[i] as u64) | (p[i + 1] as u64) << 8 | (p[i + 2] as u64) << 16 | (p[i + 3] as u64) << 24
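Only the first four byte-shifts of the conversion are visible in this hunk; for reference, a later-Rust equivalent of the full 8-byte little-endian read (assuming the slice holds at least 8 bytes past `i`) would be:

fn u8_to_u64(p: &[u8], i: usize) -> u64 {
    // copy 8 bytes and interpret them little-endian
    let mut buf = [0u8; 8];
    buf.copy_from_slice(&p[i..i + 8]);
    u64::from_le_bytes(buf)
}

fn main() {
    let bytes = [1u8, 0, 0, 0, 0, 0, 0, 0];
    assert_eq!(u8_to_u64(&bytes, 0), 1);
}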
@ -319,181 +320,34 @@ mod test {
use core::core::Proof; use core::core::Proof;
static V1: [u32; 42] = [ static V1: [u32; 42] = [
0x3bbd, 0x3bbd, 0x4e96, 0x1013b, 0x1172b, 0x1371b, 0x13e6a, 0x1aaa6, 0x1b575, 0x1e237, 0x1ee88,
0x4e96, 0x22f94, 0x24223, 0x25b4f, 0x2e9f3, 0x33b49, 0x34063, 0x3454a, 0x3c081, 0x3d08e, 0x3d863,
0x1013b, 0x4285a, 0x42f22, 0x43122, 0x4b853, 0x4cd0c, 0x4f280, 0x557d5, 0x562cf, 0x58e59, 0x59a62,
0x1172b, 0x5b568, 0x644b9, 0x657e9, 0x66337, 0x6821c, 0x7866f, 0x7e14b, 0x7ec7c, 0x7eed7, 0x80643,
0x1371b, 0x8628c, 0x8949e,
0x13e6a,
0x1aaa6,
0x1b575,
0x1e237,
0x1ee88,
0x22f94,
0x24223,
0x25b4f,
0x2e9f3,
0x33b49,
0x34063,
0x3454a,
0x3c081,
0x3d08e,
0x3d863,
0x4285a,
0x42f22,
0x43122,
0x4b853,
0x4cd0c,
0x4f280,
0x557d5,
0x562cf,
0x58e59,
0x59a62,
0x5b568,
0x644b9,
0x657e9,
0x66337,
0x6821c,
0x7866f,
0x7e14b,
0x7ec7c,
0x7eed7,
0x80643,
0x8628c,
0x8949e
]; ];
static V2: [u32; 42] = [ static V2: [u32; 42] = [
0x5e3a, 0x5e3a, 0x8a8b, 0x103d8, 0x1374b, 0x14780, 0x16110, 0x1b571, 0x1c351, 0x1c826, 0x28228,
0x8a8b, 0x2909f, 0x29516, 0x2c1c4, 0x334eb, 0x34cdd, 0x38a2c, 0x3ad23, 0x45ac5, 0x46afe, 0x50f43,
0x103d8, 0x51ed6, 0x52ddd, 0x54a82, 0x5a46b, 0x5dbdb, 0x60f6f, 0x60fcd, 0x61c78, 0x63899, 0x64dab,
0x1374b, 0x6affc, 0x6b569, 0x72639, 0x73987, 0x78806, 0x7b98e, 0x7c7d7, 0x7ddd4, 0x7fa88, 0x8277c,
0x14780, 0x832d9, 0x8ba6f,
0x16110,
0x1b571,
0x1c351,
0x1c826,
0x28228,
0x2909f,
0x29516,
0x2c1c4,
0x334eb,
0x34cdd,
0x38a2c,
0x3ad23,
0x45ac5,
0x46afe,
0x50f43,
0x51ed6,
0x52ddd,
0x54a82,
0x5a46b,
0x5dbdb,
0x60f6f,
0x60fcd,
0x61c78,
0x63899,
0x64dab,
0x6affc,
0x6b569,
0x72639,
0x73987,
0x78806,
0x7b98e,
0x7c7d7,
0x7ddd4,
0x7fa88,
0x8277c,
0x832d9,
0x8ba6f
]; ];
static V3: [u32; 42] = [ static V3: [u32; 42] = [
0x308b, 0x308b, 0x9004, 0x91fc, 0x983e, 0x9d67, 0xa293, 0xb4cb, 0xb6c8, 0xccc8, 0xdddc, 0xf04d,
0x9004, 0x1372f, 0x16ec9, 0x17b61, 0x17d03, 0x1e3bc, 0x1fb0f, 0x29e6e, 0x2a2ca, 0x2a719, 0x3a078,
0x91fc, 0x3b7cc, 0x3c71d, 0x40daa, 0x43e17, 0x46adc, 0x4b359, 0x4c3aa, 0x4ce92, 0x4d06e, 0x51140,
0x983e, 0x565ac, 0x56b1f, 0x58a8b, 0x5e410, 0x5e607, 0x5ebb5, 0x5f8ae, 0x7aeac, 0x7b902, 0x7d6af,
0x9d67, 0x7f400,
0xa293,
0xb4cb,
0xb6c8,
0xccc8,
0xdddc,
0xf04d,
0x1372f,
0x16ec9,
0x17b61,
0x17d03,
0x1e3bc,
0x1fb0f,
0x29e6e,
0x2a2ca,
0x2a719,
0x3a078,
0x3b7cc,
0x3c71d,
0x40daa,
0x43e17,
0x46adc,
0x4b359,
0x4c3aa,
0x4ce92,
0x4d06e,
0x51140,
0x565ac,
0x56b1f,
0x58a8b,
0x5e410,
0x5e607,
0x5ebb5,
0x5f8ae,
0x7aeac,
0x7b902,
0x7d6af,
0x7f400
]; ];
// cuckoo28 at 50% edges of letter 'u' // cuckoo28 at 50% edges of letter 'u'
static V4: [u32; 42] = [ static V4: [u32; 42] = [
0xf7243, 0xf7243, 0x11f130, 0x193812, 0x23b565, 0x279ac3, 0x69b270, 0xe0778f, 0xef51fc, 0x10bf6e8,
0x11f130, 0x13ccf7d, 0x1551177, 0x1b6cfd2, 0x1f872c3, 0x2075681, 0x2e23ccc, 0x2e4c0aa, 0x2f607f1,
0x193812, 0x3007eeb, 0x3407e9a, 0x35423f9, 0x39e48bf, 0x45e3bf6, 0x46aa484, 0x47c0fe1, 0x4b1d5a6,
0x23b565, 0x4bae0ba, 0x4dfdbaf, 0x5048eda, 0x537da6b, 0x5402887, 0x56b8897, 0x5bd8e8b, 0x622de20,
0x279ac3, 0x62be5ce, 0x62d538e, 0x6464518, 0x650a6d5, 0x66ec4fa, 0x66f9476, 0x6b1e5f6, 0x6fd5d88,
0x69b270, 0x701f37b,
0xe0778f,
0xef51fc,
0x10bf6e8,
0x13ccf7d,
0x1551177,
0x1b6cfd2,
0x1f872c3,
0x2075681,
0x2e23ccc,
0x2e4c0aa,
0x2f607f1,
0x3007eeb,
0x3407e9a,
0x35423f9,
0x39e48bf,
0x45e3bf6,
0x46aa484,
0x47c0fe1,
0x4b1d5a6,
0x4bae0ba,
0x4dfdbaf,
0x5048eda,
0x537da6b,
0x5402887,
0x56b8897,
0x5bd8e8b,
0x622de20,
0x62be5ce,
0x62d538e,
0x6464518,
0x650a6d5,
0x66ec4fa,
0x66f9476,
0x6b1e5f6,
0x6fd5d88,
0x701f37b
]; ];
/// Find a 42-cycle on Cuckoo20 at 75% easiness and verify against a few /// Find a 42-cycle on Cuckoo20 at 75% easiness and verify against a few
@ -532,15 +386,12 @@ mod test {
fn validate_fail() { fn validate_fail() {
// edge checks // edge checks
assert!(!Cuckoo::new(&[49], 20).verify(Proof::new(vec![0; 42]), 75)); assert!(!Cuckoo::new(&[49], 20).verify(Proof::new(vec![0; 42]), 75));
assert!(!Cuckoo::new(&[49], 20) assert!(!Cuckoo::new(&[49], 20).verify(Proof::new(vec![0xffff; 42]), 75));
.verify(Proof::new(vec![0xffff; 42]), 75));
// wrong data for proof // wrong data for proof
assert!(!Cuckoo::new(&[50], 20) assert!(!Cuckoo::new(&[50], 20).verify(Proof::new(V1.to_vec().clone()), 75));
.verify(Proof::new(V1.to_vec().clone()), 75));
let mut test_header = [0; 32]; let mut test_header = [0; 32];
test_header[0] = 24; test_header[0] = 24;
assert!(!Cuckoo::new(&test_header, 20) assert!(!Cuckoo::new(&test_header, 20).verify(Proof::new(V4.to_vec().clone()), 50));
.verify(Proof::new(V4.to_vec().clone()), 50));
} }
#[test] #[test]

View file

@ -76,7 +76,7 @@ pub trait MiningWorker {
/// satisfies the requirements of the header. /// satisfies the requirements of the header.
pub fn verify_size(bh: &BlockHeader, cuckoo_sz: u32) -> bool { pub fn verify_size(bh: &BlockHeader, cuckoo_sz: u32) -> bool {
// make sure the pow hash shows a difficulty at least as large as the target // make sure the pow hash shows a difficulty at least as large as the target
// difficulty // difficulty
if bh.difficulty > bh.pow.clone().to_difficulty() { if bh.difficulty > bh.pow.clone().to_difficulty() {
return false; return false;
} }
@ -129,35 +129,35 @@ pub fn pow_size<T: MiningWorker + ?Sized>(
) -> Result<(), Error> { ) -> Result<(), Error> {
let start_nonce = bh.nonce; let start_nonce = bh.nonce;
// set the nonce for faster solution finding in user testing // set the nonce for faster solution finding in user testing
if bh.height == 0 && global::is_user_testing_mode() { if bh.height == 0 && global::is_user_testing_mode() {
bh.nonce = global::get_genesis_nonce(); bh.nonce = global::get_genesis_nonce();
} }
// try to find a cuckoo cycle on that header hash // try to find a cuckoo cycle on that header hash
loop { loop {
// can be trivially optimized by avoiding re-serialization every time but this // can be trivially optimized by avoiding re-serialization every time but this
// is not meant as a fast miner implementation // is not meant as a fast miner implementation
let pow_hash = bh.hash(); let pow_hash = bh.hash();
// if we found a cycle (not guaranteed) and the proof hash is higher than the // if we found a cycle (not guaranteed) and the proof hash is higher than the
// diff, we're all good // diff, we're all good
if let Ok(proof) = miner.mine(&pow_hash[..]) { if let Ok(proof) = miner.mine(&pow_hash[..]) {
if proof.clone().to_difficulty() >= diff { if proof.clone().to_difficulty() >= diff {
bh.pow = proof.clone(); bh.pow = proof.clone();
return Ok(()); return Ok(());
} }
} }
// otherwise increment the nonce // otherwise increment the nonce
bh.nonce += 1; bh.nonce += 1;
// and if we're back where we started, update the time (changes the hash as // and if we're back where we started, update the time (changes the hash as
// well) // well)
if bh.nonce == start_nonce { if bh.nonce == start_nonce {
bh.timestamp = time::at_utc(time::Timespec { sec: 0, nsec: 0 }); bh.timestamp = time::at_utc(time::Timespec { sec: 0, nsec: 0 });
} }
} }
} }
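Abstracting away the header and miner types, the loop above reduces to the following search; `mine` is a hypothetical closure returning the proof difficulty when a cycle is found, not the real `MiningWorker` API:

fn pow_search(
    mut nonce: u64,
    target: u64,
    mine: impl Fn(u64) -> Option<u64>, // nonce -> proof difficulty, if a cycle is found
) -> u64 {
    let start_nonce = nonce;
    loop {
        if let Some(diff) = mine(nonce) {
            if diff >= target {
                return nonce; // proof meets the target difficulty
            }
        }
        // otherwise bump the nonce and try again
        nonce = nonce.wrapping_add(1);
        if nonce == start_nonce {
            // in the real loop the timestamp is refreshed here so the
            // header hash (and thus the search space) changes
        }
    }
}

fn main() {
    // trivial "miner": the proof difficulty equals the nonce itself
    let n = pow_search(0, 5, |nonce| Some(nonce));
    assert_eq!(n, 5);
}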
#[cfg(test)] #[cfg(test)]

View file

@ -84,9 +84,9 @@ impl PluginMiner {
} }
// First, load and query the plugins in the given directory // First, load and query the plugins in the given directory
// These should all be stored in 'plugins' at the moment relative // These should all be stored in 'plugins' at the moment relative
// to the executable path, though they should appear somewhere else // to the executable path, though they should appear somewhere else
// when packaging is more thought out // when packaging is more thought out
let mut loaded_config_ref = LOADED_CONFIG.lock().unwrap(); let mut loaded_config_ref = LOADED_CONFIG.lock().unwrap();
@ -117,7 +117,7 @@ impl PluginMiner {
let mut index = 0; let mut index = 0;
for f in plugin_vec_filters { for f in plugin_vec_filters {
// So this is built dynamically based on the plugin implementation // So this is built dynamically based on the plugin implementation
// type and the consensus sizeshift // type and the consensus sizeshift
let filter = format!("{}_{}", f, sz); let filter = format!("{}_{}", f, sz);
let caps = plugin_manager.get_available_plugins(&filter).unwrap(); let caps = plugin_manager.get_available_plugins(&filter).unwrap();
@ -135,17 +135,25 @@ impl PluginMiner {
if let Some(l) = miner_config.clone().cuckoo_miner_plugin_config { if let Some(l) = miner_config.clone().cuckoo_miner_plugin_config {
if let Some(dp) = l[index].device_parameters.clone() { if let Some(dp) = l[index].device_parameters.clone() {
for (device, param_map) in dp.into_iter() { for (device, param_map) in dp.into_iter() {
for (param_name, param_value) in param_map.into_iter(){ for (param_name, param_value) in param_map.into_iter() {
let device_id = match device.parse::<u32>() { let device_id = match device.parse::<u32>() {
Ok(n) => n, Ok(n) => n,
Err(e) => { Err(e) => {
error!(LOGGER, "Error initializing mining plugin: {:?}", e); error!(LOGGER, "Error initializing mining plugin: {:?}", e);
panic!("Unable to init mining plugin."); panic!("Unable to init mining plugin.");
}, }
}; };
debug!(LOGGER, "Cuckoo Plugin {}: Setting mining parameter {} to {} on Device {}", debug!(
index, param_name, param_value, device_id); LOGGER,
config.parameter_list.push((param_name, device_id, param_value)); "Cuckoo Plugin {}: Setting mining parameter {} to {} on Device {}",
index,
param_name,
param_value,
device_id
);
config
.parameter_list
.push((param_name, device_id, param_value));
} }
} }
} }
@ -154,7 +162,7 @@ impl PluginMiner {
index += 1; index += 1;
} }
// Store this config now, because we just want one instance // Store this config now, because we just want one instance
// of the plugin lib per invocation now // of the plugin lib per invocation now
*loaded_config_ref = Some(cuckoo_configs.clone()); *loaded_config_ref = Some(cuckoo_configs.clone());
// this will load the associated plugin // this will load the associated plugin

View file

@ -22,7 +22,7 @@ pub struct CuckooMinerPluginConfig {
pub type_filter: String, pub type_filter: String,
/// device params /// device params
pub device_parameters: Option<HashMap<String, HashMap<String, u32>>> pub device_parameters: Option<HashMap<String, HashMap<String, u32>>>,
} }
impl Default for CuckooMinerPluginConfig { impl Default for CuckooMinerPluginConfig {

View file

@ -38,7 +38,6 @@ pub fn show_status(config: &ServerConfig) {
writeln!(e, "Last block hash: {}", status.tip.last_block_pushed).unwrap(); writeln!(e, "Last block hash: {}", status.tip.last_block_pushed).unwrap();
writeln!(e, "Previous block hash: {}", status.tip.prev_block_to_last).unwrap(); writeln!(e, "Previous block hash: {}", status.tip.prev_block_to_last).unwrap();
writeln!(e, "Total difficulty: {}", status.tip.total_difficulty).unwrap(); writeln!(e, "Total difficulty: {}", status.tip.total_difficulty).unwrap();
} }
Err(_) => writeln!( Err(_) => writeln!(
e, e,
@ -81,10 +80,7 @@ pub fn unban_peer(config: &ServerConfig, peer_addr: &SocketAddr) {
pub fn list_connected_peers(config: &ServerConfig) { pub fn list_connected_peers(config: &ServerConfig) {
let mut e = term::stdout().unwrap(); let mut e = term::stdout().unwrap();
let url = format!( let url = format!("http://{}/v1/peers/connected", config.api_http_addr);
"http://{}/v1/peers/connected",
config.api_http_addr
);
match api::client::get::<Vec<p2p::PeerInfo>>(url.as_str()).map_err(|e| Error::API(e)) { match api::client::get::<Vec<p2p::PeerInfo>>(url.as_str()).map_err(|e| Error::API(e)) {
Ok(connected_peers) => { Ok(connected_peers) => {
let mut index = 0; let mut index = 0;
@ -98,7 +94,7 @@ pub fn list_connected_peers(config: &ServerConfig) {
println!(); println!();
index = index + 1; index = index + 1;
} }
}, }
Err(_) => writeln!(e, "Failed to get connected peers").unwrap(), Err(_) => writeln!(e, "Failed to get connected peers").unwrap(),
}; };
e.reset().unwrap(); e.reset().unwrap();

View file

@ -429,7 +429,11 @@ fn wallet_command(wallet_args: &ArgMatches, global_config: GlobalConfig) {
wallet_config.check_node_api_http_addr = sa.to_string().clone(); wallet_config.check_node_api_http_addr = sa.to_string().clone();
} }
let key_derivations: u32 = wallet_args.value_of("key_derivations").unwrap().parse().unwrap(); let key_derivations: u32 = wallet_args
.value_of("key_derivations")
.unwrap()
.parse()
.unwrap();
let mut show_spent = false; let mut show_spent = false;
if wallet_args.is_present("show_spent") { if wallet_args.is_present("show_spent") {
@ -515,29 +519,29 @@ fn wallet_command(wallet_args: &ArgMatches, global_config: GlobalConfig) {
dest, dest,
selection_strategy, selection_strategy,
), ),
Err(e) => match e.kind() { Err(e) => match e.kind() {
wallet::ErrorKind::NotEnoughFunds(available) => { wallet::ErrorKind::NotEnoughFunds(available) => {
error!( error!(
LOGGER, LOGGER,
"Tx not sent: insufficient funds (max: {})", "Tx not sent: insufficient funds (max: {})",
amount_to_hr_string(available), amount_to_hr_string(available),
); );
} }
wallet::ErrorKind::FeeExceedsAmount { wallet::ErrorKind::FeeExceedsAmount {
sender_amount, sender_amount,
recipient_fee, recipient_fee,
} => { } => {
error!( error!(
LOGGER, LOGGER,
"Recipient rejected the transfer because transaction fee ({}) exceeded amount ({}).", "Recipient rejected the transfer because transaction fee ({}) exceeded amount ({}).",
amount_to_hr_string(recipient_fee), amount_to_hr_string(recipient_fee),
amount_to_hr_string(sender_amount) amount_to_hr_string(sender_amount)
); );
} }
_ => { _ => {
error!(LOGGER, "Tx not sent: {:?}", e); error!(LOGGER, "Tx not sent: {:?}", e);
} }
} },
}; };
} }
("burn", Some(send_args)) => { ("burn", Some(send_args)) => {

View file

@ -34,7 +34,6 @@ extern crate serde_derive;
#[macro_use] #[macro_use]
extern crate slog; extern crate slog;
pub mod pmmr; pub mod pmmr;
pub mod types; pub mod types;

View file

@ -13,15 +13,15 @@
//! Implementation of the persistent Backend for the prunable MMR tree. //! Implementation of the persistent Backend for the prunable MMR tree.
use std::fs::{self}; use std::fs;
use std::io::{self}; use std::io;
use std::marker::PhantomData; use std::marker::PhantomData;
use core::core::pmmr::{self, Backend}; use core::core::pmmr::{self, Backend};
use core::ser::{self, PMMRable, Readable, Writeable, Reader, Writer}; use core::ser::{self, PMMRable, Readable, Reader, Writeable, Writer};
use core::core::hash::Hash; use core::core::hash::Hash;
use util::LOGGER; use util::LOGGER;
use types::{AppendOnlyFile, RemoveLog, read_ordered_vec, write_vec}; use types::{read_ordered_vec, write_vec, AppendOnlyFile, RemoveLog};
const PMMR_HASH_FILE: &'static str = "pmmr_hash.bin"; const PMMR_HASH_FILE: &'static str = "pmmr_hash.bin";
const PMMR_DATA_FILE: &'static str = "pmmr_data.bin"; const PMMR_DATA_FILE: &'static str = "pmmr_data.bin";
@ -31,7 +31,8 @@ const PMMR_PRUNED_FILE: &'static str = "pmmr_pruned.bin";
/// Maximum number of nodes in the remove log before it gets flushed /// Maximum number of nodes in the remove log before it gets flushed
pub const RM_LOG_MAX_NODES: usize = 10000; pub const RM_LOG_MAX_NODES: usize = 10000;
/// Metadata for the PMMR backend's AppendOnlyFile, which can be serialized and stored /// Metadata for the PMMR backend's AppendOnlyFile, which can be serialized and
/// stored
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)] #[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub struct PMMRFileMetadata { pub struct PMMRFileMetadata {
/// last written index of the hash file /// last written index of the hash file
@ -124,8 +125,7 @@ where
Err(e) => { Err(e) => {
error!( error!(
LOGGER, LOGGER,
"Corrupted storage, could not read an entry from hash store: {:?}", "Corrupted storage, could not read an entry from hash store: {:?}", e
e
); );
return None; return None;
} }
@ -152,8 +152,8 @@ where
} }
// Optionally read flatfile storage to get data element // Optionally read flatfile storage to get data element
let flatfile_pos = pmmr::n_leaves(position) let flatfile_pos =
- 1 - self.pruned_nodes.get_leaf_shift(position).unwrap(); pmmr::n_leaves(position) - 1 - self.pruned_nodes.get_leaf_shift(position).unwrap();
let record_len = T::len(); let record_len = T::len();
let file_offset = flatfile_pos as usize * T::len(); let file_offset = flatfile_pos as usize * T::len();
let data = self.data_file.read(file_offset, record_len); let data = self.data_file.read(file_offset, record_len);
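The offset arithmetic above can be read in isolation: the MMR position is mapped to a leaf count, made zero-based, then shifted down by the pruned leaves before it. A sketch with the two lookups passed in as plain numbers (hypothetical values, not the real `pmmr` API):

fn file_offset(n_leaves: u64, leaf_shift: u64, record_len: u64) -> u64 {
    // zero-based position in the flat data file after pruning
    let flatfile_pos = n_leaves - 1 - leaf_shift;
    flatfile_pos * record_len
}

fn main() {
    // 5th leaf, 2 pruned leaves before it, 32-byte records -> byte 64
    assert_eq!(file_offset(5, 2, 32), 64);
}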
@ -196,9 +196,9 @@ where
/// Remove Hashes by insertion position /// Remove Hashes by insertion position
fn remove(&mut self, positions: Vec<u64>, index: u32) -> Result<(), String> { fn remove(&mut self, positions: Vec<u64>, index: u32) -> Result<(), String> {
self.rm_log.append(positions, index).map_err(|e| { self.rm_log
format!("Could not write to log storage, disk full? {:?}", e) .append(positions, index)
}) .map_err(|e| format!("Could not write to log storage, disk full? {:?}", e))
} }
/// Return data file path /// Return data file path
@ -216,12 +216,14 @@ where
pub fn new(data_dir: String, file_md: Option<PMMRFileMetadata>) -> io::Result<PMMRBackend<T>> { pub fn new(data_dir: String, file_md: Option<PMMRFileMetadata>) -> io::Result<PMMRBackend<T>> {
let (hash_to_pos, data_to_pos) = match file_md { let (hash_to_pos, data_to_pos) = match file_md {
Some(m) => (m.last_hash_file_pos, m.last_data_file_pos), Some(m) => (m.last_hash_file_pos, m.last_data_file_pos),
None => (0,0) None => (0, 0),
}; };
let hash_file = AppendOnlyFile::open(format!("{}/{}", data_dir, PMMR_HASH_FILE), hash_to_pos)?; let hash_file =
AppendOnlyFile::open(format!("{}/{}", data_dir, PMMR_HASH_FILE), hash_to_pos)?;
let rm_log = RemoveLog::open(format!("{}/{}", data_dir, PMMR_RM_LOG_FILE))?; let rm_log = RemoveLog::open(format!("{}/{}", data_dir, PMMR_RM_LOG_FILE))?;
let prune_list = read_ordered_vec(format!("{}/{}", data_dir, PMMR_PRUNED_FILE), 8)?; let prune_list = read_ordered_vec(format!("{}/{}", data_dir, PMMR_PRUNED_FILE), 8)?;
let data_file = AppendOnlyFile::open(format!("{}/{}", data_dir, PMMR_DATA_FILE), data_to_pos)?; let data_file =
AppendOnlyFile::open(format!("{}/{}", data_dir, PMMR_DATA_FILE), data_to_pos)?;
Ok(PMMRBackend { Ok(PMMRBackend {
data_dir: data_dir, data_dir: data_dir,
@ -262,15 +264,15 @@ where
pub fn sync(&mut self) -> io::Result<()> { pub fn sync(&mut self) -> io::Result<()> {
if let Err(e) = self.hash_file.flush() { if let Err(e) = self.hash_file.flush() {
return Err(io::Error::new( return Err(io::Error::new(
io::ErrorKind::Interrupted, io::ErrorKind::Interrupted,
format!("Could not write to log hash storage, disk full? {:?}", e), format!("Could not write to log hash storage, disk full? {:?}", e),
)); ));
} }
if let Err(e) = self.data_file.flush() { if let Err(e) = self.data_file.flush() {
return Err(io::Error::new( return Err(io::Error::new(
io::ErrorKind::Interrupted, io::ErrorKind::Interrupted,
format!("Could not write to log data storage, disk full? {:?}", e), format!("Could not write to log data storage, disk full? {:?}", e),
)); ));
} }
self.rm_log.flush()?; self.rm_log.flush()?;
Ok(()) Ok(())
@ -292,7 +294,7 @@ where
pub fn last_file_positions(&self) -> PMMRFileMetadata { pub fn last_file_positions(&self) -> PMMRFileMetadata {
PMMRFileMetadata { PMMRFileMetadata {
last_hash_file_pos: self.hash_file.last_buffer_pos() as u64, last_hash_file_pos: self.hash_file.last_buffer_pos() as u64,
last_data_file_pos: self.data_file.last_buffer_pos() as u64 last_data_file_pos: self.data_file.last_buffer_pos() as u64,
} }
} }
@ -338,13 +340,11 @@ where
// remove list // remove list
let tmp_prune_file_hash = format!("{}/{}.hashprune", self.data_dir, PMMR_HASH_FILE); let tmp_prune_file_hash = format!("{}/{}.hashprune", self.data_dir, PMMR_HASH_FILE);
let record_len = 32; let record_len = 32;
let to_rm = filter_map_vec!(self.rm_log.removed, |&(pos, idx)| { let to_rm = filter_map_vec!(self.rm_log.removed, |&(pos, idx)| if idx < cutoff_index {
if idx < cutoff_index { let shift = self.pruned_nodes.get_shift(pos);
let shift = self.pruned_nodes.get_shift(pos); Some((pos - 1 - shift.unwrap()) * record_len)
Some((pos - 1 - shift.unwrap()) * record_len) } else {
} else { None
None
}
}); });
self.hash_file self.hash_file
.save_prune(tmp_prune_file_hash.clone(), to_rm, record_len)?; .save_prune(tmp_prune_file_hash.clone(), to_rm, record_len)?;
@ -390,7 +390,8 @@ where
self.data_file = AppendOnlyFile::open(format!("{}/{}", self.data_dir, PMMR_DATA_FILE), 0)?; self.data_file = AppendOnlyFile::open(format!("{}/{}", self.data_dir, PMMR_DATA_FILE), 0)?;
// 6. truncate the rm log // 6. truncate the rm log
self.rm_log.removed = self.rm_log.removed self.rm_log.removed = self.rm_log
.removed
.iter() .iter()
.filter(|&&(_, idx)| idx >= cutoff_index) .filter(|&&(_, idx)| idx >= cutoff_index)
.map(|x| *x) .map(|x| *x)

View file

@ -42,7 +42,7 @@ pub struct AppendOnlyFile {
mmap: Option<memmap::Mmap>, mmap: Option<memmap::Mmap>,
buffer_start: usize, buffer_start: usize,
buffer: Vec<u8>, buffer: Vec<u8>,
buffer_start_bak: usize buffer_start_bak: usize,
} }
impl AppendOnlyFile { impl AppendOnlyFile {
@ -128,7 +128,7 @@ impl AppendOnlyFile {
pub fn read(&self, offset: usize, length: usize) -> Vec<u8> { pub fn read(&self, offset: usize, length: usize) -> Vec<u8> {
if offset >= self.buffer_start { if offset >= self.buffer_start {
let offset = offset - self.buffer_start; let offset = offset - self.buffer_start;
return self.buffer[offset..(offset+length)].to_vec(); return self.buffer[offset..(offset + length)].to_vec();
} }
if let None = self.mmap { if let None = self.mmap {
return vec![]; return vec![];
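A sketch of the branch above with the mmap-backed path elided: reads at or past `buffer_start` are served from the in-memory append buffer, anything earlier would fall through to the mmap (returned here as `None`, where the real code continues):

fn read_buffered(buffer_start: usize, buffer: &[u8], offset: usize, len: usize) -> Option<Vec<u8>> {
    if offset >= buffer_start {
        // offset falls inside the not-yet-flushed append buffer
        let off = offset - buffer_start;
        return Some(buffer[off..off + len].to_vec());
    }
    None // the real code falls back to the mmap here
}

fn main() {
    let buf = [9u8, 8, 7, 6];
    // bytes 0..100 live on disk, 100.. are still buffered
    assert_eq!(read_buffered(100, &buf, 101, 2), Some(vec![8, 7]));
    assert_eq!(read_buffered(100, &buf, 10, 2), None);
}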
@ -150,7 +150,12 @@ impl AppendOnlyFile {
/// Saves a copy of the current file content, skipping data at the provided /// Saves a copy of the current file content, skipping data at the provided
/// prune indices. The prune Vec must be ordered. /// prune indices. The prune Vec must be ordered.
pub fn save_prune(&self, target: String, prune_offs: Vec<u64>, prune_len: u64) -> io::Result<()> { pub fn save_prune(
&self,
target: String,
prune_offs: Vec<u64>,
prune_len: u64,
) -> io::Result<()> {
let mut reader = File::open(self.path.clone())?; let mut reader = File::open(self.path.clone())?;
let mut writer = File::create(target)?; let mut writer = File::create(target)?;
@ -303,8 +308,8 @@ impl RemoveLog {
} }
} }
} }
let pos = match complete_list.binary_search(&(elmt,0)){ let pos = match complete_list.binary_search(&(elmt, 0)) {
Ok(idx) => idx+1, Ok(idx) => idx + 1,
Err(idx) => idx, Err(idx) => idx,
}; };
complete_list.split_at(pos).0.len() complete_list.split_at(pos).0.len()
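The binary search above computes a rank: how many entries of the position-ordered list sit at or before `elmt`. A sketch over the same `(position, index)` tuples:

fn rank(complete_list: &[(u64, u32)], elmt: u64) -> usize {
    match complete_list.binary_search(&(elmt, 0)) {
        Ok(idx) => idx + 1, // an exact (elmt, 0) entry counts as well
        Err(idx) => idx,    // insertion point = number of smaller entries
    }
}

fn main() {
    let list = [(2, 0), (5, 0), (9, 0)];
    assert_eq!(rank(&list, 5), 2); // two entries at or before position 5
    assert_eq!(rank(&list, 6), 2);
}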

View file

@ -20,7 +20,7 @@ extern crate time;
use std::fs; use std::fs;
use core::ser::*; use core::ser::*;
use core::core::pmmr::{PMMR, Backend}; use core::core::pmmr::{Backend, PMMR};
use core::core::hash::{Hash, Hashed}; use core::core::hash::{Hash, Hashed};
#[test] #[test]
@ -38,21 +38,15 @@ fn pmmr_append() {
// check the resulting backend store and the computation of the root // check the resulting backend store and the computation of the root
let node_hash = elems[0].hash(); let node_hash = elems[0].hash();
assert_eq!( assert_eq!(backend.get(1, false).expect("").0, node_hash);
backend.get(1, false).expect("").0,
node_hash
);
let sum2 = elems[0].hash() + elems[1].hash(); let sum2 = elems[0].hash() + elems[1].hash();
let sum4 = sum2 let sum4 = sum2 + (elems[2].hash() + elems[3].hash());
+ (elems[2].hash() + elems[3].hash()); let sum8 = sum4 + ((elems[4].hash() + elems[5].hash()) + (elems[6].hash() + elems[7].hash()));
let sum8 = sum4
+ ((elems[4].hash() + elems[5].hash())
+ (elems[6].hash() + elems[7].hash()));
let sum9 = sum8 + elems[8].hash(); let sum9 = sum8 + elems[8].hash();
{ {
let pmmr:PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size); let pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
assert_eq!(pmmr.root(), sum9); assert_eq!(pmmr.root(), sum9);
} }
@ -71,13 +65,13 @@ fn pmmr_prune_compact() {
// save the root // save the root
let root: Hash; let root: Hash;
{ {
let pmmr:PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size); let pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
root = pmmr.root(); root = pmmr.root();
} }
// pruning some choice nodes // pruning some choice nodes
{ {
let mut pmmr:PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size); let mut pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
pmmr.prune(1, 1).unwrap(); pmmr.prune(1, 1).unwrap();
pmmr.prune(4, 1).unwrap(); pmmr.prune(4, 1).unwrap();
pmmr.prune(5, 1).unwrap(); pmmr.prune(5, 1).unwrap();
@ -86,10 +80,13 @@ fn pmmr_prune_compact() {
// check the root and stored data // check the root and stored data
{ {
let pmmr:PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size); let pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
assert_eq!(root, pmmr.root()); assert_eq!(root, pmmr.root());
// check we can still retrieve the same element from leaf index 2 // check we can still retrieve the same element from leaf index 2
assert_eq!(pmmr.get(2, true).unwrap().1.unwrap(), TestElem([0, 0, 0, 2])); assert_eq!(
pmmr.get(2, true).unwrap().1.unwrap(),
TestElem([0, 0, 0, 2])
);
} }
// compact // compact
@ -97,10 +94,16 @@ fn pmmr_prune_compact() {
// recheck the root and stored data // recheck the root and stored data
{ {
let pmmr:PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size); let pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
assert_eq!(root, pmmr.root()); assert_eq!(root, pmmr.root());
assert_eq!(pmmr.get(2, true).unwrap().1.unwrap(), TestElem([0, 0, 0, 2])); assert_eq!(
assert_eq!(pmmr.get(11, true).unwrap().1.unwrap(), TestElem([0, 0, 0, 7])); pmmr.get(2, true).unwrap().1.unwrap(),
TestElem([0, 0, 0, 2])
);
assert_eq!(
pmmr.get(11, true).unwrap().1.unwrap(),
TestElem([0, 0, 0, 7])
);
} }
teardown(data_dir); teardown(data_dir);
@ -120,7 +123,7 @@ fn pmmr_reload() {
// save the root and prune some nodes so we have prune data // save the root and prune some nodes so we have prune data
{ {
let mut pmmr:PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size); let mut pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
pmmr.dump(false); pmmr.dump(false);
root = pmmr.root(); root = pmmr.root();
pmmr.prune(1, 1).unwrap(); pmmr.prune(1, 1).unwrap();
@ -134,7 +137,7 @@ fn pmmr_reload() {
// prune some more to get rm log data // prune some more to get rm log data
{ {
let mut pmmr:PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size); let mut pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
pmmr.prune(5, 1).unwrap(); pmmr.prune(5, 1).unwrap();
} }
backend.sync().unwrap(); backend.sync().unwrap();
@ -143,11 +146,11 @@ fn pmmr_reload() {
// create a new backend and check everything is kosher // create a new backend and check everything is kosher
{ {
let mut backend:store::pmmr::PMMRBackend<TestElem> = let mut backend: store::pmmr::PMMRBackend<TestElem> =
store::pmmr::PMMRBackend::new(data_dir.to_string(), None).unwrap(); store::pmmr::PMMRBackend::new(data_dir.to_string(), None).unwrap();
assert_eq!(backend.unpruned_size().unwrap(), mmr_size); assert_eq!(backend.unpruned_size().unwrap(), mmr_size);
{ {
let pmmr:PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size); let pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
assert_eq!(root, pmmr.root()); assert_eq!(root, pmmr.root());
} }
assert_eq!(backend.get(5, false), None); assert_eq!(backend.get(5, false), None);
@ -166,7 +169,7 @@ fn pmmr_rewind() {
backend.sync().unwrap(); backend.sync().unwrap();
let root1: Hash; let root1: Hash;
{ {
let pmmr:PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size); let pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
root1 = pmmr.root(); root1 = pmmr.root();
} }
@ -174,7 +177,7 @@ fn pmmr_rewind() {
backend.sync().unwrap(); backend.sync().unwrap();
let root2: Hash; let root2: Hash;
{ {
let pmmr:PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size); let pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
root2 = pmmr.root(); root2 = pmmr.root();
} }
@ -183,7 +186,7 @@ fn pmmr_rewind() {
// prune and compact the first 2 elements to spice things up // prune and compact the first 2 elements to spice things up
{ {
let mut pmmr:PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size); let mut pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
pmmr.prune(1, 1).unwrap(); pmmr.prune(1, 1).unwrap();
pmmr.prune(2, 1).unwrap(); pmmr.prune(2, 1).unwrap();
} }
@ -192,24 +195,24 @@ fn pmmr_rewind() {
// rewind and check the roots still match // rewind and check the roots still match
{ {
let mut pmmr:PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size); let mut pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
pmmr.rewind(9, 3).unwrap(); pmmr.rewind(9, 3).unwrap();
assert_eq!(pmmr.root(), root2); assert_eq!(pmmr.root(), root2);
} }
backend.sync().unwrap(); backend.sync().unwrap();
{ {
let pmmr:PMMR<TestElem, _> = PMMR::at(&mut backend, 10); let pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, 10);
assert_eq!(pmmr.root(), root2); assert_eq!(pmmr.root(), root2);
} }
{ {
let mut pmmr:PMMR<TestElem, _> = PMMR::at(&mut backend, 10); let mut pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, 10);
pmmr.rewind(5, 3).unwrap(); pmmr.rewind(5, 3).unwrap();
assert_eq!(pmmr.root(), root1); assert_eq!(pmmr.root(), root1);
} }
backend.sync().unwrap(); backend.sync().unwrap();
{ {
let pmmr:PMMR<TestElem, _> = PMMR::at(&mut backend, 7); let pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, 7);
assert_eq!(pmmr.root(), root1); assert_eq!(pmmr.root(), root1);
} }
@ -229,13 +232,13 @@ fn pmmr_compact_horizon() {
// save the root // save the root
{ {
let pmmr:PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size); let pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
root = pmmr.root(); root = pmmr.root();
} }
// pruning some choice nodes with an increasing block height // pruning some choice nodes with an increasing block height
{ {
let mut pmmr:PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size); let mut pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
pmmr.prune(1, 1).unwrap(); pmmr.prune(1, 1).unwrap();
pmmr.prune(2, 2).unwrap(); pmmr.prune(2, 2).unwrap();
pmmr.prune(4, 3).unwrap(); pmmr.prune(4, 3).unwrap();
@ -249,12 +252,13 @@ fn pmmr_compact_horizon() {
// recheck stored data // recheck stored data
{ {
// recreate backend // recreate backend
let mut backend = store::pmmr::PMMRBackend::<TestElem>::new(data_dir.to_string(), None).unwrap(); let mut backend =
store::pmmr::PMMRBackend::<TestElem>::new(data_dir.to_string(), None).unwrap();
// 9 elements total, minus 2 compacted // 9 elements total, minus 2 compacted
assert_eq!(backend.data_size().unwrap(), 7); assert_eq!(backend.data_size().unwrap(), 7);
// 15 nodes total, 2 pruned and compacted // 15 nodes total, 2 pruned and compacted
assert_eq!(backend.hash_size().unwrap(), 13); assert_eq!(backend.hash_size().unwrap(), 13);
// compact some more // compact some more
backend.check_compact(1, 5).unwrap(); backend.check_compact(1, 5).unwrap();
} }
@ -262,7 +266,8 @@ fn pmmr_compact_horizon() {
// recheck stored data // recheck stored data
{ {
// recreate backend // recreate backend
let backend = store::pmmr::PMMRBackend::<TestElem>::new(data_dir.to_string(), None).unwrap(); let backend =
store::pmmr::PMMRBackend::<TestElem>::new(data_dir.to_string(), None).unwrap();
// 9 elements total, minus 4 compacted // 9 elements total, minus 4 compacted
assert_eq!(backend.data_size().unwrap(), 5); assert_eq!(backend.data_size().unwrap(), 5);
// 15 nodes total, 6 pruned and compacted // 15 nodes total, 6 pruned and compacted
@ -323,13 +328,11 @@ impl Writeable for TestElem {
} }
impl Readable for TestElem { impl Readable for TestElem {
fn read(reader: &mut Reader) -> Result<TestElem, Error> { fn read(reader: &mut Reader) -> Result<TestElem, Error> {
Ok(TestElem ( Ok(TestElem([
[ reader.read_u32()?,
reader.read_u32()?, reader.read_u32()?,
reader.read_u32()?, reader.read_u32()?,
reader.read_u32()?, reader.read_u32()?,
reader.read_u32()?, ]))
]
))
} }
} }
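
Since TestElem now reads and writes exactly four u32 values, a round trip through the ser helpers should be lossless. A hedged sketch (ser::ser_vec appears elsewhere in this commit; ser::deserialize is assumed to be its reading counterpart):

    // hedged round-trip sketch for the Writeable/Readable pair above
    let elem = TestElem([0, 0, 0, 2]);
    let bytes = ser::ser_vec(&elem).unwrap();
    let decoded: TestElem = ser::deserialize(&mut &bytes[..]).unwrap();
    assert_eq!(decoded, elem);
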

View file

@ -21,12 +21,12 @@
#![deny(unused_mut)] #![deny(unused_mut)]
#![warn(missing_docs)] #![warn(missing_docs)]
extern crate byteorder;
extern crate rand;
#[macro_use] #[macro_use]
extern crate slog; extern crate slog;
extern crate slog_async; extern crate slog_async;
extern crate slog_term; extern crate slog_term;
extern crate byteorder;
extern crate rand;
#[macro_use] #[macro_use]
extern crate lazy_static; extern crate lazy_static;
@ -50,7 +50,7 @@ pub mod secp_static;
pub use secp_static::static_secp_instance; pub use secp_static::static_secp_instance;
pub mod types; pub mod types;
pub use types::{LoggingConfig, LogLevel}; pub use types::{LogLevel, LoggingConfig};
// other utils // other utils
use std::cell::{Ref, RefCell}; use std::cell::{Ref, RefCell};

View file

@ -16,7 +16,7 @@
//! initialisation overhead //! initialisation overhead
use std::sync::{Arc, Mutex}; use std::sync::{Arc, Mutex};
use rand::{thread_rng}; use rand::thread_rng;
use secp_ as secp; use secp_ as secp;
lazy_static! { lazy_static! {
@ -27,8 +27,8 @@ lazy_static! {
/// Returns the static instance, but calls randomize on it as well /// Returns the static instance, but calls randomize on it as well
/// (Recommended to avoid side channel attacks) /// (Recommended to avoid side channel attacks)
pub fn static_secp_instance()-> Arc<Mutex<secp::Secp256k1>>{ pub fn static_secp_instance() -> Arc<Mutex<secp::Secp256k1>> {
let mut secp_inst=SECP256K1.lock().unwrap(); let mut secp_inst = SECP256K1.lock().unwrap();
secp_inst.randomize(&mut thread_rng()); secp_inst.randomize(&mut thread_rng());
SECP256K1.clone() SECP256K1.clone()
} }
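
Since the function hands back the shared Arc<Mutex<...>>, callers lock it for the duration of each operation. A minimal usage sketch:

    // minimal usage sketch: take the shared, re-randomized instance
    // and hold the lock only while the secp operation runs
    let secp_inst = static_secp_instance();
    let secp = secp_inst.lock().unwrap();
    // ... use `secp` for commitments or signatures while the guard lives ...
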

View file

@ -21,7 +21,7 @@ use std::fs::{self, File};
use walkdir::WalkDir; use walkdir::WalkDir;
use zip_rs; use zip_rs;
use zip_rs::result::{ZipResult, ZipError}; use zip_rs::result::{ZipError, ZipResult};
use zip_rs::write::FileOptions; use zip_rs::write::FileOptions;
/// Compress a source directory recursively into a zip file using the /// Compress a source directory recursively into a zip file using the
@ -29,8 +29,10 @@ use zip_rs::write::FileOptions;
/// unwanted execution bits. /// unwanted execution bits.
pub fn compress(src_dir: &Path, dst_file: &File) -> ZipResult<()> { pub fn compress(src_dir: &Path, dst_file: &File) -> ZipResult<()> {
if !Path::new(src_dir).is_dir() { if !Path::new(src_dir).is_dir() {
return Err(ZipError::Io( return Err(ZipError::Io(io::Error::new(
io::Error::new(io::ErrorKind::Other, "Source must be a directory."))); io::ErrorKind::Other,
"Source must be a directory.",
)));
} }
let options = FileOptions::default() let options = FileOptions::default()
@ -61,7 +63,10 @@ pub fn compress(src_dir: &Path, dst_file: &File) -> ZipResult<()> {
} }
/// Decompress a source file into the provided destination path. /// Decompress a source file into the provided destination path.
pub fn decompress<R>(src_file: R, dest: &Path) -> ZipResult<()> where R: io::Read + io::Seek { pub fn decompress<R>(src_file: R, dest: &Path) -> ZipResult<()>
where
R: io::Read + io::Seek,
{
let mut archive = zip_rs::ZipArchive::new(src_file)?; let mut archive = zip_rs::ZipArchive::new(src_file)?;
for i in 0..archive.len() { for i in 0..archive.len() {
@ -85,10 +90,12 @@ pub fn decompress<R>(src_file: R, dest: &Path) -> ZipResult<()> where R: io::Rea
{ {
use std::os::unix::fs::PermissionsExt; use std::os::unix::fs::PermissionsExt;
if let Some(mode) = file.unix_mode() { if let Some(mode) = file.unix_mode() {
fs::set_permissions(&file_path.to_str().unwrap(), PermissionsExt::from_mode(mode))?; fs::set_permissions(
&file_path.to_str().unwrap(),
PermissionsExt::from_mode(mode),
)?;
} }
} }
} }
Ok(()) Ok(())
} }

View file

@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
extern crate grin_util as util; extern crate grin_util as util;
use std::fs::{self, File}; use std::fs::{self, File};
use std::path::Path; use std::path::Path;
@ -30,12 +30,12 @@ fn zip_unzip() {
let zip_file = File::create(zip_name).unwrap(); let zip_file = File::create(zip_name).unwrap();
zip::compress(&root.join("./to_zip"), &zip_file).unwrap(); zip::compress(&root.join("./to_zip"), &zip_file).unwrap();
zip_file.sync_all(); zip_file.sync_all();
let zip_path = Path::new(zip_name); let zip_path = Path::new(zip_name);
assert!(zip_path.exists()); assert!(zip_path.exists());
assert!(zip_path.is_file()); assert!(zip_path.is_file());
assert!(zip_path.metadata().unwrap().len() > 300); assert!(zip_path.metadata().unwrap().len() > 300);
fs::create_dir_all(root.join("./dezipped")).unwrap(); fs::create_dir_all(root.join("./dezipped")).unwrap();
let zip_file = File::open(zip_name).unwrap(); let zip_file = File::open(zip_name).unwrap();
zip::decompress(zip_file, &root.join("./dezipped")).unwrap(); zip::decompress(zip_file, &root.join("./dezipped")).unwrap();

View file

@ -17,7 +17,7 @@
use std::collections::hash_map::Entry; use std::collections::hash_map::Entry;
use std::collections::HashMap; use std::collections::HashMap;
use failure::{ResultExt}; use failure::ResultExt;
use api; use api;
use types::*; use types::*;
@ -26,7 +26,6 @@ use util::secp::pedersen;
use util; use util;
use util::LOGGER; use util::LOGGER;
// Transitions a local wallet output from Unconfirmed -> Unspent. // Transitions a local wallet output from Unconfirmed -> Unspent.
fn mark_unspent_output(out: &mut OutputData) { fn mark_unspent_output(out: &mut OutputData) {
match out.status { match out.status {
@ -53,23 +52,21 @@ pub fn refresh_outputs(config: &WalletConfig, keychain: &Keychain) -> Result<(),
Ok(()) Ok(())
} }
// TODO - this might be slow if we have really old outputs that have never been refreshed // TODO - this might be slow if we have really old outputs that have never been
// refreshed
fn refresh_missing_block_hashes(config: &WalletConfig, keychain: &Keychain) -> Result<(), Error> { fn refresh_missing_block_hashes(config: &WalletConfig, keychain: &Keychain) -> Result<(), Error> {
// build a local map of wallet outputs keyed by commit // build a local map of wallet outputs keyed by commit
// and a list of outputs we want to query the node for // and a list of outputs we want to query the node for
let mut wallet_outputs: HashMap<pedersen::Commitment, Identifier> = HashMap::new(); let mut wallet_outputs: HashMap<pedersen::Commitment, Identifier> = HashMap::new();
let _ = WalletData::read_wallet(&config.data_file_dir, |wallet_data| { let _ = WalletData::read_wallet(&config.data_file_dir, |wallet_data| {
for out in wallet_data for out in wallet_data.outputs.values().filter(|x| {
.outputs x.root_key_id == keychain.root_key_id() && x.block.is_none()
.values() && x.status == OutputStatus::Unspent
.filter(|x| { }) {
x.root_key_id == keychain.root_key_id() && let commit = keychain
x.block.is_none() && .commit_with_key_index(out.value, out.n_child)
x.status == OutputStatus::Unspent .context(ErrorKind::Keychain)?;
}) wallet_outputs.insert(commit, out.key_id.clone());
{
let commit = keychain.commit_with_key_index(out.value, out.n_child).context(ErrorKind::Keychain)?;
wallet_outputs.insert(commit, out.key_id.clone());
} }
Ok(()) Ok(())
}); });
@ -95,16 +92,11 @@ fn refresh_missing_block_hashes(config: &WalletConfig, keychain: &Keychain) -> R
let tip = get_tip_from_node(config)?; let tip = get_tip_from_node(config)?;
let height_params = format!( let height_params = format!("start_height={}&end_height={}", 0, tip.height,);
"start_height={}&end_height={}",
0,
tip.height,
);
let mut query_params = vec![height_params]; let mut query_params = vec![height_params];
query_params.append(&mut id_params); query_params.append(&mut id_params);
let url = let url = format!(
format!(
"{}/v1/chain/utxos/byheight?{}", "{}/v1/chain/utxos/byheight?{}",
config.check_node_api_http_addr, config.check_node_api_http_addr,
query_params.join("&"), query_params.join("&"),
@ -114,17 +106,15 @@ fn refresh_missing_block_hashes(config: &WalletConfig, keychain: &Keychain) -> R
let mut api_blocks: HashMap<pedersen::Commitment, api::BlockHeaderInfo> = HashMap::new(); let mut api_blocks: HashMap<pedersen::Commitment, api::BlockHeaderInfo> = HashMap::new();
let mut api_merkle_proofs: HashMap<pedersen::Commitment, MerkleProofWrapper> = HashMap::new(); let mut api_merkle_proofs: HashMap<pedersen::Commitment, MerkleProofWrapper> = HashMap::new();
match api::client::get::<Vec<api::BlockOutputs>>(url.as_str()) { match api::client::get::<Vec<api::BlockOutputs>>(url.as_str()) {
Ok(blocks) => { Ok(blocks) => for block in blocks {
for block in blocks { for out in block.outputs {
for out in block.outputs { api_blocks.insert(out.commit, block.header.clone());
api_blocks.insert(out.commit, block.header.clone()); if let Some(merkle_proof) = out.merkle_proof {
if let Some(merkle_proof) = out.merkle_proof { let wrapper = MerkleProofWrapper(merkle_proof);
let wrapper = MerkleProofWrapper(merkle_proof); api_merkle_proofs.insert(out.commit, wrapper);
api_merkle_proofs.insert(out.commit, wrapper);
}
} }
} }
} },
Err(e) => { Err(e) => {
// if we got anything other than 200 back from server, bye // if we got anything other than 200 back from server, bye
error!(LOGGER, "Refresh failed... unable to contact node: {}", e); error!(LOGGER, "Refresh failed... unable to contact node: {}", e);
@ -161,20 +151,18 @@ fn refresh_output_state(config: &WalletConfig, keychain: &Keychain) -> Result<()
// build a local map of wallet outputs keyed by commit // build a local map of wallet outputs keyed by commit
// and a list of outputs we want to query the node for // and a list of outputs we want to query the node for
let mut wallet_outputs: HashMap<pedersen::Commitment, Identifier> = HashMap::new(); let mut wallet_outputs: HashMap<pedersen::Commitment, Identifier> = HashMap::new();
let _ = WalletData::read_wallet(&config.data_file_dir, |wallet_data| { let _ =
for out in wallet_data WalletData::read_wallet(&config.data_file_dir, |wallet_data| {
.outputs for out in wallet_data.outputs.values().filter(|x| {
.values() x.root_key_id == keychain.root_key_id() && x.status != OutputStatus::Spent
.filter(|x| { }) {
x.root_key_id == keychain.root_key_id() && let commit = keychain
x.status != OutputStatus::Spent .commit_with_key_index(out.value, out.n_child)
}) .context(ErrorKind::Keychain)?;
{ wallet_outputs.insert(commit, out.key_id.clone());
let commit = keychain.commit_with_key_index(out.value, out.n_child).context(ErrorKind::Keychain)?; }
wallet_outputs.insert(commit, out.key_id.clone()); Ok(())
}; });
Ok(())
});
// build the necessary query params - // build the necessary query params -
// ?id=xxx&id=yyy&id=zzz // ?id=xxx&id=yyy&id=zzz
@ -211,18 +199,22 @@ fn refresh_output_state(config: &WalletConfig, keychain: &Keychain) -> Result<()
// the corresponding api output (if it exists) // the corresponding api output (if it exists)
// and refresh it in-place in the wallet. // and refresh it in-place in the wallet.
// Note: minimizing the time we spend holding the wallet lock. // Note: minimizing the time we spend holding the wallet lock.
WalletData::with_wallet(&config.data_file_dir, |wallet_data| for commit in wallet_outputs.keys() { WalletData::with_wallet(&config.data_file_dir, |wallet_data| {
let id = wallet_outputs.get(&commit).unwrap(); for commit in wallet_outputs.keys() {
if let Entry::Occupied(mut output) = wallet_data.outputs.entry(id.to_hex()) { let id = wallet_outputs.get(&commit).unwrap();
match api_utxos.get(&commit) { if let Entry::Occupied(mut output) = wallet_data.outputs.entry(id.to_hex()) {
Some(_) => mark_unspent_output(&mut output.get_mut()), match api_utxos.get(&commit) {
None => mark_spent_output(&mut output.get_mut()), Some(_) => mark_unspent_output(&mut output.get_mut()),
}; None => mark_spent_output(&mut output.get_mut()),
};
}
} }
}) })
} }
pub fn get_tip_from_node(config: &WalletConfig) -> Result<api::Tip, Error> { pub fn get_tip_from_node(config: &WalletConfig) -> Result<api::Tip, Error> {
let url = format!("{}/v1/chain", config.check_node_api_http_addr); let url = format!("{}/v1/chain", config.check_node_api_http_addr);
api::client::get::<api::Tip>(url.as_str()).context(ErrorKind::Node).map_err(|e| e.into()) api::client::get::<api::Tip>(url.as_str())
.context(ErrorKind::Node)
.map_err(|e| e.into())
} }
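
The byheight query above is assembled from plain key=value fragments joined with '&'. A hedged sketch of that construction, using names from the surrounding hunks (the id=... fragments are assumed to be hex-encoded commitments, matching the ?id=xxx&id=yyy shape mentioned in the comments):

    // hedged sketch of the query construction used in this file
    let mut query_params = vec![format!("start_height={}&end_height={}", 0, tip.height)];
    query_params.extend(
        wallet_outputs
            .keys()
            .map(|commit| format!("id={}", util::to_hex(commit.0.to_vec()))),
    );
    let url = format!(
        "{}/v1/chain/utxos/byheight?{}",
        config.check_node_api_http_addr,
        query_params.join("&"),
    );
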

View file

@ -40,8 +40,7 @@ pub fn create_coinbase(url: &str, block_fees: &BlockFees) -> Result<CbData, Erro
has_error = true; has_error = true;
error!( error!(
LOGGER, LOGGER,
"Failed to get coinbase from {}. Run grin wallet listen", "Failed to get coinbase from {}. Run grin wallet listen", url
url
); );
} }
if has_error { if has_error {
@ -56,7 +55,8 @@ fn retry_backoff_forever<F, R>(f: F) -> Result<R, Error>
where where
F: FnMut() -> Result<R, Error>, F: FnMut() -> Result<R, Error>,
{ {
let mut core = reactor::Core::new().context(ErrorKind::GenericError("Could not create reactor"))?; let mut core =
reactor::Core::new().context(ErrorKind::GenericError("Could not create reactor"))?;
let retry_strategy = let retry_strategy =
FibonacciBackoff::from_millis(100).max_delay(time::Duration::from_secs(10)); FibonacciBackoff::from_millis(100).max_delay(time::Duration::from_secs(10));
let retry_future = Retry::spawn(core.handle(), retry_strategy, f); let retry_future = Retry::spawn(core.handle(), retry_strategy, f);
@ -69,30 +69,38 @@ pub fn send_partial_tx(url: &str, partial_tx: &PartialTx) -> Result<PartialTx, E
} }
fn single_send_partial_tx(url: &str, partial_tx: &PartialTx) -> Result<PartialTx, Error> { fn single_send_partial_tx(url: &str, partial_tx: &PartialTx) -> Result<PartialTx, Error> {
let mut core = reactor::Core::new().context(ErrorKind::Hyper)?; let mut core = reactor::Core::new().context(ErrorKind::Hyper)?;
let client = hyper::Client::new(&core.handle()); let client = hyper::Client::new(&core.handle());
let mut req = Request::new(Method::Post, url.parse::<hyper::Uri>().context(ErrorKind::Hyper)?); let mut req = Request::new(
Method::Post,
url.parse::<hyper::Uri>().context(ErrorKind::Hyper)?,
);
req.headers_mut().set(ContentType::json()); req.headers_mut().set(ContentType::json());
let json = serde_json::to_string(&partial_tx).context(ErrorKind::Hyper)?; let json = serde_json::to_string(&partial_tx).context(ErrorKind::Hyper)?;
req.set_body(json); req.set_body(json);
let work = client.request(req).and_then(|res| { let work = client.request(req).and_then(|res| {
res.body().concat2().and_then(move |body| { res.body().concat2().and_then(move |body| {
let partial_tx: PartialTx = serde_json::from_slice(&body).map_err(|e| io::Error::new(io::ErrorKind::Other, e))?; let partial_tx: PartialTx =
serde_json::from_slice(&body).map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
Ok(partial_tx) Ok(partial_tx)
}) })
}); });
let res = core.run(work).context(ErrorKind::Hyper)?; let res = core.run(work).context(ErrorKind::Hyper)?;
Ok(res) Ok(res)
} }
/// Makes a single request to the wallet API to create a new coinbase output. /// Makes a single request to the wallet API to create a new coinbase output.
fn single_create_coinbase(url: &str, block_fees: &BlockFees) -> Result<CbData, Error> { fn single_create_coinbase(url: &str, block_fees: &BlockFees) -> Result<CbData, Error> {
let mut core = reactor::Core::new().context(ErrorKind::GenericError("Could not create reactor"))?; let mut core =
reactor::Core::new().context(ErrorKind::GenericError("Could not create reactor"))?;
let client = hyper::Client::new(&core.handle()); let client = hyper::Client::new(&core.handle());
let mut req = Request::new(Method::Post, url.parse::<hyper::Uri>().context(ErrorKind::Uri)?); let mut req = Request::new(
Method::Post,
url.parse::<hyper::Uri>().context(ErrorKind::Uri)?,
);
req.headers_mut().set(ContentType::json()); req.headers_mut().set(ContentType::json());
let json = serde_json::to_string(&block_fees).context(ErrorKind::Format)?; let json = serde_json::to_string(&block_fees).context(ErrorKind::Format)?;
req.set_body(json); req.set_body(json);
@ -105,6 +113,7 @@ fn single_create_coinbase(url: &str, block_fees: &BlockFees) -> Result<CbData, E
}) })
}); });
let res = core.run(work).context(ErrorKind::GenericError("Could not run core"))?; let res = core.run(work)
.context(ErrorKind::GenericError("Could not run core"))?;
Ok(res) Ok(res)
} }
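
A hedged usage sketch for the coinbase path above; `coinbase_url` stands in for the wallet listener endpoint, and the BlockFees fields follow the accesses visible elsewhere in this commit (fees, height, optional key_id):

    // hypothetical values, for illustration only
    let block_fees = BlockFees { fees: 7_000_000, height: 1_234, key_id: None };
    let cb: CbData = create_coinbase(&coinbase_url, &block_fees)?;
    // cb carries the serialized output, kernel and key id built by the wallet
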

View file

@ -26,7 +26,6 @@ use types::*;
use util; use util;
use failure::{Fail, ResultExt}; use failure::{Fail, ResultExt};
pub struct CoinbaseHandler { pub struct CoinbaseHandler {
pub config: WalletConfig, pub config: WalletConfig,
pub keychain: Keychain, pub keychain: Keychain,
@ -35,22 +34,21 @@ pub struct CoinbaseHandler {
impl CoinbaseHandler { impl CoinbaseHandler {
fn build_coinbase(&self, block_fees: &BlockFees) -> Result<CbData, Error> { fn build_coinbase(&self, block_fees: &BlockFees) -> Result<CbData, Error> {
let (out, kern, block_fees) = receive_coinbase(&self.config, &self.keychain, block_fees) let (out, kern, block_fees) = receive_coinbase(&self.config, &self.keychain, block_fees)
.map_err(|e| { .map_err(|e| api::Error::Internal(format!("Error building coinbase: {:?}", e)))
api::Error::Internal(format!("Error building coinbase: {:?}", e)) .context(ErrorKind::Node)?;
}).context(ErrorKind::Node)?;
let out_bin = ser::ser_vec(&out).map_err(|e| { let out_bin = ser::ser_vec(&out)
api::Error::Internal(format!("Error serializing output: {:?}", e)) .map_err(|e| api::Error::Internal(format!("Error serializing output: {:?}", e)))
}).context(ErrorKind::Node)?; .context(ErrorKind::Node)?;
let kern_bin = ser::ser_vec(&kern).map_err(|e| { let kern_bin = ser::ser_vec(&kern)
api::Error::Internal(format!("Error serializing kernel: {:?}", e)) .map_err(|e| api::Error::Internal(format!("Error serializing kernel: {:?}", e)))
}).context(ErrorKind::Node)?; .context(ErrorKind::Node)?;
let key_id_bin = match block_fees.key_id { let key_id_bin = match block_fees.key_id {
Some(key_id) => ser::ser_vec(&key_id).map_err(|e| { Some(key_id) => ser::ser_vec(&key_id)
api::Error::Internal(format!("Error serializing kernel: {:?}", e)) .map_err(|e| api::Error::Internal(format!("Error serializing kernel: {:?}", e)))
}).context(ErrorKind::Node)?, .context(ErrorKind::Node)?,
None => vec![], None => vec![],
}; };

View file

@ -15,14 +15,15 @@
use checker; use checker;
use keychain::Keychain; use keychain::Keychain;
use core::core::amount_to_hr_string; use core::core::amount_to_hr_string;
use types::{WalletConfig, WalletData, OutputStatus, WalletInfo}; use types::{OutputStatus, WalletConfig, WalletData, WalletInfo};
use prettytable; use prettytable;
pub fn show_info(config: &WalletConfig, keychain: &Keychain) { pub fn show_info(config: &WalletConfig, keychain: &Keychain) {
let wallet_info = retrieve_info(config, keychain); let wallet_info = retrieve_info(config, keychain);
println!("\n____ Wallet Summary Info at {} ({}) ____\n", println!(
wallet_info.current_height, "\n____ Wallet Summary Info at {} ({}) ____\n",
wallet_info.data_confirmed_from); wallet_info.current_height, wallet_info.data_confirmed_from
);
let mut table = table!( let mut table = table!(
[bFG->"Total", FG->amount_to_hr_string(wallet_info.total)], [bFG->"Total", FG->amount_to_hr_string(wallet_info.total)],
[bFY->"Awaiting Confirmation", FY->amount_to_hr_string(wallet_info.amount_awaiting_confirmation)], [bFY->"Awaiting Confirmation", FY->amount_to_hr_string(wallet_info.amount_awaiting_confirmation)],
@ -37,9 +38,9 @@ pub fn show_info(config: &WalletConfig, keychain: &Keychain) {
if !wallet_info.data_confirmed { if !wallet_info.data_confirmed {
println!( println!(
"\nWARNING: Failed to verify wallet contents with grin server. \ "\nWARNING: Failed to verify wallet contents with grin server. \
Above info may not be fully updated or may be invalid! \ Above info may not be fully updated or may be invalid! \
Check that your `grin server` is OK, or see `wallet help restore`" Check that your `grin server` is OK, or see `wallet help restore`"
); );
} }
} }
@ -67,7 +68,7 @@ pub fn retrieve_info(config: &WalletConfig, keychain: &Keychain) -> WalletInfo {
if out.status == OutputStatus::Unspent { if out.status == OutputStatus::Unspent {
unspent_total += out.value; unspent_total += out.value;
if out.lock_height > current_height { if out.lock_height > current_height {
unspent_but_locked_total += out.value; unspent_but_locked_total += out.value;
} }
} }
if out.status == OutputStatus::Unconfirmed && !out.is_coinbase { if out.status == OutputStatus::Unconfirmed && !out.is_coinbase {
@ -76,18 +77,18 @@ pub fn retrieve_info(config: &WalletConfig, keychain: &Keychain) -> WalletInfo {
if out.status == OutputStatus::Locked { if out.status == OutputStatus::Locked {
locked_total += out.value; locked_total += out.value;
} }
}; }
let mut data_confirmed = true; let mut data_confirmed = true;
if let Err(_) = result { if let Err(_) = result {
data_confirmed = false; data_confirmed = false;
} }
Ok(WalletInfo { Ok(WalletInfo {
current_height : current_height, current_height: current_height,
total: unspent_total+unconfirmed_total, total: unspent_total + unconfirmed_total,
amount_awaiting_confirmation: unconfirmed_total, amount_awaiting_confirmation: unconfirmed_total,
amount_confirmed_but_locked: unspent_but_locked_total, amount_confirmed_but_locked: unspent_but_locked_total,
amount_currently_spendable: unspent_total-unspent_but_locked_total, amount_currently_spendable: unspent_total - unspent_but_locked_total,
amount_locked: locked_total, amount_locked: locked_total,
data_confirmed: data_confirmed, data_confirmed: data_confirmed,
data_confirmed_from: String::from(from), data_confirmed_from: String::from(from),

View file

@ -16,17 +16,17 @@
extern crate blake2_rfc as blake2; extern crate blake2_rfc as blake2;
extern crate byteorder; extern crate byteorder;
#[macro_use]
extern crate prettytable;
extern crate rand; extern crate rand;
extern crate serde; extern crate serde;
extern crate uuid;
#[macro_use] #[macro_use]
extern crate serde_derive; extern crate serde_derive;
extern crate serde_json; extern crate serde_json;
#[macro_use] #[macro_use]
extern crate slog; extern crate slog;
#[macro_use]
extern crate prettytable;
extern crate term; extern crate term;
extern crate uuid;
extern crate bodyparser; extern crate bodyparser;
extern crate failure; extern crate failure;
@ -57,8 +57,9 @@ pub mod client;
pub mod server; pub mod server;
pub use outputs::show_outputs; pub use outputs::show_outputs;
pub use info::{show_info, retrieve_info}; pub use info::{retrieve_info, show_info};
pub use receiver::{WalletReceiver}; pub use receiver::WalletReceiver;
pub use sender::{issue_burn_tx, issue_send_tx}; pub use sender::{issue_burn_tx, issue_send_tx};
pub use types::{BlockFees, CbData, Error, ErrorKind, WalletConfig, WalletReceiveRequest, WalletInfo, WalletSeed}; pub use types::{BlockFees, CbData, Error, ErrorKind, WalletConfig, WalletInfo,
WalletReceiveRequest, WalletSeed};
pub use restore::restore; pub use restore::restore;

View file

@ -15,19 +15,19 @@
use checker; use checker;
use keychain::Keychain; use keychain::Keychain;
use core::core; use core::core;
use types::{WalletConfig, WalletData, OutputStatus}; use types::{OutputStatus, WalletConfig, WalletData};
use prettytable; use prettytable;
use term; use term;
use std::io::prelude::*; use std::io::prelude::*;
pub fn show_outputs(config: &WalletConfig, keychain: &Keychain, show_spent:bool) { pub fn show_outputs(config: &WalletConfig, keychain: &Keychain, show_spent: bool) {
let root_key_id = keychain.root_key_id(); let root_key_id = keychain.root_key_id();
let result = checker::refresh_outputs(&config, &keychain); let result = checker::refresh_outputs(&config, &keychain);
// just read the wallet here, no need for a write lock // just read the wallet here, no need for a write lock
let _ = WalletData::read_wallet(&config.data_file_dir, |wallet_data| { let _ = WalletData::read_wallet(&config.data_file_dir, |wallet_data| {
// get the current height via the api // get the current height via the api
// if we cannot get the current height use the max height known to the wallet // if we cannot get the current height use the max height known to the wallet
let current_height = match checker::get_tip_from_node(config) { let current_height = match checker::get_tip_from_node(config) {
Ok(tip) => tip.height, Ok(tip) => tip.height,
Err(_) => match wallet_data.outputs.values().map(|out| out.height).max() { Err(_) => match wallet_data.outputs.values().map(|out| out.height).max() {
@ -40,16 +40,17 @@ pub fn show_outputs(config: &WalletConfig, keychain: &Keychain, show_spent:bool)
.outputs .outputs
.values() .values()
.filter(|out| out.root_key_id == root_key_id) .filter(|out| out.root_key_id == root_key_id)
.filter(|out| .filter(|out| {
if show_spent { if show_spent {
true true
} else { } else {
out.status != OutputStatus::Spent out.status != OutputStatus::Spent
}) }
})
.collect::<Vec<_>>(); .collect::<Vec<_>>();
outputs.sort_by_key(|out| out.n_child); outputs.sort_by_key(|out| out.n_child);
let title=format!("Wallet Outputs - Block Height: {}", current_height); let title = format!("Wallet Outputs - Block Height: {}", current_height);
println!(); println!();
let mut t = term::stdout().unwrap(); let mut t = term::stdout().unwrap();
t.fg(term::color::MAGENTA).unwrap(); t.fg(term::color::MAGENTA).unwrap();
@ -69,13 +70,13 @@ pub fn show_outputs(config: &WalletConfig, keychain: &Keychain, show_spent:bool)
]); ]);
for out in outputs { for out in outputs {
let key_id=format!("{}", out.key_id); let key_id = format!("{}", out.key_id);
let height=format!("{}", out.height); let height = format!("{}", out.height);
let lock_height=format!("{}", out.lock_height); let lock_height = format!("{}", out.lock_height);
let status=format!("{:?}", out.status); let status = format!("{:?}", out.status);
let is_coinbase=format!("{}", out.is_coinbase); let is_coinbase = format!("{}", out.is_coinbase);
let num_confirmations=format!("{}", out.num_confirmations(current_height)); let num_confirmations = format!("{}", out.num_confirmations(current_height));
let value=format!("{}", core::amount_to_hr_string(out.value)); let value = format!("{}", core::amount_to_hr_string(out.value));
table.add_row(row![ table.add_row(row![
bFC->key_id, bFC->key_id,
bFB->height, bFB->height,

View file

@ -25,12 +25,12 @@ use uuid::Uuid;
use api; use api;
use core::consensus::reward; use core::consensus::reward;
use core::core::{build, Block, Committed, Output, Transaction, TxKernel, amount_to_hr_string}; use core::core::{amount_to_hr_string, build, Block, Committed, Output, Transaction, TxKernel};
use core::{global, ser}; use core::{global, ser};
use keychain::{Identifier, Keychain, BlindingFactor}; use keychain::{BlindingFactor, Identifier, Keychain};
use types::*; use types::*;
use util::{LOGGER, to_hex, secp}; use util::{secp, to_hex, LOGGER};
use failure::{ResultExt}; use failure::ResultExt;
/// Dummy wrapper for the hex-encoded serialized transaction. /// Dummy wrapper for the hex-encoded serialized transaction.
#[derive(Serialize, Deserialize)] #[derive(Serialize, Deserialize)]
@ -51,9 +51,10 @@ pub struct TxWrapper {
fn handle_sender_initiation( fn handle_sender_initiation(
config: &WalletConfig, config: &WalletConfig,
keychain: &Keychain, keychain: &Keychain,
partial_tx: &PartialTx partial_tx: &PartialTx,
) -> Result<PartialTx, Error> { ) -> Result<PartialTx, Error> {
let (amount, _sender_pub_blinding, sender_pub_nonce, kernel_offset, _sig, tx) = read_partial_tx(keychain, partial_tx)?; let (amount, _sender_pub_blinding, sender_pub_nonce, kernel_offset, _sig, tx) =
read_partial_tx(keychain, partial_tx)?;
let root_key_id = keychain.root_key_id(); let root_key_id = keychain.root_key_id();
@ -68,23 +69,24 @@ fn handle_sender_initiation(
})?; })?;
} }
if fee > amount { if fee > amount {
info!( info!(
LOGGER, LOGGER,
"Rejected the transfer because transaction fee ({}) exceeds received amount ({}).", "Rejected the transfer because transaction fee ({}) exceeds received amount ({}).",
amount_to_hr_string(fee), amount_to_hr_string(fee),
amount_to_hr_string(amount) amount_to_hr_string(amount)
); );
return Err(ErrorKind::FeeExceedsAmount { return Err(ErrorKind::FeeExceedsAmount {
sender_amount: amount, sender_amount: amount,
recipient_fee: fee, recipient_fee: fee,
})?; })?;
} }
let out_amount = amount - fee; let out_amount = amount - fee;
// First step is just to get the excess sum of the outputs we're participating in // First step is just to get the excess sum of the outputs we're participating
// Output and key needs to be stored until transaction finalisation time, somehow // in Output and key needs to be stored until transaction finalisation time,
// somehow
let key_id = WalletData::with_wallet(&config.data_file_dir, |wallet_data| { let key_id = WalletData::with_wallet(&config.data_file_dir, |wallet_data| {
let (key_id, derivation) = next_available_key(&wallet_data, keychain); let (key_id, derivation) = next_available_key(&wallet_data, keychain);
@ -106,29 +108,35 @@ fn handle_sender_initiation(
})?; })?;
// Still handy for getting the blinding sum // Still handy for getting the blinding sum
let (_, blind_sum) = build::partial_transaction( let (_, blind_sum) =
vec![ build::partial_transaction(vec![build::output(out_amount, key_id.clone())], keychain)
build::output(out_amount, key_id.clone()), .context(ErrorKind::Keychain)?;
],
keychain,
).context(ErrorKind::Keychain)?;
warn!(LOGGER, "Creating new aggsig context"); warn!(LOGGER, "Creating new aggsig context");
// Create a new aggsig context // Create a new aggsig context
// this will create a new blinding sum and nonce, and store them // this will create a new blinding sum and nonce, and store them
let blind = blind_sum.secret_key(&keychain.secp()).context(ErrorKind::Keychain)?; let blind = blind_sum
keychain.aggsig_create_context(&partial_tx.id, blind).context(ErrorKind::Keychain)?; .secret_key(&keychain.secp())
.context(ErrorKind::Keychain)?;
keychain
.aggsig_create_context(&partial_tx.id, blind)
.context(ErrorKind::Keychain)?;
keychain.aggsig_add_output(&partial_tx.id, &key_id); keychain.aggsig_add_output(&partial_tx.id, &key_id);
let sig_part = keychain.aggsig_calculate_partial_sig( let sig_part = keychain
&partial_tx.id, .aggsig_calculate_partial_sig(&partial_tx.id, &sender_pub_nonce, fee, tx.lock_height())
&sender_pub_nonce, .unwrap();
fee,
tx.lock_height(),
).unwrap();
// Build the response, which should contain sR, blinding excess xR * G, public nonce kR * G // Build the response, which should contain sR, blinding excess xR * G, public
let mut partial_tx = build_partial_tx(&partial_tx.id, keychain, amount, kernel_offset, Some(sig_part), tx); // nonce kR * G
let mut partial_tx = build_partial_tx(
&partial_tx.id,
keychain,
amount,
kernel_offset,
Some(sig_part),
tx,
);
partial_tx.phase = PartialTxPhase::ReceiverInitiation; partial_tx.phase = PartialTxPhase::ReceiverInitiation;
Ok(partial_tx) Ok(partial_tx)
@ -149,16 +157,18 @@ fn handle_sender_initiation(
fn handle_sender_confirmation( fn handle_sender_confirmation(
config: &WalletConfig, config: &WalletConfig,
keychain: &Keychain, keychain: &Keychain,
partial_tx: &PartialTx partial_tx: &PartialTx,
) -> Result<PartialTx, Error> { ) -> Result<PartialTx, Error> {
let (amount, sender_pub_blinding, sender_pub_nonce, kernel_offset, sender_sig_part, tx) = read_partial_tx(keychain, partial_tx)?; let (amount, sender_pub_blinding, sender_pub_nonce, kernel_offset, sender_sig_part, tx) =
read_partial_tx(keychain, partial_tx)?;
let sender_sig_part = sender_sig_part.unwrap(); let sender_sig_part = sender_sig_part.unwrap();
let res = keychain.aggsig_verify_partial_sig( let res = keychain.aggsig_verify_partial_sig(
&partial_tx.id, &partial_tx.id,
&sender_sig_part, &sender_sig_part,
&sender_pub_nonce, &sender_pub_nonce,
&sender_pub_blinding, &sender_pub_blinding,
tx.fee(), tx.lock_height(), tx.fee(),
tx.lock_height(),
); );
if !res { if !res {
@ -167,26 +177,29 @@ fn handle_sender_confirmation(
} }
// Just calculate our sig part again instead of storing // Just calculate our sig part again instead of storing
let our_sig_part = keychain.aggsig_calculate_partial_sig( let our_sig_part = keychain
&partial_tx.id, .aggsig_calculate_partial_sig(
&sender_pub_nonce, &partial_tx.id,
tx.fee(), &sender_pub_nonce,
tx.lock_height(), tx.fee(),
).unwrap(); tx.lock_height(),
)
.unwrap();
// And the final signature // And the final signature
let final_sig = keychain.aggsig_calculate_final_sig( let final_sig = keychain
&partial_tx.id, .aggsig_calculate_final_sig(
&sender_sig_part, &partial_tx.id,
&our_sig_part, &sender_sig_part,
&sender_pub_nonce, &our_sig_part,
).unwrap(); &sender_pub_nonce,
)
.unwrap();
// Calculate the final public key (for our own sanity check) // Calculate the final public key (for our own sanity check)
let final_pubkey = keychain.aggsig_calculate_final_pubkey( let final_pubkey = keychain
&partial_tx.id, .aggsig_calculate_final_pubkey(&partial_tx.id, &sender_pub_blinding)
&sender_pub_blinding, .unwrap();
).unwrap();
// Check our final sig verifies // Check our final sig verifies
let res = keychain.aggsig_verify_final_sig_build_msg( let res = keychain.aggsig_verify_final_sig_build_msg(
@ -214,12 +227,18 @@ fn handle_sender_confirmation(
let tx_hex = to_hex(ser::ser_vec(&final_tx).unwrap()); let tx_hex = to_hex(ser::ser_vec(&final_tx).unwrap());
let url = format!("{}/v1/pool/push", config.check_node_api_http_addr.as_str()); let url = format!("{}/v1/pool/push", config.check_node_api_http_addr.as_str());
api::client::post(url.as_str(), &TxWrapper { tx_hex: tx_hex }) api::client::post(url.as_str(), &TxWrapper { tx_hex: tx_hex }).context(ErrorKind::Node)?;
.context(ErrorKind::Node)?;
// Return what we've actually posted // Return what we've actually posted
// TODO - why build_partial_tx here? Just a naming issue? // TODO - why build_partial_tx here? Just a naming issue?
let mut partial_tx = build_partial_tx(&partial_tx.id, keychain, amount, kernel_offset, Some(final_sig), tx); let mut partial_tx = build_partial_tx(
&partial_tx.id,
keychain,
amount,
kernel_offset,
Some(final_sig),
tx,
);
partial_tx.phase = PartialTxPhase::ReceiverConfirmation; partial_tx.phase = PartialTxPhase::ReceiverConfirmation;
Ok(partial_tx) Ok(partial_tx)
} }
@ -239,28 +258,38 @@ impl Handler for WalletReceiver {
if let Ok(Some(partial_tx)) = struct_body { if let Ok(Some(partial_tx)) = struct_body {
match partial_tx.phase { match partial_tx.phase {
PartialTxPhase::SenderInitiation => { PartialTxPhase::SenderInitiation => {
let resp_tx = handle_sender_initiation(&self.config, &self.keychain, &partial_tx) let resp_tx = handle_sender_initiation(
.map_err(|e| { &self.config,
&self.keychain,
&partial_tx,
).map_err(|e| {
error!(LOGGER, "Phase 1 Sender Initiation -> Problematic partial tx, looks like this: {:?}", partial_tx); error!(LOGGER, "Phase 1 Sender Initiation -> Problematic partial tx, looks like this: {:?}", partial_tx);
api::Error::Internal( api::Error::Internal(format!(
format!("Error processing partial transaction: {:?}", e), "Error processing partial transaction: {:?}",
)}) e
.unwrap(); ))
})
.unwrap();
let json = serde_json::to_string(&resp_tx).unwrap(); let json = serde_json::to_string(&resp_tx).unwrap();
Ok(Response::with((status::Ok, json))) Ok(Response::with((status::Ok, json)))
}, }
PartialTxPhase::SenderConfirmation => { PartialTxPhase::SenderConfirmation => {
let resp_tx = handle_sender_confirmation(&self.config, &self.keychain, &partial_tx) let resp_tx = handle_sender_confirmation(
.map_err(|e| { &self.config,
&self.keychain,
&partial_tx,
).map_err(|e| {
error!(LOGGER, "Phase 3 Sender Confirmation -> Problematic partial tx, looks like this: {:?}", partial_tx); error!(LOGGER, "Phase 3 Sender Confirmation -> Problematic partial tx, looks like this: {:?}", partial_tx);
api::Error::Internal( api::Error::Internal(format!(
format!("Error processing partial transaction: {:?}", e), "Error processing partial transaction: {:?}",
)}) e
.unwrap(); ))
})
.unwrap();
let json = serde_json::to_string(&resp_tx).unwrap(); let json = serde_json::to_string(&resp_tx).unwrap();
Ok(Response::with((status::Ok, json))) Ok(Response::with((status::Ok, json)))
}, }
_=> { _ => {
error!(LOGGER, "Unhandled Phase: {:?}", partial_tx); error!(LOGGER, "Unhandled Phase: {:?}", partial_tx);
Ok(Response::with((status::BadRequest, "Unhandled Phase"))) Ok(Response::with((status::BadRequest, "Unhandled Phase")))
} }
@ -271,10 +300,7 @@ impl Handler for WalletReceiver {
} }
} }
fn retrieve_existing_key( fn retrieve_existing_key(wallet_data: &WalletData, key_id: Identifier) -> (Identifier, u32) {
wallet_data: &WalletData,
key_id: Identifier,
) -> (Identifier, u32) {
if let Some(existing) = wallet_data.get_output(&key_id) { if let Some(existing) = wallet_data.get_output(&key_id) {
let key_id = existing.key_id.clone(); let key_id = existing.key_id.clone();
let derivation = existing.n_child; let derivation = existing.n_child;
@ -284,10 +310,7 @@ fn retrieve_existing_key(
} }
} }
fn next_available_key( fn next_available_key(wallet_data: &WalletData, keychain: &Keychain) -> (Identifier, u32) {
wallet_data: &WalletData,
keychain: &Keychain,
) -> (Identifier, u32) {
let root_key_id = keychain.root_key_id(); let root_key_id = keychain.root_key_id();
let derivation = wallet_data.next_child(root_key_id.clone()); let derivation = wallet_data.next_child(root_key_id.clone());
let key_id = keychain.derive_key_id(derivation).unwrap(); let key_id = keychain.derive_key_id(derivation).unwrap();
@ -342,12 +365,8 @@ pub fn receive_coinbase(
debug!(LOGGER, "receive_coinbase: {:?}", block_fees); debug!(LOGGER, "receive_coinbase: {:?}", block_fees);
let (out, kern) = Block::reward_output( let (out, kern) = Block::reward_output(&keychain, &key_id, block_fees.fees, block_fees.height)
&keychain, .context(ErrorKind::Keychain)?;
&key_id,
block_fees.fees,
block_fees.height,
).context(ErrorKind::Keychain)?;
Ok((out, kern, block_fees)) Ok((out, kern, block_fees))
} }
@ -381,11 +400,11 @@ fn build_final_transaction(
amount_to_hr_string(fee), amount_to_hr_string(fee),
amount_to_hr_string(amount) amount_to_hr_string(amount)
); );
return Err(ErrorKind::FeeExceedsAmount { return Err(ErrorKind::FeeExceedsAmount {
sender_amount: amount, sender_amount: amount,
recipient_fee: fee, recipient_fee: fee,
})?; })?;
} }
let out_amount = amount - fee; let out_amount = amount - fee;
@ -430,8 +449,14 @@ fn build_final_transaction(
let tx_excess = final_tx.sum_commitments().context(ErrorKind::Transaction)?; let tx_excess = final_tx.sum_commitments().context(ErrorKind::Transaction)?;
// subtract the kernel_excess (built from kernel_offset) // subtract the kernel_excess (built from kernel_offset)
let offset_excess = keychain.secp().commit(0, kernel_offset.secret_key(&keychain.secp()).unwrap()).unwrap(); let offset_excess = keychain
keychain.secp().commit_sum(vec![tx_excess], vec![offset_excess]).context(ErrorKind::Transaction)? .secp()
.commit(0, kernel_offset.secret_key(&keychain.secp()).unwrap())
.unwrap();
keychain
.secp()
.commit_sum(vec![tx_excess], vec![offset_excess])
.context(ErrorKind::Transaction)?
}; };
// update the tx kernel to reflect the offset excess and sig // update the tx kernel to reflect the offset excess and sig
@ -440,7 +465,9 @@ fn build_final_transaction(
final_tx.kernels[0].excess_sig = excess_sig.clone(); final_tx.kernels[0].excess_sig = excess_sig.clone();
// confirm the kernel verifies successfully before proceeding // confirm the kernel verifies successfully before proceeding
final_tx.kernels[0].verify().context(ErrorKind::Transaction)?; final_tx.kernels[0]
.verify()
.context(ErrorKind::Transaction)?;
// confirm the overall transaction is valid (including the updated kernel) // confirm the overall transaction is valid (including the updated kernel)
let _ = final_tx.validate().context(ErrorKind::Transaction)?; let _ = final_tx.validate().context(ErrorKind::Transaction)?;

View file

@ -11,18 +11,17 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
use failure::{ResultExt, Fail}; use failure::{Fail, ResultExt};
use keychain::{Keychain, Identifier}; use keychain::{Identifier, Keychain};
use util::{LOGGER, to_hex}; use util::{to_hex, LOGGER};
use util::secp::pedersen; use util::secp::pedersen;
use api; use api;
use core::global; use core::global;
use core::core::{Output, SwitchCommitHash}; use core::core::{Output, SwitchCommitHash};
use core::core::transaction::OutputFeatures; use core::core::transaction::OutputFeatures;
use types::{WalletConfig, WalletData, OutputData, OutputStatus, Error, ErrorKind}; use types::{Error, ErrorKind, OutputData, OutputStatus, WalletConfig, WalletData};
use byteorder::{BigEndian, ByteOrder}; use byteorder::{BigEndian, ByteOrder};
pub fn get_chain_height(config: &WalletConfig) -> Result<u64, Error> { pub fn get_chain_height(config: &WalletConfig) -> Result<u64, Error> {
let url = format!("{}/v1/chain", config.check_node_api_http_addr); let url = format!("{}/v1/chain", config.check_node_api_http_addr);
@ -46,13 +45,9 @@ fn output_with_range_proof(
commit_id: &str, commit_id: &str,
height: u64, height: u64,
) -> Result<api::OutputPrintable, Error> { ) -> Result<api::OutputPrintable, Error> {
let url = let url = format!(
format!(
"{}/v1/chain/utxos/byheight?start_height={}&end_height={}&id={}&include_rp", "{}/v1/chain/utxos/byheight?start_height={}&end_height={}&id={}&include_rp",
config.check_node_api_http_addr, config.check_node_api_http_addr, height, height, commit_id,
height,
height,
commit_id,
); );
match api::client::get::<Vec<api::BlockOutputs>>(url.as_str()) { match api::client::get::<Vec<api::BlockOutputs>>(url.as_str()) {
@ -64,7 +59,7 @@ fn output_with_range_proof(
Err(ErrorKind::Node)? Err(ErrorKind::Node)?
} }
} else { } else {
Err(ErrorKind::Node)? Err(ErrorKind::Node)?
} }
} }
Err(e) => { Err(e) => {
@ -90,9 +85,15 @@ fn retrieve_amount_and_coinbase_status(
api::OutputType::Coinbase => OutputFeatures::COINBASE_OUTPUT, api::OutputType::Coinbase => OutputFeatures::COINBASE_OUTPUT,
api::OutputType::Transaction => OutputFeatures::DEFAULT_OUTPUT, api::OutputType::Transaction => OutputFeatures::DEFAULT_OUTPUT,
}, },
proof: output.range_proof().context(ErrorKind::GenericError("range proof error"))?, proof: output
switch_commit_hash: output.switch_commit_hash().context(ErrorKind::GenericError("switch commit hash error"))?, .range_proof()
commit: output.commit().context(ErrorKind::GenericError("commit error"))?, .context(ErrorKind::GenericError("range proof error"))?,
switch_commit_hash: output
.switch_commit_hash()
.context(ErrorKind::GenericError("switch commit hash error"))?,
commit: output
.commit()
.context(ErrorKind::GenericError("commit error"))?,
}; };
if let Some(amount) = core_output.recover_value(keychain, &key_id) { if let Some(amount) = core_output.recover_value(keychain, &key_id) {
@ -113,11 +114,9 @@ pub fn utxos_batch_block(
) -> Result<Vec<api::BlockOutputs>, Error> { ) -> Result<Vec<api::BlockOutputs>, Error> {
let query_param = format!("start_height={}&end_height={}", start_height, end_height); let query_param = format!("start_height={}&end_height={}", start_height, end_height);
let url = let url = format!(
format!(
"{}/v1/chain/utxos/byheight?{}", "{}/v1/chain/utxos/byheight?{}",
config.check_node_api_http_addr, config.check_node_api_http_addr, query_param,
query_param,
); );
match api::client::get::<Vec<api::BlockOutputs>>(url.as_str()) { match api::client::get::<Vec<api::BlockOutputs>>(url.as_str()) {
@ -167,12 +166,7 @@ fn find_utxos_with_key(
); );
if x == expected_hash { if x == expected_hash {
info!( info!(LOGGER, "Output found: {:?}, key_index: {:?}", output, i,);
LOGGER,
"Output found: {:?}, key_index: {:?}",
output,
i,
);
// add it to result set here // add it to result set here
let commit_id = output.commit.0; let commit_id = output.commit.0;
@ -219,8 +213,7 @@ fn find_utxos_with_key(
} else { } else {
info!( info!(
LOGGER, LOGGER,
"Unable to retrieve the amount (needs investigating) {:?}", "Unable to retrieve the amount (needs investigating) {:?}", res,
res,
); );
} }
} }
@ -258,15 +251,13 @@ pub fn restore(
let chain_height = get_chain_height(config)?; let chain_height = get_chain_height(config)?;
info!( info!(
LOGGER, LOGGER,
"Starting restore: Chain height is {}.", "Starting restore: Chain height is {}.", chain_height
chain_height
); );
let mut switch_commit_cache: Vec<pedersen::Commitment> = vec![]; let mut switch_commit_cache: Vec<pedersen::Commitment> = vec![];
info!( info!(
LOGGER, LOGGER,
"Building key derivation cache ({}) ...", "Building key derivation cache ({}) ...", key_derivations,
key_derivations,
); );
for i in 0..key_derivations { for i in 0..key_derivations {
let switch_commit = keychain.switch_commit_from_index(i as u32).unwrap(); let switch_commit = keychain.switch_commit_from_index(i as u32).unwrap();
@ -318,12 +309,11 @@ pub fn restore(
block: None, block: None,
merkle_proof: None, merkle_proof: None,
}); });
}; }
} }
} }
}); });
h > 0 h > 0
} } {}
{}
Ok(()) Ok(())
} }
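
restore walks the chain from the tip downwards in fixed-size batches, feeding each batch through find_utxos_with_key; the `while { ... h > 0 } {}` construct above is that loop. A hedged sketch with an illustrative batch size:

    // hedged sketch of the top-down batch scan; the 1000-block batch
    // size is an assumption for illustration
    let mut h = chain_height;
    while h > 0 {
        let start_height = h.saturating_sub(1000);
        let blocks = utxos_batch_block(config, start_height + 1, h)?;
        // ... run find_utxos_with_key over each block's outputs ...
        h = start_height;
    }
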

View file

@ -18,9 +18,9 @@ use uuid::Uuid;
use api; use api;
use client; use client;
use checker; use checker;
use core::core::{build, Transaction, amount_to_hr_string}; use core::core::{amount_to_hr_string, build, Transaction};
use core::ser; use core::ser;
use keychain::{BlindingFactor, BlindSum, Identifier, Keychain}; use keychain::{BlindSum, BlindingFactor, Identifier, Keychain};
use receiver::TxWrapper; use receiver::TxWrapper;
use types::*; use types::*;
use util::LOGGER; use util::LOGGER;
@ -64,44 +64,52 @@ pub fn issue_send_tx(
// Generate a random kernel offset here // Generate a random kernel offset here
// and subtract it from the blind_sum so we create // and subtract it from the blind_sum so we create
// the aggsig context with the "split" key // the aggsig context with the "split" key
let kernel_offset = BlindingFactor::from_secret_key( let kernel_offset =
SecretKey::new(&keychain.secp(), &mut thread_rng()) BlindingFactor::from_secret_key(SecretKey::new(&keychain.secp(), &mut thread_rng()));
);
let blind_offset = keychain.blind_sum( let blind_offset = keychain
&BlindSum::new() .blind_sum(&BlindSum::new()
.add_blinding_factor(blind) .add_blinding_factor(blind)
.sub_blinding_factor(kernel_offset) .sub_blinding_factor(kernel_offset))
).unwrap(); .unwrap();
// //
// -Sender picks random blinding factors for all outputs it participates in, computes total blinding excess xS // -Sender picks random blinding factors for all outputs it participates in,
// -Sender picks random nonce kS // computes total blinding excess xS -Sender picks random nonce kS
// -Sender posts inputs, outputs, Message M=fee, xS * G and kS * G to Receiver // -Sender posts inputs, outputs, Message M=fee, xS * G and kS * G to Receiver
// //
// Create a new aggsig context // Create a new aggsig context
let tx_id = Uuid::new_v4(); let tx_id = Uuid::new_v4();
let skey = blind_offset.secret_key(&keychain.secp()).context(ErrorKind::Keychain)?; let skey = blind_offset
keychain.aggsig_create_context(&tx_id, skey).context(ErrorKind::Keychain)?; .secret_key(&keychain.secp())
.context(ErrorKind::Keychain)?;
keychain
.aggsig_create_context(&tx_id, skey)
.context(ErrorKind::Keychain)?;
let partial_tx = build_partial_tx(&tx_id, keychain, amount_with_fee, kernel_offset, None, tx); let partial_tx = build_partial_tx(&tx_id, keychain, amount_with_fee, kernel_offset, None, tx);
// Closure to acquire wallet lock and lock the coins being spent // Closure to acquire wallet lock and lock the coins being spent
// so we avoid accidental double spend attempt. // so we avoid accidental double spend attempt.
let update_wallet = || WalletData::with_wallet(&config.data_file_dir, |wallet_data| { let update_wallet = || {
for coin in coins { WalletData::with_wallet(&config.data_file_dir, |wallet_data| {
wallet_data.lock_output(&coin); for coin in coins {
} wallet_data.lock_output(&coin);
}); }
})
};
// Closure to acquire wallet lock and delete the change output in case of tx failure. // Closure to acquire wallet lock and delete the change output in case of tx
let rollback_wallet = || WalletData::with_wallet(&config.data_file_dir, |wallet_data| { // failure.
info!(LOGGER, "cleaning up unused change output from wallet"); let rollback_wallet = || {
wallet_data.delete_output(&change_key); WalletData::with_wallet(&config.data_file_dir, |wallet_data| {
}); info!(LOGGER, "cleaning up unused change output from wallet");
wallet_data.delete_output(&change_key);
})
};
// TODO: stdout option removed for now, as it won't work very well with this version of // TODO: stdout option removed for now, as it won't work very well with this
// aggsig exchange // version of aggsig exchange
/*if dest == "stdout" { /*if dest == "stdout" {
let json_tx = serde_json::to_string_pretty(&partial_tx).unwrap(); let json_tx = serde_json::to_string_pretty(&partial_tx).unwrap();
@ -110,7 +118,10 @@ pub fn issue_send_tx(
} else */ } else */
if &dest[..4] != "http" { if &dest[..4] != "http" {
panic!("dest formatted as {} but send -d expected stdout or http://IP:port", dest); panic!(
"dest formatted as {} but send -d expected stdout or http://IP:port",
dest
);
} }
let url = format!("{}/v1/receive/transaction", &dest); let url = format!("{}/v1/receive/transaction", &dest);
@ -118,14 +129,19 @@ pub fn issue_send_tx(
let res = client::send_partial_tx(&url, &partial_tx); let res = client::send_partial_tx(&url, &partial_tx);
if let Err(e) = res { if let Err(e) = res {
match e.kind() { match e.kind() {
ErrorKind::FeeExceedsAmount {sender_amount, recipient_fee} => ErrorKind::FeeExceedsAmount {
error!( sender_amount,
recipient_fee,
} => error!(
LOGGER, LOGGER,
"Recipient rejected the transfer because transaction fee ({}) exceeded amount ({}).", "Recipient rejected the transfer because transaction fee ({}) exceeded amount ({}).",
amount_to_hr_string(recipient_fee), amount_to_hr_string(recipient_fee),
amount_to_hr_string(sender_amount) amount_to_hr_string(sender_amount)
), ),
_ => error!(LOGGER, "Communication with receiver failed on SenderInitiation send. Aborting transaction"), _ => error!(
LOGGER,
"Communication with receiver failed on SenderInitiation send. Aborting transaction"
),
} }
rollback_wallet()?; rollback_wallet()?;
return Err(e); return Err(e);
@ -133,11 +149,12 @@ pub fn issue_send_tx(
/* -Sender receives xR * G, kR * G, sR /* -Sender receives xR * G, kR * G, sR
* -Sender computes Schnorr challenge e = H(M | kR * G + kS * G) * -Sender computes Schnorr challenge e = H(M | kR * G + kS * G)
* -Sender verifies receiver's sig, by verifying that kR * G + e * xR * G = sR * G * -Sender verifies receiver's sig, by verifying that kR * G + e * xR * G =
* -Sender computes their part of signature, sS = kS + e * xS * sR * G -Sender computes their part of signature, sS = kS + e * xS
* -Sender posts sS to receiver * -Sender posts sS to receiver
*/ */
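
Written out, the relations compressed into the comment above are (keeping the comment's own notation, with G the curve generator and H the challenge hash):

    e  = H(M || kR*G + kS*G)
    sR = kR + e*xR        (receiver's part; checked via sR*G = kR*G + e*xR*G)
    sS = kS + e*xS        (sender's part)
    s  = sS + sR          (so s*G = (kS + kR)*G + e*(xS + xR)*G)

The last line is the standard Schnorr aggregation step, stated here as background rather than read off this diff.
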
let (_amount, recp_pub_blinding, recp_pub_nonce, kernel_offset, sig, tx) = read_partial_tx(keychain, &res.unwrap())?; let (_amount, recp_pub_blinding, recp_pub_nonce, kernel_offset, sig, tx) =
read_partial_tx(keychain, &res.unwrap())?;
let res = keychain.aggsig_verify_partial_sig( let res = keychain.aggsig_verify_partial_sig(
&tx_id, &tx_id,
&sig.unwrap(), &sig.unwrap(),
@ -151,11 +168,21 @@ pub fn issue_send_tx(
return Err(ErrorKind::Signature("Partial Sig from recipient invalid."))?; return Err(ErrorKind::Signature("Partial Sig from recipient invalid."))?;
} }
let sig_part = keychain.aggsig_calculate_partial_sig(&tx_id, &recp_pub_nonce, tx.fee(), tx.lock_height()).unwrap(); let sig_part = keychain
.aggsig_calculate_partial_sig(&tx_id, &recp_pub_nonce, tx.fee(), tx.lock_height())
.unwrap();
// Build the next stage, containing sS (and our pubkeys again, for the recipient's convenience) // Build the next stage, containing sS (and our pubkeys again, for the
// offset has not been modified during tx building, so pass it back in // recipient's convenience) offset has not been modified during tx building,
let mut partial_tx = build_partial_tx(&tx_id, keychain, amount_with_fee, kernel_offset, Some(sig_part), tx); // so pass it back in
let mut partial_tx = build_partial_tx(
&tx_id,
keychain,
amount_with_fee,
kernel_offset,
Some(sig_part),
tx,
);
partial_tx.phase = PartialTxPhase::SenderConfirmation; partial_tx.phase = PartialTxPhase::SenderConfirmation;
// And send again // And send again
@@ -192,7 +219,16 @@ fn build_send_tx(
lock_height: u64, lock_height: u64,
max_outputs: usize, max_outputs: usize,
selection_strategy_is_use_all: bool, selection_strategy_is_use_all: bool,
) -> Result<(Transaction, BlindingFactor, Vec<OutputData>, Identifier, u64), Error> { ) -> Result<
(
Transaction,
BlindingFactor,
Vec<OutputData>,
Identifier,
u64,
),
Error,
> {
let key_id = keychain.clone().root_key_id(); let key_id = keychain.clone().root_key_id();
// select some spendable coins from the wallet // select some spendable coins from the wallet
@@ -208,14 +244,14 @@ fn build_send_tx(
})?; })?;
// Get the maximum number of outputs in the wallet // Get the maximum number of outputs in the wallet
let max_outputs = WalletData::read_wallet(&config.data_file_dir, |wallet_data| { let max_outputs = WalletData::read_wallet(&config.data_file_dir, |wallet_data| {
Ok(wallet_data.select_coins( Ok(wallet_data.select_coins(
key_id.clone(), key_id.clone(),
amount, amount,
current_height, current_height,
minimum_confirmations, minimum_confirmations,
max_outputs, max_outputs,
true, true,
)) ))
})?.len(); })?.len();
@@ -226,8 +262,8 @@ fn build_send_tx(
let mut total: u64 = coins.iter().map(|c| c.value).sum(); let mut total: u64 = coins.iter().map(|c| c.value).sum();
let mut amount_with_fee = amount + fee; let mut amount_with_fee = amount + fee;
// Here check if we have enough outputs for the amount including fee, otherwise look for other // Here check if we have enough outputs for the amount including fee, otherwise
// outputs and check again // look for other outputs and check again
while total <= amount_with_fee { while total <= amount_with_fee {
// End the loop if we have selected all the outputs and still not enough funds // End the loop if we have selected all the outputs and still not enough funds
if coins.len() == max_outputs { if coins.len() == max_outputs {
@@ -324,12 +360,15 @@ fn inputs_and_change(
parts.push(build::with_fee(fee)); parts.push(build::with_fee(fee));
// if we are spending 10,000 coins to send 1,000 then our change will be 9,000 // if we are spending 10,000 coins to send 1,000 then our change will be 9,000
// if the fee is 80 then the recipient will receive 1000 and our change will be 8,920 // if the fee is 80 then the recipient will receive 1000 and our change will be
// 8,920
let change = total - amount - fee; let change = total - amount - fee;
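A quick worked version of the comment above, using the same illustrative numbers and the variable names of this function:

let (total, amount, fee): (u64, u64, u64) = (10_000, 1_000, 80);
let change = total - amount - fee;
assert_eq!(change, 8_920); // recipient receives 1,000 and the fee takes 80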
// build inputs using the appropriate derived key_ids // build inputs using the appropriate derived key_ids
for coin in coins { for coin in coins {
let key_id = keychain.derive_key_id(coin.n_child).context(ErrorKind::Keychain)?; let key_id = keychain
.derive_key_id(coin.n_child)
.context(ErrorKind::Keychain)?;
if coin.is_coinbase { if coin.is_coinbase {
let block = coin.block.clone(); let block = coin.block.clone();
let merkle_proof = coin.merkle_proof.clone(); let merkle_proof = coin.merkle_proof.clone();
@@ -378,7 +417,6 @@ mod test {
use core::core::build; use core::core::build;
use keychain::Keychain; use keychain::Keychain;
#[test] #[test]
// demonstrate that input.commitment == referenced output.commitment // demonstrate that input.commitment == referenced output.commitment
// based on the public key and amount being spent // based on the public key and amount being spent

View file

@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
use api::ApiServer; use api::ApiServer;
use keychain::Keychain; use keychain::Keychain;
use handlers::CoinbaseHandler; use handlers::CoinbaseHandler;

View file

@@ -14,7 +14,7 @@
use blake2; use blake2;
use rand::{thread_rng, Rng}; use rand::{thread_rng, Rng};
use std::{fmt}; use std::fmt;
use std::fmt::Display; use std::fmt::Display;
use uuid::Uuid; use uuid::Uuid;
use std::convert::From; use std::convert::From;
@@ -68,103 +68,100 @@ pub fn tx_fee(input_len: usize, output_len: usize, base_fee: Option<u64>) -> u64
#[derive(Debug)] #[derive(Debug)]
pub struct Error { pub struct Error {
inner: Context<ErrorKind>, inner: Context<ErrorKind>,
} }
/// Wallet errors, mostly wrappers around underlying crypto or I/O errors. /// Wallet errors, mostly wrappers around underlying crypto or I/O errors.
#[derive(Copy, Clone, Eq, PartialEq, Debug, Fail)] #[derive(Copy, Clone, Eq, PartialEq, Debug, Fail)]
pub enum ErrorKind { pub enum ErrorKind {
#[fail(display = "Not enough funds")] #[fail(display = "Not enough funds")] NotEnoughFunds(u64),
NotEnoughFunds(u64),
#[fail(display = "Fee dispute: sender fee {}, recipient fee {}", sender_fee, recipient_fee)] #[fail(display = "Fee dispute: sender fee {}, recipient fee {}", sender_fee, recipient_fee)]
FeeDispute{sender_fee: u64, recipient_fee: u64}, FeeDispute {
sender_fee: u64,
recipient_fee: u64,
},
#[fail(display = "Fee exceeds amount: sender amount {}, recipient fee {}", sender_amount, recipient_fee)] #[fail(display = "Fee exceeds amount: sender amount {}, recipient fee {}", sender_amount,
FeeExceedsAmount{sender_amount: u64,recipient_fee: u64}, recipient_fee)]
FeeExceedsAmount {
sender_amount: u64,
recipient_fee: u64,
},
#[fail(display = "Keychain error")] #[fail(display = "Keychain error")] Keychain,
Keychain,
#[fail(display = "Transaction error")] #[fail(display = "Transaction error")] Transaction,
Transaction,
#[fail(display = "Secp error")] #[fail(display = "Secp error")] Secp,
Secp,
#[fail(display = "Wallet data error: {}", _0)] #[fail(display = "Wallet data error: {}", _0)] WalletData(&'static str),
WalletData(&'static str),
/// An error in the format of the JSON structures exchanged by the wallet /// An error in the format of the JSON structures exchanged by the wallet
#[fail(display = "JSON format error")] #[fail(display = "JSON format error")]
Format, Format,
#[fail(display = "I/O error")] IO,
#[fail(display = "I/O error")] /// Error when contacting a node through its API
IO, #[fail(display = "Node API error")]
Node,
/// Error when contacting a node through its API /// Error originating from hyper.
#[fail(display = "Node API error")] #[fail(display = "Hyper error")]
Node, Hyper,
/// Error originating from hyper. /// Error originating from hyper uri parsing.
#[fail(display = "Hyper error")] #[fail(display = "Uri parsing error")]
Hyper, Uri,
/// Error originating from hyper uri parsing. #[fail(display = "Signature error")] Signature(&'static str),
#[fail(display = "Uri parsing error")]
Uri,
#[fail(display = "Signature error")]
Signature(&'static str),
/// Attempt to use duplicate transaction id in separate transactions /// Attempt to use duplicate transaction id in separate transactions
#[fail(display = "Duplicate transaction ID error")] #[fail(display = "Duplicate transaction ID error")]
DuplicateTransactionId, DuplicateTransactionId,
/// Wallet seed already exists /// Wallet seed already exists
#[fail(display = "Wallet seed exists error")] #[fail(display = "Wallet seed exists error")]
WalletSeedExists, WalletSeedExists,
#[fail(display = "Generic error: {}", _0)] #[fail(display = "Generic error: {}", _0)] GenericError(&'static str),
GenericError(&'static str),
} }
impl Fail for Error { impl Fail for Error {
fn cause(&self) -> Option<&Fail> { fn cause(&self) -> Option<&Fail> {
self.inner.cause() self.inner.cause()
} }
fn backtrace(&self) -> Option<&Backtrace> { fn backtrace(&self) -> Option<&Backtrace> {
self.inner.backtrace() self.inner.backtrace()
} }
} }
impl Display for Error { impl Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
Display::fmt(&self.inner, f) Display::fmt(&self.inner, f)
} }
} }
impl Error { impl Error {
pub fn kind(&self) -> ErrorKind { pub fn kind(&self) -> ErrorKind {
*self.inner.get_context() *self.inner.get_context()
} }
} }
impl From<ErrorKind> for Error { impl From<ErrorKind> for Error {
fn from(kind: ErrorKind) -> Error { fn from(kind: ErrorKind) -> Error {
Error { Error {
inner: Context::new(kind), inner: Context::new(kind),
} }
} }
} }
impl From<Context<ErrorKind>> for Error { impl From<Context<ErrorKind>> for Error {
fn from(inner: Context<ErrorKind>) -> Error { fn from(inner: Context<ErrorKind>) -> Error {
Error { inner: inner } Error { inner: inner }
} }
} }
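Since Error only exposes its kind through kind(), callers match on the copied ErrorKind rather than on the error itself, as issue_send_tx does above with FeeExceedsAmount. A minimal sketch of that pattern (the handler bodies are illustrative):

fn report(e: &Error) {
    match e.kind() {
        ErrorKind::FeeExceedsAmount { sender_amount, recipient_fee } => {
            println!("fee {} exceeds amount {}", recipient_fee, sender_amount)
        }
        ErrorKind::NotEnoughFunds(available) => println!("only {} available", available),
        _ => println!("wallet error: {}", e),
    }
}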
#[derive(Debug, Clone, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]
@@ -271,7 +268,6 @@ impl<'de> serde::de::Visitor<'de> for MerkleProofWrapperVisitor {
} }
} }
#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord)] #[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord)]
pub struct BlockIdentifier(Hash); pub struct BlockIdentifier(Hash);
@@ -371,7 +367,8 @@ impl OutputData {
} }
} }
/// Check if output is eligible to spend based on state and height and confirmations /// Check if output is eligible to spend based on state and height and
/// confirmations
pub fn eligible_to_spend(&self, current_height: u64, minimum_confirmations: u64) -> bool { pub fn eligible_to_spend(&self, current_height: u64, minimum_confirmations: u64) -> bool {
if [OutputStatus::Spent, OutputStatus::Locked].contains(&self.status) { if [OutputStatus::Spent, OutputStatus::Locked].contains(&self.status) {
return false; return false;
@@ -404,7 +401,8 @@ impl WalletSeed {
} }
fn from_hex(hex: &str) -> Result<WalletSeed, Error> { fn from_hex(hex: &str) -> Result<WalletSeed, Error> {
let bytes = util::from_hex(hex.to_string()).context(ErrorKind::GenericError("Invalid hex"))?; let bytes =
util::from_hex(hex.to_string()).context(ErrorKind::GenericError("Invalid hex"))?;
Ok(WalletSeed::from_bytes(&bytes)) Ok(WalletSeed::from_bytes(&bytes))
} }
@@ -429,9 +427,7 @@ impl WalletSeed {
let seed_file_path = &format!( let seed_file_path = &format!(
"{}{}{}", "{}{}{}",
wallet_config.data_file_dir, wallet_config.data_file_dir, MAIN_SEPARATOR, SEED_FILE,
MAIN_SEPARATOR,
SEED_FILE,
); );
debug!(LOGGER, "Generating wallet seed file at: {}", seed_file_path,); debug!(LOGGER, "Generating wallet seed file at: {}", seed_file_path,);
@@ -441,7 +437,8 @@ impl WalletSeed {
} else { } else {
let seed = WalletSeed::init_new(); let seed = WalletSeed::init_new();
let mut file = File::create(seed_file_path).context(ErrorKind::IO)?; let mut file = File::create(seed_file_path).context(ErrorKind::IO)?;
file.write_all(&seed.to_hex().as_bytes()).context(ErrorKind::IO)?; file.write_all(&seed.to_hex().as_bytes())
.context(ErrorKind::IO)?;
Ok(seed) Ok(seed)
} }
} }
@@ -452,9 +449,7 @@ impl WalletSeed {
let seed_file_path = &format!( let seed_file_path = &format!(
"{}{}{}", "{}{}{}",
wallet_config.data_file_dir, wallet_config.data_file_dir, MAIN_SEPARATOR, SEED_FILE,
MAIN_SEPARATOR,
SEED_FILE,
); );
debug!(LOGGER, "Using wallet seed file at: {}", seed_file_path,); debug!(LOGGER, "Using wallet seed file at: {}", seed_file_path,);
@@ -539,7 +534,10 @@ impl WalletData {
LOGGER, LOGGER,
"Failed to acquire wallet lock file (multiple retries)", "Failed to acquire wallet lock file (multiple retries)",
); );
return Err(e.context(ErrorKind::WalletData("Failed to acquire lock file")).into()); return Err(
e.context(ErrorKind::WalletData("Failed to acquire lock file"))
.into(),
);
} }
} }
@@ -549,7 +547,9 @@ impl WalletData {
wdat.write(data_file_path)?; wdat.write(data_file_path)?;
// delete the lock file // delete the lock file
fs::remove_file(lock_file_path).context(ErrorKind::WalletData("Could not remove wallet lock file. Maybe insufficient rights?"))?; fs::remove_file(lock_file_path).context(ErrorKind::WalletData(
"Could not remove wallet lock file. Maybe insufficient rights?",
))?;
info!(LOGGER, "... released wallet lock"); info!(LOGGER, "... released wallet lock");
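In isolation, the lock discipline used here (create a lock file, retry on contention, delete it after writing) looks roughly like the sketch below. The helper name, retry count and delay are assumptions for illustration, not the wallet's actual tuning:

use std::fs::{self, OpenOptions};
use std::io;
use std::thread;
use std::time::Duration;

fn with_file_lock<T, F: FnOnce() -> T>(lock_path: &str, action: F) -> io::Result<T> {
    let mut attempts = 0;
    loop {
        // create_new fails if the lock file already exists, i.e. the lock is held
        match OpenOptions::new().write(true).create_new(true).open(lock_path) {
            Ok(_) => break,
            Err(_) if attempts < 10 => {
                attempts += 1;
                thread::sleep(Duration::from_millis(500));
            }
            Err(e) => return Err(e), // give up after repeated contention
        }
    }
    let result = action();
    fs::remove_file(lock_path)?; // release the lock
    Ok(result)
}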
@@ -570,10 +570,12 @@ impl WalletData {
/// Read output_data vec from disk. /// Read output_data vec from disk.
fn read_outputs(data_file_path: &str) -> Result<Vec<OutputData>, Error> { fn read_outputs(data_file_path: &str) -> Result<Vec<OutputData>, Error> {
let data_file = File::open(data_file_path).context(ErrorKind::WalletData(&"Could not open wallet file"))?; let data_file = File::open(data_file_path)
serde_json::from_reader(data_file).map_err(|e| { e.context(ErrorKind::WalletData(&"Error reading wallet file")).into()}) .context(ErrorKind::WalletData(&"Could not open wallet file"))?;
serde_json::from_reader(data_file).map_err(|e| {
e.context(ErrorKind::WalletData(&"Error reading wallet file "))
.into()
})
} }
/// Populate wallet_data with output_data from disk. /// Populate wallet_data with output_data from disk.
@@ -590,14 +592,16 @@ impl WalletData {
/// Write the wallet data to disk. /// Write the wallet data to disk.
fn write(&self, data_file_path: &str) -> Result<(), Error> { fn write(&self, data_file_path: &str) -> Result<(), Error> {
let mut data_file = File::create(data_file_path).map_err(|e| { let mut data_file = File::create(data_file_path)
e.context(ErrorKind::WalletData(&"Could not create "))})?; .map_err(|e| e.context(ErrorKind::WalletData(&"Could not create ")))?;
let mut outputs = self.outputs.values().collect::<Vec<_>>(); let mut outputs = self.outputs.values().collect::<Vec<_>>();
outputs.sort(); outputs.sort();
let res_json = serde_json::to_vec_pretty(&outputs).map_err(|e| { let res_json = serde_json::to_vec_pretty(&outputs)
e.context(ErrorKind::WalletData("Error serializing wallet data")) .map_err(|e| e.context(ErrorKind::WalletData("Error serializing wallet data")))?;
})?; data_file
data_file.write_all(res_json.as_slice()).context(ErrorKind::WalletData(&"Error writing wallet file")).map_err(|e| e.into()) .write_all(res_json.as_slice())
.context(ErrorKind::WalletData(&"Error writing wallet file"))
.map_err(|e| e.into())
} }
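The read/write pair above is a plain serde_json round trip over the outputs map. A self-contained sketch of the same scheme, with a stand-in struct (the real OutputData has more fields):

extern crate serde_json;
#[macro_use]
extern crate serde_derive;

#[derive(Serialize, Deserialize, Debug, PartialEq)]
struct Output {
    value: u64,
    n_child: u32,
}

fn main() {
    let outputs = vec![Output { value: 60, n_child: 1 }];
    let json = serde_json::to_vec_pretty(&outputs).unwrap();
    let back: Vec<Output> = serde_json::from_reader(&json[..]).unwrap();
    assert_eq!(back, outputs);
}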
/// Append a new output data to the wallet data. /// Append a new output data to the wallet data.
@@ -654,12 +658,12 @@ impl WalletData {
// use a sliding window to identify potential sets of possible outputs to spend // use a sliding window to identify potential sets of possible outputs to spend
// Case of amount > total amount of max_outputs(500): // Case of amount > total amount of max_outputs(500):
// The limit exists because by default, we always select as many inputs as possible in a transaction, // The limit exists because by default, we always select as many inputs as
// to reduce both the UTXO set and the fees. // possible in a transaction, to reduce both the UTXO set and the fees.
// But that only makes sense up to a point, hence the limit to avoid being too greedy. // But that only makes sense up to a point, hence the limit to avoid being too
// But if max_outputs(500) is actually not enough to cover the whole amount, // greedy. But if max_outputs(500) is actually not enough to cover the whole
// the wallet should allow going over it to satisfy what the user wants to send. // amount, the wallet should allow going over it to satisfy what the user
// So the wallet considers max_outputs more of a soft limit. // wants to send. So the wallet considers max_outputs more of a soft limit.
if eligible.len() > max_outputs { if eligible.len() > max_outputs {
for window in eligible.windows(max_outputs) { for window in eligible.windows(max_outputs) {
let windowed_eligibles = window.iter().cloned().collect::<Vec<_>>(); let windowed_eligibles = window.iter().cloned().collect::<Vec<_>>();
@@ -668,9 +672,14 @@ impl WalletData {
} }
} }
// No window exists whose total amount >= amount. // No window exists whose total amount >= amount.
// Then take coins from the smallest upwards until the total of selected coins reaches the amount. // Then take coins from the smallest upwards until the total of selected
// coins reaches the amount.
if let Some(outputs) = self.select_from(amount, false, eligible.clone()) { if let Some(outputs) = self.select_from(amount, false, eligible.clone()) {
debug!(LOGGER, "Extending maximum number of outputs. {} outputs selected.", outputs.len()); debug!(
LOGGER,
"Extending maximum number of outputs. {} outputs selected.",
outputs.len()
);
return outputs; return outputs;
} }
} else { } else {
@@ -680,7 +689,8 @@ impl WalletData {
} }
// we failed to find a suitable set of outputs to spend, // we failed to find a suitable set of outputs to spend,
// so return the largest amount we can so we can provide guidance on what is possible // so return the largest amount we can so we can provide guidance on what is
// possible
eligible.reverse(); eligible.reverse();
eligible.iter().take(max_outputs).cloned().collect() eligible.iter().take(max_outputs).cloned().collect()
} }
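The soft-limit behaviour described in the comments above boils down to: slide a window of max_outputs over the sorted eligible coins, and only go past the limit when no window covers the amount. A stand-alone sketch over plain values (the function name and types are illustrative, not the wallet API):

fn select_window(values: &[u64], amount: u64, max_outputs: usize) -> Option<Vec<u64>> {
    if values.len() > max_outputs {
        for window in values.windows(max_outputs) {
            if window.iter().sum::<u64>() >= amount {
                return Some(window.to_vec());
            }
        }
        // no window suffices; the caller may then exceed max_outputs,
        // treating it as a soft limit
        None
    } else {
        Some(values.to_vec())
    }
}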
@@ -700,14 +710,15 @@ impl WalletData {
} else { } else {
let mut selected_amount = 0; let mut selected_amount = 0;
return Some( return Some(
outputs.iter() outputs
.iter()
.take_while(|out| { .take_while(|out| {
let res = selected_amount < amount; let res = selected_amount < amount;
selected_amount += out.value; selected_amount += out.value;
res res
}) })
.cloned() .cloned()
.collect() .collect(),
); );
} }
} else { } else {
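Note that the take_while above deliberately includes the coin that crosses the target, because the running total is tested before each value is added. A quick illustration with made-up values:

let outputs: Vec<u64> = vec![100, 250, 400, 900];
let amount = 600u64;
let mut selected = 0u64;
let picked: Vec<u64> = outputs
    .iter()
    .take_while(|v| {
        let need_more = selected < amount;
        selected += **v;
        need_more
    })
    .cloned()
    .collect();
assert_eq!(picked, vec![100, 250, 400]); // 100 + 250 < 600, so 400 is still taken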
@@ -733,14 +744,14 @@ pub enum PartialTxPhase {
SenderInitiation, SenderInitiation,
ReceiverInitiation, ReceiverInitiation,
SenderConfirmation, SenderConfirmation,
ReceiverConfirmation ReceiverConfirmation,
} }
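Read top to bottom, the phases alternate between the two parties, one message per phase, with the sender setting SenderConfirmation before its second send as seen above. A sketch of the ordering they imply (the helper is illustrative, not wallet code):

fn next_phase(phase: PartialTxPhase) -> Option<PartialTxPhase> {
    match phase {
        PartialTxPhase::SenderInitiation => Some(PartialTxPhase::ReceiverInitiation),
        PartialTxPhase::ReceiverInitiation => Some(PartialTxPhase::SenderConfirmation),
        PartialTxPhase::SenderConfirmation => Some(PartialTxPhase::ReceiverConfirmation),
        PartialTxPhase::ReceiverConfirmation => None, // exchange complete
    }
}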
/// Helper in serializing the information required during an interactive aggsig /// Helper in serializing the information required during an interactive aggsig
/// transaction /// transaction
#[derive(Serialize, Deserialize, Debug, Clone)] #[derive(Serialize, Deserialize, Debug, Clone)]
pub struct PartialTx { pub struct PartialTx {
pub phase: PartialTxPhase, pub phase: PartialTxPhase,
pub id: Uuid, pub id: Uuid,
pub amount: u64, pub amount: u64,
pub public_blind_excess: String, pub public_blind_excess: String,
@@ -754,14 +765,13 @@ pub struct PartialTx {
/// aggsig_tx_context should contain the private key/nonce pair /// aggsig_tx_context should contain the private key/nonce pair
/// the resulting partial tx will contain the corresponding public keys /// the resulting partial tx will contain the corresponding public keys
pub fn build_partial_tx( pub fn build_partial_tx(
transaction_id : &Uuid, transaction_id: &Uuid,
keychain: &keychain::Keychain, keychain: &keychain::Keychain,
receive_amount: u64, receive_amount: u64,
kernel_offset: BlindingFactor, kernel_offset: BlindingFactor,
part_sig: Option<secp::Signature>, part_sig: Option<secp::Signature>,
tx: Transaction, tx: Transaction,
) -> PartialTx { ) -> PartialTx {
let (pub_excess, pub_nonce) = keychain.aggsig_get_public_keys(transaction_id); let (pub_excess, pub_nonce) = keychain.aggsig_get_public_keys(transaction_id);
let mut pub_excess = pub_excess.serialize_vec(keychain.secp(), true).clone(); let mut pub_excess = pub_excess.serialize_vec(keychain.secp(), true).clone();
let len = pub_excess.clone().len(); let len = pub_excess.clone().len();
@@ -773,7 +783,7 @@ pub fn build_partial_tx(
PartialTx { PartialTx {
phase: PartialTxPhase::SenderInitiation, phase: PartialTxPhase::SenderInitiation,
id : transaction_id.clone(), id: transaction_id.clone(),
amount: receive_amount, amount: receive_amount,
public_blind_excess: util::to_hex(pub_excess), public_blind_excess: util::to_hex(pub_excess),
public_nonce: util::to_hex(pub_nonce), public_nonce: util::to_hex(pub_nonce),
@@ -791,23 +801,43 @@ pub fn build_partial_tx(
pub fn read_partial_tx( pub fn read_partial_tx(
keychain: &keychain::Keychain, keychain: &keychain::Keychain,
partial_tx: &PartialTx, partial_tx: &PartialTx,
) -> Result<(u64, PublicKey, PublicKey, BlindingFactor, Option<Signature>, Transaction), Error> { ) -> Result<
let blind_bin = util::from_hex(partial_tx.public_blind_excess.clone()).context(ErrorKind::GenericError("Could not decode HEX"))?; (
let blinding = PublicKey::from_slice(keychain.secp(), &blind_bin[..]).context(ErrorKind::GenericError("Could not construct public key"))?; u64,
PublicKey,
PublicKey,
BlindingFactor,
Option<Signature>,
Transaction,
),
Error,
> {
let blind_bin = util::from_hex(partial_tx.public_blind_excess.clone())
.context(ErrorKind::GenericError("Could not decode HEX"))?;
let blinding = PublicKey::from_slice(keychain.secp(), &blind_bin[..])
.context(ErrorKind::GenericError("Could not construct public key"))?;
let nonce_bin = util::from_hex(partial_tx.public_nonce.clone()).context(ErrorKind::GenericError("Could not decode HEX"))?; let nonce_bin = util::from_hex(partial_tx.public_nonce.clone())
let nonce = PublicKey::from_slice(keychain.secp(), &nonce_bin[..]).context(ErrorKind::GenericError("Could not construct public key"))?; .context(ErrorKind::GenericError("Could not decode HEX"))?;
let nonce = PublicKey::from_slice(keychain.secp(), &nonce_bin[..])
.context(ErrorKind::GenericError("Could not construct public key"))?;
let kernel_offset = BlindingFactor::from_hex(&partial_tx.kernel_offset.clone()).context(ErrorKind::GenericError("Could not decode HEX"))?; let kernel_offset = BlindingFactor::from_hex(&partial_tx.kernel_offset.clone())
.context(ErrorKind::GenericError("Could not decode HEX"))?;
let sig_bin = util::from_hex(partial_tx.part_sig.clone()).context(ErrorKind::GenericError("Could not decode HEX"))?; let sig_bin = util::from_hex(partial_tx.part_sig.clone())
.context(ErrorKind::GenericError("Could not decode HEX"))?;
let sig = match sig_bin.len() { let sig = match sig_bin.len() {
1 => None, 1 => None,
_ => Some(Signature::from_der(keychain.secp(), &sig_bin[..]).context(ErrorKind::GenericError("Could not create signature"))?), _ => Some(Signature::from_der(keychain.secp(), &sig_bin[..])
.context(ErrorKind::GenericError("Could not create signature"))?),
}; };
let tx_bin = util::from_hex(partial_tx.tx.clone()).context(ErrorKind::GenericError("Could not decode HEX"))?; let tx_bin = util::from_hex(partial_tx.tx.clone())
let tx = ser::deserialize(&mut &tx_bin[..]).context(ErrorKind::GenericError("Could not deserialize transaction, invalid format."))?; .context(ErrorKind::GenericError("Could not decode HEX"))?;
Ok((partial_tx.amount, blinding, nonce, kernel_offset, sig, tx)) let tx = ser::deserialize(&mut &tx_bin[..]).context(ErrorKind::GenericError(
"Could not deserialize transaction, invalid format.",
))?;
Ok((partial_tx.amount, blinding, nonce, kernel_offset, sig, tx))
} }
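Every field of a PartialTx travels as a hex string, so read_partial_tx is essentially util::from_hex plus a typed parse per field. The stand-ins below show the same hex round trip using only the standard library (to_hex/from_hex here are simplified sketches of Grin's util helpers, not the real ones):

fn to_hex(bytes: &[u8]) -> String {
    bytes.iter().map(|b| format!("{:02x}", b)).collect()
}

fn from_hex(s: &str) -> Result<Vec<u8>, std::num::ParseIntError> {
    s.as_bytes()
        .chunks(2)
        .map(|pair| u8::from_str_radix(std::str::from_utf8(pair).unwrap(), 16))
        .collect()
}

fn main() {
    let bytes = vec![0x02u8, 0xab, 0xff];
    assert_eq!(from_hex(&to_hex(&bytes)).unwrap(), bytes);
}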
/// Amount in request to build a coinbase output. /// Amount in request to build a coinbase output.