mirror of https://github.com/mimblewimble/grin.git

rustfmt all the things

parent 19565aea3d
commit be8d9633e4

83 changed files with 2981 additions and 2607 deletions
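Everything below is mechanical re-formatting; no behavior changes. For context, a commit like this is normally produced by running rustfmt over the whole workspace — a minimal sketch, assuming a cargo-managed toolchain (the exact rustfmt version used for this commit is not recorded in the diff):

    # add the formatter to the active toolchain
    # (older 2017-era setups used `cargo install rustfmt` instead)
    rustup component add rustfmt
    # rewrite every crate in the workspace in place
    cargo fmt --all
    # exit non-zero if any file would still be reformatted; useful in CI
    cargo fmt --all -- --check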
@@ -32,9 +32,8 @@ where
 {
 	let client = hyper::Client::new();
 	let res = check_error(client.get(url).send())?;
-	serde_json::from_reader(res).map_err(|e| {
-		Error::Internal(format!("Server returned invalid JSON: {}", e))
-	})
+	serde_json::from_reader(res)
+		.map_err(|e| Error::Internal(format!("Server returned invalid JSON: {}", e)))
 }
 
 /// Helper function to easily issue a HTTP POST request with the provided JSON

@@ -45,9 +44,8 @@ pub fn post<'a, IN>(url: &'a str, input: &IN) -> Result<(), Error>
 where
 	IN: Serialize,
 {
-	let in_json = serde_json::to_string(input).map_err(|e| {
-		Error::Internal(format!("Could not serialize data to JSON: {}", e))
-	})?;
+	let in_json = serde_json::to_string(input)
+		.map_err(|e| Error::Internal(format!("Could not serialize data to JSON: {}", e)))?;
 	let client = hyper::Client::new();
 	let _res = check_error(client.post(url).body(&mut in_json.as_bytes()).send())?;
 	Ok(())

@@ -61,13 +59,17 @@ fn check_error(res: hyper::Result<Response>) -> Result<Response, Error> {
 	let mut response = res.unwrap();
 	match response.status.class() {
 		StatusClass::Success => Ok(response),
-		StatusClass::ServerError => {
-			Err(Error::Internal(format!("Server error: {}", err_msg(&mut response))))
-		}
+		StatusClass::ServerError => Err(Error::Internal(format!(
+			"Server error: {}",
+			err_msg(&mut response)
+		))),
 		StatusClass::ClientError => if response.status == StatusCode::NotFound {
 			Err(Error::NotFound)
 		} else {
-			Err(Error::Argument(format!("Argument error: {}", err_msg(&mut response))))
+			Err(Error::Argument(format!(
+				"Argument error: {}",
+				err_msg(&mut response)
+			)))
 		},
 		_ => Err(Error::Internal(format!("Unrecognized error."))),
 	}
@@ -13,7 +13,7 @@
 // limitations under the License.
 
 use std::io::Read;
-use std::sync::{Arc, Weak, RwLock};
+use std::sync::{Arc, RwLock, Weak};
 use std::thread;
 
 use iron::prelude::*;

@@ -24,7 +24,7 @@ use serde::Serialize;
 use serde_json;
 
 use chain;
-use core::core::{OutputIdentifier, Transaction, OutputFeatures};
+use core::core::{OutputFeatures, OutputIdentifier, Transaction};
 use core::core::hash::{Hash, Hashed};
 use core::ser;
 use pool;

@@ -43,7 +43,6 @@ fn w<T>(weak: &Weak<T>) -> Arc<T> {
 	weak.upgrade().unwrap()
 }
 
-
 // RESTful index of available api endpoints
 // GET /v1/
 struct IndexHandler {

@@ -74,15 +73,16 @@ impl UtxoHandler {
 
 		// We need the features here to be able to generate the necessary hash
 		// to compare against the hash in the output MMR.
-		// For now we can just try both (but this probably needs to be part of the api params)
+		// For now we can just try both (but this probably needs to be part of the api
+		// params)
 		let outputs = [
 			OutputIdentifier::new(OutputFeatures::DEFAULT_OUTPUT, &commit),
-			OutputIdentifier::new(OutputFeatures::COINBASE_OUTPUT, &commit)
+			OutputIdentifier::new(OutputFeatures::COINBASE_OUTPUT, &commit),
 		];
 
 		for x in outputs.iter() {
 			if let Ok(_) = w(&self.chain).is_unspent(&x) {
-				return Ok(Utxo::new(&commit))
+				return Ok(Utxo::new(&commit));
 			}
 		}
 		Err(Error::NotFound)

@@ -117,16 +117,12 @@ impl UtxoHandler {
 		commitments: Vec<Commitment>,
 		include_proof: bool,
 	) -> BlockOutputs {
-		let header = w(&self.chain)
-			.get_header_by_height(block_height)
-			.unwrap();
+		let header = w(&self.chain).get_header_by_height(block_height).unwrap();
 		let block = w(&self.chain).get_block(&header.hash()).unwrap();
 		let outputs = block
 			.outputs
 			.iter()
-			.filter(|output| {
-				commitments.is_empty() || commitments.contains(&output.commit)
-			})
+			.filter(|output| commitments.is_empty() || commitments.contains(&output.commit))
 			.map(|output| {
 				OutputPrintable::from_output(output, w(&self.chain), &block, include_proof)
 			})

@@ -406,11 +402,7 @@ pub struct BlockHandler {
 impl BlockHandler {
 	fn get_block(&self, h: &Hash) -> Result<BlockPrintable, Error> {
 		let block = w(&self.chain).get_block(h).map_err(|_| Error::NotFound)?;
-		Ok(BlockPrintable::from_block(
-			&block,
-			w(&self.chain),
-			false,
-		))
+		Ok(BlockPrintable::from_block(&block, w(&self.chain), false))
 	}
 
 	fn get_compact_block(&self, h: &Hash) -> Result<CompactBlockPrintable, Error> {
@@ -14,15 +14,15 @@
 
 extern crate grin_chain as chain;
 extern crate grin_core as core;
-extern crate grin_pool as pool;
 extern crate grin_p2p as p2p;
+extern crate grin_pool as pool;
 extern crate grin_store as store;
 extern crate grin_util as util;
 
 extern crate hyper;
-extern crate iron;
 #[macro_use]
 extern crate lazy_static;
+extern crate iron;
 extern crate mount;
 extern crate regex;
 #[macro_use]
api/src/types.rs
@@ -163,7 +163,9 @@ pub struct Utxo {
 
 impl Utxo {
 	pub fn new(commit: &pedersen::Commitment) -> Utxo {
-		Utxo { commit: PrintableCommitment(commit.clone()) }
+		Utxo {
+			commit: PrintableCommitment(commit.clone()),
+		}
 	}
 }
 

@@ -182,15 +184,19 @@ impl PrintableCommitment {
 }
 
 impl serde::ser::Serialize for PrintableCommitment {
-	fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where
-		S: serde::ser::Serializer {
+	fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+	where
+		S: serde::ser::Serializer,
+	{
 		serializer.serialize_str(&util::to_hex(self.to_vec()))
 	}
 }
 
 impl<'de> serde::de::Deserialize<'de> for PrintableCommitment {
-	fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where
-		D: serde::de::Deserializer<'de> {
+	fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+	where
+		D: serde::de::Deserializer<'de>,
+	{
 		deserializer.deserialize_str(PrintableCommitmentVisitor)
 	}
 }

@@ -204,9 +210,13 @@ impl<'de> serde::de::Visitor<'de> for PrintableCommitmentVisitor {
 		formatter.write_str("a Pedersen commitment")
 	}
 
-	fn visit_str<E>(self, v: &str) -> Result<Self::Value, E> where
-		E: serde::de::Error, {
-		Ok(PrintableCommitment(pedersen::Commitment::from_vec(util::from_hex(String::from(v)).unwrap())))
+	fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
+	where
+		E: serde::de::Error,
+	{
+		Ok(PrintableCommitment(pedersen::Commitment::from_vec(
+			util::from_hex(String::from(v)).unwrap(),
+		)))
 	}
 }
 
@@ -237,12 +247,14 @@ impl OutputPrintable {
 		block: &core::Block,
 		include_proof: bool,
 	) -> OutputPrintable {
-		let output_type =
-			if output.features.contains(core::transaction::OutputFeatures::COINBASE_OUTPUT) {
-				OutputType::Coinbase
-			} else {
-				OutputType::Transaction
-			};
+		let output_type = if output
+			.features
+			.contains(core::transaction::OutputFeatures::COINBASE_OUTPUT)
+		{
+			OutputType::Coinbase
+		} else {
+			OutputType::Transaction
+		};
 
 		let out_id = core::OutputIdentifier::from_output(&output);
 		let spent = chain.is_unspent(&out_id).is_err();

@@ -253,13 +265,14 @@ impl OutputPrintable {
 			None
 		};
 
-		// Get the Merkle proof for all unspent coinbase outputs (to verify maturity on spend).
-		// We obtain the Merkle proof by rewinding the PMMR.
-		// We require the rewind() to be stable even after the PMMR is pruned and compacted
-		// so we can still recreate the necessary proof.
+		// Get the Merkle proof for all unspent coinbase outputs (to verify maturity on
+		// spend). We obtain the Merkle proof by rewinding the PMMR.
+		// We require the rewind() to be stable even after the PMMR is pruned and
+		// compacted so we can still recreate the necessary proof.
 		let mut merkle_proof = None;
-		if output.features.contains(core::transaction::OutputFeatures::COINBASE_OUTPUT)
-			&& !spent
+		if output
+			.features
+			.contains(core::transaction::OutputFeatures::COINBASE_OUTPUT) && !spent
 		{
 			merkle_proof = chain.get_merkle_proof(&out_id, &block).ok()
 		};

@@ -285,13 +298,17 @@ impl OutputPrintable {
 	}
 
 	pub fn range_proof(&self) -> Result<pedersen::RangeProof, ser::Error> {
-		self.proof.clone().ok_or_else(|| ser::Error::HexError(format!("output range_proof missing")))
+		self.proof
+			.clone()
+			.ok_or_else(|| ser::Error::HexError(format!("output range_proof missing")))
 	}
 }
 
 impl serde::ser::Serialize for OutputPrintable {
-	fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> where
-		S: serde::ser::Serializer {
+	fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+	where
+		S: serde::ser::Serializer,
+	{
 		let mut state = serializer.serialize_struct("OutputPrintable", 7)?;
 		state.serialize_field("output_type", &self.output_type)?;
 		state.serialize_field("commit", &util::to_hex(self.commit.0.to_vec()))?;

@@ -308,8 +325,10 @@ impl serde::ser::Serialize for OutputPrintable {
 }
 
 impl<'de> serde::de::Deserialize<'de> for OutputPrintable {
-	fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where
-		D: serde::de::Deserializer<'de> {
+	fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+	where
+		D: serde::de::Deserializer<'de>,
+	{
 		#[derive(Deserialize)]
 		#[serde(field_identifier, rename_all = "snake_case")]
 		enum Field {
@@ -319,7 +338,7 @@ impl<'de> serde::de::Deserialize<'de> for OutputPrintable {
 			Spent,
 			Proof,
 			ProofHash,
-			MerkleProof
+			MerkleProof,
 		}
 
 		struct OutputPrintableVisitor;

@@ -331,8 +350,10 @@ impl<'de> serde::de::Deserialize<'de> for OutputPrintable {
 				formatter.write_str("a print able Output")
 			}
 
-			fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error> where
-				A: MapAccess<'de>, {
+			fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error>
+			where
+				A: MapAccess<'de>,
+			{
 				let mut output_type = None;
 				let mut commit = None;
 				let mut switch_commit_hash = None;

@@ -346,15 +367,15 @@ impl<'de> serde::de::Deserialize<'de> for OutputPrintable {
 						Field::OutputType => {
 							no_dup!(output_type);
 							output_type = Some(map.next_value()?)
-						},
+						}
 						Field::Commit => {
 							no_dup!(commit);
 
 							let val: String = map.next_value()?;
-							let vec = util::from_hex(val.clone())
-								.map_err(serde::de::Error::custom)?;
+							let vec =
+								util::from_hex(val.clone()).map_err(serde::de::Error::custom)?;
 							commit = Some(pedersen::Commitment::from_vec(vec));
-						},
+						}
 						Field::SwitchCommitHash => {
 							no_dup!(switch_commit_hash);
 

@@ -362,11 +383,11 @@ impl<'de> serde::de::Deserialize<'de> for OutputPrintable {
 							let hash = core::SwitchCommitHash::from_hex(&val.clone())
 								.map_err(serde::de::Error::custom)?;
 							switch_commit_hash = Some(hash)
-						},
+						}
 						Field::Spent => {
 							no_dup!(spent);
 							spent = Some(map.next_value()?)
-						},
+						}
 						Field::Proof => {
 							no_dup!(proof);
 

@@ -380,13 +401,16 @@ impl<'de> serde::de::Deserialize<'de> for OutputPrintable {
 									bytes[i] = vec[i];
 								}
 
-								proof = Some(pedersen::RangeProof { proof: bytes, plen: vec.len() })
+								proof = Some(pedersen::RangeProof {
+									proof: bytes,
+									plen: vec.len(),
+								})
 							}
-						},
+						}
 						Field::ProofHash => {
 							no_dup!(proof_hash);
 							proof_hash = Some(map.next_value()?)
-						},
+						}
 						Field::MerkleProof => {
 							no_dup!(merkle_proof);
 							if let Some(hex) = map.next_value::<Option<String>>()? {
@@ -412,7 +436,14 @@ impl<'de> serde::de::Deserialize<'de> for OutputPrintable {
 			}
 		}
 
-		const FIELDS: &'static [&'static str] = &["output_type", "commit", "switch_commit_hash", "spent", "proof", "proof_hash"];
+		const FIELDS: &'static [&'static str] = &[
+			"output_type",
+			"commit",
+			"switch_commit_hash",
+			"spent",
+			"proof",
+			"proof_hash",
+		];
 		deserializer.deserialize_struct("OutputPrintable", FIELDS, OutputPrintableVisitor)
 	}
 }

@@ -523,14 +554,17 @@ impl BlockPrintable {
 		chain: Arc<chain::Chain>,
 		include_proof: bool,
 	) -> BlockPrintable {
-		let inputs = block.inputs
+		let inputs = block
+			.inputs
 			.iter()
 			.map(|x| util::to_hex(x.commitment().0.to_vec()))
 			.collect();
 		let outputs = block
 			.outputs
 			.iter()
-			.map(|output| OutputPrintable::from_output(output, chain.clone(), &block, include_proof))
+			.map(|output| {
+				OutputPrintable::from_output(output, chain.clone(), &block, include_proof)
+			})
 			.collect();
 		let kernels = block
 			.kernels

@@ -559,19 +593,18 @@ pub struct CompactBlockPrintable {
 }
 
 impl CompactBlockPrintable {
-	/// Convert a compact block into a printable representation suitable for api response
+	/// Convert a compact block into a printable representation suitable for
+	/// api response
 	pub fn from_compact_block(
 		cb: &core::CompactBlock,
 		chain: Arc<chain::Chain>,
 	) -> CompactBlockPrintable {
 		let block = chain.get_block(&cb.hash()).unwrap();
-		let out_full = cb
-			.out_full
+		let out_full = cb.out_full
 			.iter()
 			.map(|x| OutputPrintable::from_output(x, chain.clone(), &block, false))
 			.collect();
-		let kern_full = cb
-			.kern_full
+		let kern_full = cb.kern_full
 			.iter()
 			.map(|x| TxKernelPrintable::from_txkernel(x))
 			.collect();

@@ -611,15 +644,16 @@ mod test {
 
 	#[test]
 	fn serialize_output() {
-		let hex_output = "{\
-			\"output_type\":\"Coinbase\",\
-			\"commit\":\"083eafae5d61a85ab07b12e1a51b3918d8e6de11fc6cde641d54af53608aa77b9f\",\
-			\"switch_commit_hash\":\"85daaf11011dc11e52af84ebe78e2f2d19cbdc76000000000000000000000000\",\
-			\"spent\":false,\
-			\"proof\":null,\
-			\"proof_hash\":\"ed6ba96009b86173bade6a9227ed60422916593fa32dd6d78b25b7a4eeef4946\",\
-			\"merkle_proof\":null\
-		}";
+		let hex_output =
+			"{\
+			 \"output_type\":\"Coinbase\",\
+			 \"commit\":\"083eafae5d61a85ab07b12e1a51b3918d8e6de11fc6cde641d54af53608aa77b9f\",\
+			 \"switch_commit_hash\":\"85daaf11011dc11e52af84ebe78e2f2d19cbdc76000000000000000000000000\",\
+			 \"spent\":false,\
+			 \"proof\":null,\
+			 \"proof_hash\":\"ed6ba96009b86173bade6a9227ed60422916593fa32dd6d78b25b7a4eeef4946\",\
+			 \"merkle_proof\":null\
+			 }";
 		let deserialized: OutputPrintable = serde_json::from_str(&hex_output).unwrap();
 		let serialized = serde_json::to_string(&deserialized).unwrap();
 		assert_eq!(serialized, hex_output);

@@ -627,7 +661,8 @@ mod test {
 
 	#[test]
 	fn serialize_utxo() {
-		let hex_commit = "{\"commit\":\"083eafae5d61a85ab07b12e1a51b3918d8e6de11fc6cde641d54af53608aa77b9f\"}";
+		let hex_commit =
+			"{\"commit\":\"083eafae5d61a85ab07b12e1a51b3918d8e6de11fc6cde641d54af53608aa77b9f\"}";
 		let deserialized: Utxo = serde_json::from_str(&hex_commit).unwrap();
 		let serialized = serde_json::to_string(&deserialized).unwrap();
 		assert_eq!(serialized, hex_commit);
@@ -20,7 +20,8 @@ use std::fs::File;
 use std::sync::{Arc, Mutex, RwLock};
 use std::time::{Duration, Instant};
 
-use core::core::{Block, BlockHeader, Input, OutputFeatures, OutputIdentifier, OutputStoreable, TxKernel};
+use core::core::{Block, BlockHeader, Input, OutputFeatures, OutputIdentifier, OutputStoreable,
+                 TxKernel};
 use core::core::hash::{Hash, Hashed};
 use core::core::pmmr::MerkleProof;
 use core::core::target::Difficulty;

@@ -33,7 +34,6 @@ use types::*;
 use util::secp::pedersen::RangeProof;
 use util::LOGGER;
 
-
 const MAX_ORPHAN_AGE_SECS: u64 = 30;
 
 #[derive(Debug, Clone)]

@@ -75,7 +75,9 @@ impl OrphanBlockPool {
 	{
 		let mut orphans = self.orphans.write().unwrap();
 		let mut prev_idx = self.prev_idx.write().unwrap();
-		orphans.retain(|_, ref mut x| x.added.elapsed() < Duration::from_secs(MAX_ORPHAN_AGE_SECS));
+		orphans.retain(|_, ref mut x| {
+			x.added.elapsed() < Duration::from_secs(MAX_ORPHAN_AGE_SECS)
+		});
 		prev_idx.retain(|_, &mut x| orphans.contains_key(&x));
 	}
 }

@@ -155,9 +157,7 @@ impl Chain {
 		// check if we have a head in store, otherwise the genesis block is it
 		let head = store.head();
 		let sumtree_md = match head {
-			Ok(h) => {
-				Some(store.get_block_pmmr_file_metadata(&h.last_block_h)?)
-			},
+			Ok(h) => Some(store.get_block_pmmr_file_metadata(&h.last_block_h)?),
 			Err(NotFoundErr) => None,
 			Err(e) => return Err(Error::StoreErr(e, "chain init load head".to_owned())),
 		};

@@ -172,9 +172,7 @@ impl Chain {
 			store.save_block(&genesis)?;
 			store.setup_height(&genesis.header, &tip)?;
 			if genesis.kernels.len() > 0 {
-				sumtree::extending(&mut sumtrees, |extension| {
-					extension.apply_block(&genesis)
-				})?;
+				sumtree::extending(&mut sumtrees, |extension| extension.apply_block(&genesis))?;
 			}
 
 			// saving a new tip based on genesis
@@ -211,30 +209,32 @@ impl Chain {
 
 	/// Processes a single block, then checks for orphans, processing
 	/// those as well if they're found
-	pub fn process_block(&self, b: Block, opts: Options)
-		-> Result<(Option<Tip>, Option<Block>), Error>
-	{
-		let res = self.process_block_no_orphans(b, opts);
-		match res {
-			Ok((t, b)) => {
-				// We accepted a block, so see if we can accept any orphans
-				if let Some(ref b) = b {
-					self.check_orphans(b.hash());
-				}
-				Ok((t, b))
-			},
-			Err(e) => {
-				Err(e)
-			},
+	pub fn process_block(
+		&self,
+		b: Block,
+		opts: Options,
+	) -> Result<(Option<Tip>, Option<Block>), Error> {
+		let res = self.process_block_no_orphans(b, opts);
+		match res {
+			Ok((t, b)) => {
+				// We accepted a block, so see if we can accept any orphans
+				if let Some(ref b) = b {
+					self.check_orphans(b.hash());
+				}
+				Ok((t, b))
+			}
+			Err(e) => Err(e),
 		}
 	}
 
 	/// Attempt to add a new block to the chain. Returns the new chain tip if it
 	/// has been added to the longest chain, None if it's added to an (as of
 	/// now) orphan chain.
-	pub fn process_block_no_orphans(&self, b: Block, opts: Options)
-		-> Result<(Option<Tip>, Option<Block>), Error>
-	{
+	pub fn process_block_no_orphans(
+		&self,
+		b: Block,
+		opts: Options,
+	) -> Result<(Option<Tip>, Option<Block>), Error> {
 		let head = self.store
 			.head()
 			.map_err(|e| Error::StoreErr(e, "chain load head".to_owned()))?;
@@ -258,7 +258,7 @@ impl Chain {
 					adapter.block_accepted(&b, opts);
 				}
 				Ok((Some(tip.clone()), Some(b.clone())))
-			},
+			}
 			Ok(None) => {
 				// block got accepted but we did not extend the head
 				// so its on a fork (or is the start of a new fork)

@@ -267,7 +267,8 @@ impl Chain {
 				// TODO - This opens us to an amplification attack on blocks
 				// mined at a low difficulty. We should suppress really old blocks
 				// or less relevant blocks somehow.
-				// We should also probably consider banning nodes that send us really old blocks.
+				// We should also probably consider banning nodes that send us really old
+				// blocks.
 				//
 				if !opts.contains(Options::SYNC) {
 					// broadcast the block

@@ -275,7 +276,7 @@ impl Chain {
 					adapter.block_accepted(&b, opts);
 				}
 				Ok((None, Some(b.clone())))
-			},
+			}
 			Err(Error::Orphan) => {
 				let block_hash = b.hash();
 				let orphan = Orphan {

@@ -297,7 +298,7 @@ impl Chain {
 					self.orphans.len(),
 				);
 				Err(Error::Orphan)
-			},
+			}
 			Err(Error::Unfit(ref msg)) => {
 				debug!(
 					LOGGER,

@@ -334,11 +335,7 @@ impl Chain {
 
 	/// Attempt to add a new header to the header chain.
 	/// This is only ever used during sync and uses sync_head.
-	pub fn sync_block_header(
-		&self,
-		bh: &BlockHeader,
-		opts: Options,
-	) -> Result<Option<Tip>, Error> {
+	pub fn sync_block_header(&self, bh: &BlockHeader, opts: Options) -> Result<Option<Tip>, Error> {
 		let sync_head = self.get_sync_head()?;
 		let header_head = self.get_header_head()?;
 		let sync_ctx = self.ctx_from_head(sync_head, opts);

@@ -361,7 +358,6 @@ impl Chain {
 		self.orphans.contains(hash)
 	}
 
-
 	/// Check for orphans, once a block is successfully added
 	pub fn check_orphans(&self, mut last_block_hash: Hash) {
 		debug!(

@@ -384,10 +380,10 @@ impl Chain {
 					} else {
 						break;
 					}
-				},
+				}
 				Err(_) => {
 					break;
-				},
+				}
 			};
 		} else {
 			break;
@@ -408,9 +404,7 @@ impl Chain {
 	pub fn validate(&self) -> Result<(), Error> {
 		let header = self.store.head_header()?;
 		let mut sumtrees = self.sumtrees.write().unwrap();
-		sumtree::extending(&mut sumtrees, |extension| {
-			extension.validate(&header)
-		})
+		sumtree::extending(&mut sumtrees, |extension| extension.validate(&header))
 	}
 
 	/// Check if the input has matured sufficiently for the given block height.

@@ -466,13 +460,7 @@ impl Chain {
 	}
 
 	/// Returns current sumtree roots
-	pub fn get_sumtree_roots(
-		&self,
-	) -> (
-		Hash,
-		Hash,
-		Hash,
-	) {
+	pub fn get_sumtree_roots(&self) -> (Hash, Hash, Hash) {
 		let mut sumtrees = self.sumtrees.write().unwrap();
 		sumtrees.roots()
 	}

@@ -507,9 +495,8 @@ impl Chain {
 		h: Hash,
 		rewind_to_output: u64,
 		rewind_to_kernel: u64,
-		sumtree_data: File
+		sumtree_data: File,
 	) -> Result<(), Error> {
-
 		let head = self.head().unwrap();
 		let header_head = self.get_header_head().unwrap();
 		if header_head.height - head.height < global::cut_through_horizon() as u64 {

@@ -610,17 +597,17 @@ impl Chain {
 
 	/// Gets the block header at the provided height
 	pub fn get_header_by_height(&self, height: u64) -> Result<BlockHeader, Error> {
-		self.store.get_header_by_height(height).map_err(|e| {
-			Error::StoreErr(e, "chain get header by height".to_owned())
-		})
+		self.store
+			.get_header_by_height(height)
+			.map_err(|e| Error::StoreErr(e, "chain get header by height".to_owned()))
 	}
 
 	/// Verifies the given block header is actually on the current chain.
 	/// Checks the header_by_height index to verify the header is where we say it is
 	pub fn is_on_current_chain(&self, header: &BlockHeader) -> Result<(), Error> {
-		self.store.is_on_current_chain(header).map_err(|e| {
-			Error::StoreErr(e, "chain is_on_current_chain".to_owned())
-		})
+		self.store
+			.is_on_current_chain(header)
+			.map_err(|e| Error::StoreErr(e, "chain is_on_current_chain".to_owned()))
 	}
 
 	/// Get the tip of the current "sync" header chain.

@@ -648,13 +635,18 @@ impl Chain {
 
 	/// Check whether we have a block without reading it
 	pub fn block_exists(&self, h: Hash) -> Result<bool, Error> {
-		self.store.block_exists(&h)
+		self.store
+			.block_exists(&h)
 			.map_err(|e| Error::StoreErr(e, "chain block exists".to_owned()))
 	}
 
 	/// Retrieve the file index metadata for a given block
-	pub fn get_block_pmmr_file_metadata(&self, h: &Hash) -> Result<PMMRFileMetadataCollection, Error> {
-		self.store.get_block_pmmr_file_metadata(h)
+	pub fn get_block_pmmr_file_metadata(
+		&self,
+		h: &Hash,
+	) -> Result<PMMRFileMetadataCollection, Error> {
+		self.store
+			.get_block_pmmr_file_metadata(h)
 			.map_err(|e| Error::StoreErr(e, "retrieve block pmmr metadata".to_owned()))
 	}
 }
@@ -64,25 +64,27 @@ pub fn process_block(b: &Block, mut ctx: BlockContext) -> Result<Option<Tip>, Error> {
 
 	validate_header(&b.header, &mut ctx)?;
 
-	// valid header, now check we actually have the previous block in the store
+	// valid header, now check we actually have the previous block in the store
 	// not just the header but the block itself
 	// short circuit the test first both for performance (in-mem vs db access)
 	// but also for the specific case of the first fast sync full block
 	if b.header.previous != ctx.head.last_block_h {
-		// we cannot assume we can use the chain head for this as we may be dealing with a fork
-		// we cannot use heights here as the fork may have jumped in height
+		// we cannot assume we can use the chain head for this as we may be dealing
+		// with a fork we cannot use heights here as the fork may have jumped in
+		// height
 		match ctx.store.block_exists(&b.header.previous) {
-			Ok(true) => {},
+			Ok(true) => {}
			Ok(false) => {
				return Err(Error::Orphan);
-			},
+			}
			Err(e) => {
				return Err(Error::StoreErr(e, "pipe get previous".to_owned()));
			}
		}
	}

-	// valid header and we have a previous block, time to take the lock on the sum trees
+	// valid header and we have a previous block, time to take the lock on the sum
+	// trees
	let local_sumtrees = ctx.sumtrees.clone();
	let mut sumtrees = local_sumtrees.write().unwrap();

@@ -112,15 +114,19 @@ pub fn process_block(b: &Block, mut ctx: BlockContext) -> Result<Option<Tip>, Error> {
 
 	match result {
 		Ok(t) => {
-			save_pmmr_metadata(&Tip::from_block(&b.header), &sumtrees, ctx.store.clone())?;
+			save_pmmr_metadata(&Tip::from_block(&b.header), &sumtrees, ctx.store.clone())?;
 			Ok(t)
-		},
+		}
 		Err(e) => Err(e),
 	}
 }
 
 /// Save pmmr index location for a given block
-pub fn save_pmmr_metadata(t: &Tip, sumtrees: &sumtree::SumTrees, store: Arc<ChainStore>) -> Result<(), Error> {
+pub fn save_pmmr_metadata(
+	t: &Tip,
+	sumtrees: &sumtree::SumTrees,
+	store: Arc<ChainStore>,
+) -> Result<(), Error> {
 	// Save pmmr file metadata for this block
 	let block_file_md = sumtrees.last_file_metadata();
 	store
@@ -136,7 +142,12 @@ pub fn sync_block_header(
 	mut sync_ctx: BlockContext,
 	mut header_ctx: BlockContext,
 ) -> Result<Option<Tip>, Error> {
-	debug!(LOGGER, "pipe: sync_block_header: {} at {}", bh.hash(), bh.height);
+	debug!(
+		LOGGER,
+		"pipe: sync_block_header: {} at {}",
+		bh.hash(),
+		bh.height
+	);
 
 	validate_header(&bh, &mut sync_ctx)?;
 	add_block_header(bh, &mut sync_ctx)?;

@@ -146,17 +157,20 @@ pub fn sync_block_header(
 	// just taking the shared lock
 	let _ = header_ctx.sumtrees.write().unwrap();
 
-	// now update the header_head (if new header with most work) and the sync_head (always)
+	// now update the header_head (if new header with most work) and the sync_head
+	// (always)
 	update_header_head(bh, &mut header_ctx)?;
 	update_sync_head(bh, &mut sync_ctx)
 }
 
 /// Process block header as part of "header first" block propagation.
-pub fn process_block_header(
-	bh: &BlockHeader,
-	mut ctx: BlockContext,
-) -> Result<Option<Tip>, Error> {
-	debug!(LOGGER, "pipe: process_block_header: {} at {}", bh.hash(), bh.height);
+pub fn process_block_header(bh: &BlockHeader, mut ctx: BlockContext) -> Result<Option<Tip>, Error> {
+	debug!(
+		LOGGER,
+		"pipe: process_block_header: {} at {}",
+		bh.hash(),
+		bh.height
+	);
 
 	check_header_known(bh.hash(), &mut ctx)?;
 	validate_header(&bh, &mut ctx)?;

@@ -214,13 +228,11 @@ fn check_known(bh: Hash, ctx: &mut BlockContext) -> Result<(), Error> {
 /// arranged by order of cost to have as little DoS surface as possible.
 /// TODO require only the block header (with length information)
 fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), Error> {
-
 	// check version, enforces scheduled hard fork
 	if !consensus::valid_header_version(header.height, header.version) {
 		error!(
 			LOGGER,
-			"Invalid block header version received ({}), maybe update Grin?",
-			header.version
+			"Invalid block header version received ({}), maybe update Grin?", header.version
 		);
 		return Err(Error::InvalidBlockVersion(header.version));
 	}

@@ -236,11 +248,17 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), Error> {
 	if !ctx.opts.contains(Options::SKIP_POW) {
 		let n = global::sizeshift() as u32;
 		if !(ctx.pow_verifier)(header, n) {
-			error!(LOGGER, "pipe: validate_header failed for cuckoo shift size {}", n);
+			error!(
+				LOGGER,
+				"pipe: validate_header failed for cuckoo shift size {}", n
+			);
 			return Err(Error::InvalidPow);
 		}
 		if header.height % 500 == 0 {
-			debug!(LOGGER, "Validating header validated, using cuckoo shift size {}", n);
+			debug!(
+				LOGGER,
+				"Validating header validated, using cuckoo shift size {}", n
+			);
 		}
 	}
@@ -248,9 +266,10 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), Error> {
 	let prev = match ctx.store.get_block_header(&header.previous) {
 		Ok(prev) => Ok(prev),
 		Err(grin_store::Error::NotFoundErr) => Err(Error::Orphan),
-		Err(e) =>{
-			Err(Error::StoreErr(e, format!("previous header {}", header.previous)))
-		}
+		Err(e) => Err(Error::StoreErr(
+			e,
+			format!("previous header {}", header.previous),
+		)),
 	}?;
 
 	if header.height != prev.height + 1 {

@@ -312,7 +331,6 @@ fn validate_block(
 	ctx: &mut BlockContext,
 	ext: &mut sumtree::Extension,
 ) -> Result<(), Error> {
-
 	// main isolated block validation, checks all commitment sums and sigs
 	b.validate().map_err(&Error::InvalidBlockProof)?;
 

@@ -331,9 +349,7 @@ fn validate_block(
 
 		debug!(
 			LOGGER,
-			"validate_block: utxo roots - {:?}, {:?}",
-			roots.utxo_root,
-			b.header.utxo_root,
+			"validate_block: utxo roots - {:?}, {:?}", roots.utxo_root, b.header.utxo_root,
 		);
 		debug!(
 			LOGGER,

@@ -343,9 +359,7 @@ fn validate_block(
 		);
 		debug!(
 			LOGGER,
-			"validate_block: kernel roots - {:?}, {:?}",
-			roots.kernel_root,
-			b.header.kernel_root,
+			"validate_block: kernel roots - {:?}, {:?}", roots.kernel_root, b.header.kernel_root,
 		);
 
 		return Err(Error::InvalidRoot);
@@ -395,11 +409,21 @@ fn update_head(b: &Block, ctx: &mut BlockContext) -> Result<Option<Tip>, Error> {
 		}
 		ctx.head = tip.clone();
 		if b.header.height % 100 == 0 {
-			info!(LOGGER, "pipe: chain head reached {} @ {} [{}]",
-				b.header.height, b.header.difficulty, b.hash());
+			info!(
+				LOGGER,
+				"pipe: chain head reached {} @ {} [{}]",
+				b.header.height,
+				b.header.difficulty,
+				b.hash()
+			);
 		} else {
-			debug!(LOGGER, "pipe: chain head reached {} @ {} [{}]",
-				b.header.height, b.header.difficulty, b.hash());
+			debug!(
+				LOGGER,
+				"pipe: chain head reached {} @ {} [{}]",
+				b.header.height,
+				b.header.difficulty,
+				b.hash()
+			);
 		}
 		Ok(Some(tip))
 	} else {

@@ -415,9 +439,21 @@ fn update_sync_head(bh: &BlockHeader, ctx: &mut BlockContext) -> Result<Option<Tip>, Error> {
 		.map_err(|e| Error::StoreErr(e, "pipe save sync head".to_owned()))?;
 	ctx.head = tip.clone();
 	if bh.height % 100 == 0 {
-		info!(LOGGER, "sync head {} @ {} [{}]", bh.total_difficulty, bh.height, bh.hash());
+		info!(
+			LOGGER,
+			"sync head {} @ {} [{}]",
+			bh.total_difficulty,
+			bh.height,
+			bh.hash()
+		);
 	} else {
-		debug!(LOGGER, "sync head {} @ {} [{}]", bh.total_difficulty, bh.height, bh.hash());
+		debug!(
+			LOGGER,
+			"sync head {} @ {} [{}]",
+			bh.total_difficulty,
+			bh.height,
+			bh.hash()
+		);
 	}
 	Ok(Some(tip))
 }

@@ -430,9 +466,21 @@ fn update_header_head(bh: &BlockHeader, ctx: &mut BlockContext) -> Result<Option<Tip>, Error> {
 		.map_err(|e| Error::StoreErr(e, "pipe save header head".to_owned()))?;
 	ctx.head = tip.clone();
 	if bh.height % 100 == 0 {
-		info!(LOGGER, "header head {} @ {} [{}]", bh.total_difficulty, bh.height, bh.hash());
+		info!(
+			LOGGER,
+			"header head {} @ {} [{}]",
+			bh.total_difficulty,
+			bh.height,
+			bh.hash()
+		);
 	} else {
-		debug!(LOGGER, "header head {} @ {} [{}]", bh.total_difficulty, bh.height, bh.hash());
+		debug!(
+			LOGGER,
+			"header head {} @ {} [{}]",
+			bh.total_difficulty,
+			bh.height,
+			bh.hash()
+		);
 	}
 	Ok(Some(tip))
 } else {
@@ -449,7 +497,6 @@ pub fn rewind_and_apply_fork(
 	store: Arc<ChainStore>,
 	ext: &mut sumtree::Extension,
 ) -> Result<(), Error> {
-
 	// extending a fork, first identify the block where forking occurred
 	// keeping the hashes of blocks along the fork
 	let mut current = b.header.previous;

@@ -479,9 +526,9 @@ pub fn rewind_and_apply_fork(
 
 	// apply all forked blocks, including this new one
 	for h in hashes {
-		let fb = store.get_block(&h).map_err(|e| {
-			Error::StoreErr(e, format!("getting forked blocks"))
-		})?;
+		let fb = store
+			.get_block(&h)
+			.map_err(|e| Error::StoreErr(e, format!("getting forked blocks")))?;
 		ext.apply_block(&fb)?;
 	}
 	Ok(())
@@ -105,7 +105,8 @@ impl ChainStore for ChainKVStore {
 
 	fn get_block_header(&self, h: &Hash) -> Result<BlockHeader, Error> {
 		option_to_not_found(
-			self.db.get_ser(&to_key(BLOCK_HEADER_PREFIX, &mut h.to_vec())),
+			self.db
+				.get_ser(&to_key(BLOCK_HEADER_PREFIX, &mut h.to_vec())),
 		)
 	}
 

@@ -113,10 +114,7 @@ impl ChainStore for ChainKVStore {
 	fn save_block(&self, b: &Block) -> Result<(), Error> {
 		let batch = self.db
 			.batch()
-			.put_ser(
-				&to_key(BLOCK_PREFIX, &mut b.hash().to_vec())[..],
-				b,
-			)?
+			.put_ser(&to_key(BLOCK_PREFIX, &mut b.hash().to_vec())[..], b)?
 			.put_ser(
 				&to_key(BLOCK_HEADER_PREFIX, &mut b.hash().to_vec())[..],
 				&b.header,

@@ -187,14 +185,18 @@ impl ChainStore for ChainKVStore {
 		)
 	}
 
-	fn save_block_pmmr_file_metadata(&self, h:&Hash, md: &PMMRFileMetadataCollection) -> Result<(), Error> {
+	fn save_block_pmmr_file_metadata(
+		&self,
+		h: &Hash,
+		md: &PMMRFileMetadataCollection,
+	) -> Result<(), Error> {
 		self.db.put_ser(
 			&to_key(BLOCK_PMMR_FILE_METADATA_PREFIX, &mut h.to_vec())[..],
 			&md,
 		)
 	}
 
-	fn get_block_pmmr_file_metadata(&self, h: &Hash) -> Result<PMMRFileMetadataCollection, Error>{
+	fn get_block_pmmr_file_metadata(&self, h: &Hash) -> Result<PMMRFileMetadataCollection, Error> {
 		option_to_not_found(
 			self.db
 				.get_ser(&to_key(BLOCK_PMMR_FILE_METADATA_PREFIX, &mut h.to_vec())),
@@ -202,7 +204,8 @@ impl ChainStore for ChainKVStore {
 	}
 
 	fn delete_block_pmmr_file_metadata(&self, h: &Hash) -> Result<(), Error> {
-		self.db.delete(&to_key(BLOCK_PMMR_FILE_METADATA_PREFIX, &mut h.to_vec())[..])
+		self.db
+			.delete(&to_key(BLOCK_PMMR_FILE_METADATA_PREFIX, &mut h.to_vec())[..])
 	}
 
 	/// Maintain consistency of the "header_by_height" index by traversing back

@@ -212,9 +215,7 @@ impl ChainStore for ChainKVStore {
 	/// We need to handle the case where we have no index entry for a given
 	/// height to account for the case where we just switched to a new fork and
 	/// the height jumped beyond current chain height.
-	fn setup_height(&self, header: &BlockHeader, old_tip: &Tip)
-		-> Result<(), Error> {
-
+	fn setup_height(&self, header: &BlockHeader, old_tip: &Tip) -> Result<(), Error> {
 		// remove headers ahead if we backtracked
 		for n in header.height..old_tip.height {
 			self.delete_header_by_height(n)?;

@@ -229,8 +230,10 @@ impl ChainStore for ChainKVStore {
 			if let Ok(_) = self.is_on_current_chain(&prev_header) {
 				break;
 			}
-			self.db
-				.put_ser(&u64_to_key(HEADER_HEIGHT_PREFIX, prev_header.height), &prev_header)?;
+			self.db.put_ser(
+				&u64_to_key(HEADER_HEIGHT_PREFIX, prev_header.height),
+				&prev_header,
+			)?;
 
 			prev_header = self.get_block_header(&prev_header.previous)?;
 		}
@@ -23,19 +23,19 @@ use std::path::{Path, PathBuf};
 use std::sync::Arc;
 
 use util::static_secp_instance;
-use util::secp::pedersen::{RangeProof, Commitment};
+use util::secp::pedersen::{Commitment, RangeProof};
 
 use core::consensus::reward;
-use core::core::{Block, BlockHeader, Input, Output, OutputIdentifier,
-	OutputFeatures, OutputStoreable, TxKernel};
-use core::core::pmmr::{self, PMMR, MerkleProof};
+use core::core::{Block, BlockHeader, Input, Output, OutputFeatures, OutputIdentifier,
+                 OutputStoreable, TxKernel};
+use core::core::pmmr::{self, MerkleProof, PMMR};
 use core::core::hash::{Hash, Hashed};
 use core::ser::{self, PMMRable};
 
 use grin_store;
 use grin_store::pmmr::{PMMRBackend, PMMRFileMetadata};
-use types::{ChainStore, SumTreeRoots, PMMRFileMetadataCollection, Error};
-use util::{LOGGER, zip};
+use types::{ChainStore, Error, PMMRFileMetadataCollection, SumTreeRoots};
+use util::{zip, LOGGER};
 
 const SUMTREES_SUBDIR: &'static str = "sumtrees";
 const UTXO_SUBDIR: &'static str = "utxo";

@@ -55,7 +55,11 @@ impl<T> PMMRHandle<T>
 where
 	T: PMMRable,
 {
-	fn new(root_dir: String, file_name: &str, index_md: Option<PMMRFileMetadata>) -> Result<PMMRHandle<T>, Error> {
+	fn new(
+		root_dir: String,
+		file_name: &str,
+		index_md: Option<PMMRFileMetadata>,
+	) -> Result<PMMRHandle<T>, Error> {
 		let path = Path::new(&root_dir).join(SUMTREES_SUBDIR).join(file_name);
 		fs::create_dir_all(path.clone())?;
 		let be = PMMRBackend::new(path.to_str().unwrap().to_string(), index_md)?;

@@ -65,7 +69,7 @@ where
 			last_pos: sz,
 		})
 	}
 
 	/// Return last written positions of hash file and data file
 	pub fn last_file_positions(&self) -> PMMRFileMetadata {
 		self.backend.last_file_positions()
@@ -93,18 +97,21 @@ pub struct SumTrees {
 
 impl SumTrees {
 	/// Open an existing or new set of backends for the SumTrees
-	pub fn open(root_dir: String,
+	pub fn open(
+		root_dir: String,
 		commit_index: Arc<ChainStore>,
-		last_file_positions: Option<PMMRFileMetadataCollection>
-	) -> Result<SumTrees, Error> {
-
+		last_file_positions: Option<PMMRFileMetadataCollection>,
+	) -> Result<SumTrees, Error> {
 		let utxo_file_path: PathBuf = [&root_dir, SUMTREES_SUBDIR, UTXO_SUBDIR].iter().collect();
 		fs::create_dir_all(utxo_file_path.clone())?;
 
-		let rproof_file_path: PathBuf = [&root_dir, SUMTREES_SUBDIR, RANGE_PROOF_SUBDIR].iter().collect();
+		let rproof_file_path: PathBuf = [&root_dir, SUMTREES_SUBDIR, RANGE_PROOF_SUBDIR]
+			.iter()
+			.collect();
 		fs::create_dir_all(rproof_file_path.clone())?;
 
-		let kernel_file_path: PathBuf = [&root_dir, SUMTREES_SUBDIR, KERNEL_SUBDIR].iter().collect();
+		let kernel_file_path: PathBuf =
+			[&root_dir, SUMTREES_SUBDIR, KERNEL_SUBDIR].iter().collect();
 		fs::create_dir_all(kernel_file_path.clone())?;
 
 		let mut utxo_md = None;

@@ -131,10 +138,8 @@ impl SumTrees {
 	pub fn is_unspent(&mut self, output_id: &OutputIdentifier) -> Result<Hash, Error> {
 		match self.commit_index.get_output_pos(&output_id.commit) {
 			Ok(pos) => {
-				let output_pmmr:PMMR<OutputStoreable, _> = PMMR::at(
-					&mut self.utxo_pmmr_h.backend,
-					self.utxo_pmmr_h.last_pos,
-				);
+				let output_pmmr: PMMR<OutputStoreable, _> =
+					PMMR::at(&mut self.utxo_pmmr_h.backend, self.utxo_pmmr_h.last_pos);
 				if let Some((hash, _)) = output_pmmr.get(pos, false) {
 					if hash == output_id.hash() {
 						Ok(hash)

@@ -154,19 +159,22 @@ impl SumTrees {
 	/// nodes at level 0
 	/// TODO: These need to return the actual data from the flat-files instead of hashes now
 	pub fn last_n_utxo(&mut self, distance: u64) -> Vec<(Hash, Option<OutputStoreable>)> {
-		let utxo_pmmr:PMMR<OutputStoreable, _> = PMMR::at(&mut self.utxo_pmmr_h.backend, self.utxo_pmmr_h.last_pos);
+		let utxo_pmmr: PMMR<OutputStoreable, _> =
+			PMMR::at(&mut self.utxo_pmmr_h.backend, self.utxo_pmmr_h.last_pos);
 		utxo_pmmr.get_last_n_insertions(distance)
 	}
 
 	/// as above, for range proofs
 	pub fn last_n_rangeproof(&mut self, distance: u64) -> Vec<(Hash, Option<RangeProof>)> {
-		let rproof_pmmr:PMMR<RangeProof, _> = PMMR::at(&mut self.rproof_pmmr_h.backend, self.rproof_pmmr_h.last_pos);
+		let rproof_pmmr: PMMR<RangeProof, _> =
+			PMMR::at(&mut self.rproof_pmmr_h.backend, self.rproof_pmmr_h.last_pos);
 		rproof_pmmr.get_last_n_insertions(distance)
 	}
 
 	/// as above, for kernels
 	pub fn last_n_kernel(&mut self, distance: u64) -> Vec<(Hash, Option<TxKernel>)> {
-		let kernel_pmmr:PMMR<TxKernel, _> = PMMR::at(&mut self.kernel_pmmr_h.backend, self.kernel_pmmr_h.last_pos);
+		let kernel_pmmr: PMMR<TxKernel, _> =
+			PMMR::at(&mut self.kernel_pmmr_h.backend, self.kernel_pmmr_h.last_pos);
 		kernel_pmmr.get_last_n_insertions(distance)
 	}
 
@@ -180,22 +188,19 @@ impl SumTrees {
 		PMMRFileMetadataCollection::new(
 			self.utxo_pmmr_h.last_file_positions(),
 			self.rproof_pmmr_h.last_file_positions(),
-			self.kernel_pmmr_h.last_file_positions()
+			self.kernel_pmmr_h.last_file_positions(),
 		)
 	}
 
 	/// Get sum tree roots
 	/// TODO: Return data instead of hashes
-	pub fn roots(
-		&mut self,
-	) -> (
-		Hash,
-		Hash,
-		Hash,
-	) {
-		let output_pmmr:PMMR<OutputStoreable, _> = PMMR::at(&mut self.utxo_pmmr_h.backend, self.utxo_pmmr_h.last_pos);
-		let rproof_pmmr:PMMR<RangeProof, _> = PMMR::at(&mut self.rproof_pmmr_h.backend, self.rproof_pmmr_h.last_pos);
-		let kernel_pmmr:PMMR<TxKernel, _> = PMMR::at(&mut self.kernel_pmmr_h.backend, self.kernel_pmmr_h.last_pos);
+	pub fn roots(&mut self) -> (Hash, Hash, Hash) {
+		let output_pmmr: PMMR<OutputStoreable, _> =
+			PMMR::at(&mut self.utxo_pmmr_h.backend, self.utxo_pmmr_h.last_pos);
+		let rproof_pmmr: PMMR<RangeProof, _> =
+			PMMR::at(&mut self.rproof_pmmr_h.backend, self.rproof_pmmr_h.last_pos);
+		let kernel_pmmr: PMMR<TxKernel, _> =
+			PMMR::at(&mut self.kernel_pmmr_h.backend, self.kernel_pmmr_h.last_pos);
 		(output_pmmr.root(), rproof_pmmr.root(), kernel_pmmr.root())
 	}
 }

@@ -273,16 +278,9 @@ pub struct Extension<'a> {
 
 impl<'a> Extension<'a> {
 	// constructor
-	fn new(
-		trees: &'a mut SumTrees,
-		commit_index: Arc<ChainStore>,
-	) -> Extension<'a> {
-
+	fn new(trees: &'a mut SumTrees, commit_index: Arc<ChainStore>) -> Extension<'a> {
 		Extension {
-			utxo_pmmr: PMMR::at(
-				&mut trees.utxo_pmmr_h.backend,
-				trees.utxo_pmmr_h.last_pos,
-			),
+			utxo_pmmr: PMMR::at(&mut trees.utxo_pmmr_h.backend, trees.utxo_pmmr_h.last_pos),
 			rproof_pmmr: PMMR::at(
 				&mut trees.rproof_pmmr_h.backend,
 				trees.rproof_pmmr_h.last_pos,

@@ -302,7 +300,6 @@ impl<'a> Extension<'a> {
 	/// applied in order of the provided Vec. If pruning is enabled, inputs also
 	/// prune MMR data.
 	pub fn apply_block(&mut self, b: &Block) -> Result<(), Error> {
-
 		// first applying coinbase outputs. due to the construction of PMMRs the
 		// last element, when its a leaf, can never be pruned as it has no parent
 		// yet and it will be needed to calculate that hash. to work around this,
@@ -357,8 +354,9 @@ impl<'a> Extension<'a> {
 		// check hash from pmmr matches hash from input (or corresponding output)
 		// if not then the input is not being honest about
 		// what it is attempting to spend...
-		if output_id_hash != read_hash ||
-			output_id_hash != read_elem.expect("no output at position").hash() {
+		if output_id_hash != read_hash
+			|| output_id_hash != read_elem.expect("no output at position").hash()
+		{
 			return Err(Error::SumTreeErr(format!("output pmmr hash mismatch")));
 		}
 

@@ -370,7 +368,8 @@ impl<'a> Extension<'a> {
 		}
 
 		// Now prune the utxo_pmmr, rproof_pmmr and their storage.
-		// Input is not valid if we cannot prune successfully (to spend an unspent output).
+		// Input is not valid if we cannot prune successfully (to spend an unspent
+		// output).
 		match self.utxo_pmmr.prune(pos, height as u32) {
 			Ok(true) => {
 				self.rproof_pmmr

@@ -420,7 +419,7 @@ impl<'a> Extension<'a> {
 	fn apply_kernel(&mut self, kernel: &TxKernel) -> Result<(), Error> {
 		if let Ok(pos) = self.get_kernel_pos(&kernel.excess) {
 			// same as outputs
-			if let Some((h,_)) = self.kernel_pmmr.get(pos, false) {
+			if let Some((h, _)) = self.kernel_pmmr.get(pos, false) {
 				if h == kernel.hash() {
 					return Err(Error::DuplicateKernel(kernel.excess.clone()));
 				}

@@ -446,7 +445,11 @@ impl<'a> Extension<'a> {
 		output: &OutputIdentifier,
 		block: &Block,
 	) -> Result<MerkleProof, Error> {
-		debug!(LOGGER, "sumtree: merkle_proof_via_rewind: rewinding to block {:?}", block.hash());
+		debug!(
+			LOGGER,
+			"sumtree: merkle_proof_via_rewind: rewinding to block {:?}",
+			block.hash()
+		);
 		// rewind to the specified block
 		self.rewind(block)?;
 		// then calculate the Merkle Proof based on the known pos
@@ -476,11 +479,15 @@ impl<'a> Extension<'a> {
 
 	/// Rewinds the MMRs to the provided positions, given the output and
 	/// kernel we want to rewind to.
-	pub fn rewind_pos(&mut self, height: u64, out_pos_rew: u64, kern_pos_rew: u64) -> Result<(), Error> {
-		debug!(LOGGER,
-			"Rewind sumtrees to output pos: {}, kernel pos: {}",
-			out_pos_rew,
-			kern_pos_rew,
+	pub fn rewind_pos(
+		&mut self,
+		height: u64,
+		out_pos_rew: u64,
+		kern_pos_rew: u64,
+	) -> Result<(), Error> {
+		debug!(
+			LOGGER,
+			"Rewind sumtrees to output pos: {}, kernel pos: {}", out_pos_rew, kern_pos_rew,
 		);
 
 		self.utxo_pmmr

@@ -514,9 +521,7 @@ impl<'a> Extension<'a> {
 
 	/// Current root hashes and sums (if applicable) for the UTXO, range proof
 	/// and kernel sum trees.
-	pub fn roots(
-		&self,
-	) -> SumTreeRoots {
+	pub fn roots(&self) -> SumTreeRoots {
 		SumTreeRoots {
 			utxo_root: self.utxo_pmmr.root(),
 			rproof_root: self.rproof_pmmr.root(),

@@ -556,7 +561,9 @@ impl<'a> Extension<'a> {
 			let adjusted_sum_utxo = secp.commit_sum(vec![utxo_sum], vec![over_commit])?;
 
 			if adjusted_sum_utxo != kernel_sum {
-				return Err(Error::InvalidSumtree("Differing UTXO commitment and kernel excess sums.".to_owned()));
+				return Err(Error::InvalidSumtree(
+					"Differing UTXO commitment and kernel excess sums.".to_owned(),
+				));
 			}
 		}
 

@@ -567,11 +574,12 @@ impl<'a> Extension<'a> {
 	/// by iterating over the whole MMR data. This is a costly operation
 	/// performed only when we receive a full new chain state.
 	pub fn rebuild_index(&self) -> Result<(), Error> {
-		for n in 1..self.utxo_pmmr.unpruned_size()+1 {
+		for n in 1..self.utxo_pmmr.unpruned_size() + 1 {
 			// non-pruned leaves only
 			if pmmr::bintree_postorder_height(n) == 0 {
 				if let Some((_, out)) = self.utxo_pmmr.get(n, true) {
-					self.commit_index.save_output_pos(&out.expect("not a leaf node").commit, n)?;
+					self.commit_index
+						.save_output_pos(&out.expect("not a leaf node").commit, n)?;
 				}
 			}
 		}

@@ -605,7 +613,8 @@ impl<'a> Extension<'a> {
 		)
 	}
 
-	/// Sums the excess of all our kernels, validating their signatures on the way
+	/// Sums the excess of all our kernels, validating their signatures on the
+	/// way
 	fn sum_kernels(&self) -> Result<(Commitment, u64), Error> {
 		// make sure we have the right count of kernels using the MMR, the storage
 		// file may have a few more
@@ -644,9 +653,9 @@ impl<'a> Extension<'a> {
 		let mut sum_utxo = None;
 		let mut utxo_count = 0;
 		let secp = static_secp_instance();
-		for n in 1..self.utxo_pmmr.unpruned_size()+1 {
+		for n in 1..self.utxo_pmmr.unpruned_size() + 1 {
 			if pmmr::bintree_postorder_height(n) == 0 {
-				if let Some((_,output)) = self.utxo_pmmr.get(n, true) {
+				if let Some((_, output)) = self.utxo_pmmr.get(n, true) {
 					let out = output.expect("not a leaf node");
 					let commit = out.commit.clone();
 					match self.rproof_pmmr.get(n, true) {

@@ -675,9 +684,10 @@ impl<'a> Extension<'a> {
 /// and needs to be consistent with how we originally processed
 /// the outputs in apply_block()
 fn indexes_at(block: &Block, commit_index: &ChainStore) -> Result<(u64, u64), Error> {
-	// If we have any regular outputs then the "last" output is the last regular output
-	// otherwise it is the last coinbase output.
-	// This is because we process coinbase outputs before regular outputs in apply_block().
+	// If we have any regular outputs then the "last" output is the last regular
+	// output otherwise it is the last coinbase output.
+	// This is because we process coinbase outputs before regular outputs in
+	// apply_block().
 	//
 	// TODO - consider maintaining coinbase outputs in a separate vec in a block?
 	//

@@ -698,7 +708,7 @@ fn indexes_at(block: &Block, commit_index: &ChainStore) -> Result<(u64, u64), Error> {
 	} else if last_coinbase_output.is_some() {
 		last_coinbase_output.unwrap()
 	} else {
-		return Err(Error::Other("can't get index in an empty block".to_owned()))
+		return Err(Error::Other("can't get index in an empty block".to_owned()));
 	};
 
 	let out_idx = commit_index

@@ -706,10 +716,9 @@ fn indexes_at(block: &Block, commit_index: &ChainStore) -> Result<(u64, u64), Error> {
 		.map_err(|e| Error::StoreErr(e, format!("missing output pos for block")))?;
 
 	let kern_idx = match block.kernels.last() {
-		Some(kernel) => commit_index.get_kernel_pos(&kernel.excess)
-			.map_err(|e| {
-				Error::StoreErr(e, format!("missing kernel pos for block"))
-			})?,
+		Some(kernel) => commit_index
+			.get_kernel_pos(&kernel.excess)
+			.map_err(|e| Error::StoreErr(e, format!("missing kernel pos for block")))?,
 		None => {
 			return Err(Error::Other("can't get index in an empty block".to_owned()));
 		}

@@ -741,6 +750,5 @@ pub fn zip_write(root_dir: String, sumtree_data: File) -> Result<(), Error> {
 	let sumtrees_path = Path::new(&root_dir).join(SUMTREES_SUBDIR);
 
 	fs::create_dir_all(sumtrees_path.clone())?;
-	zip::decompress(sumtree_data, &sumtrees_path)
-		.map_err(|ze| Error::Other(ze.to_string()))
+	zip::decompress(sumtree_data, &sumtrees_path).map_err(|ze| Error::Other(ze.to_string()))
 }
@@ -20,10 +20,10 @@ use util::secp;
 use util::secp::pedersen::Commitment;
 
 use grin_store as store;
-use core::core::{Block, BlockHeader, block, transaction};
+use core::core::{block, transaction, Block, BlockHeader};
 use core::core::hash::{Hash, Hashed};
 use core::core::target::Difficulty;
-use core::ser::{self, Readable, Writeable, Reader, Writer};
+use core::ser::{self, Readable, Reader, Writeable, Writer};
 use grin_store;
 use grin_store::pmmr::PMMRFileMetadata;

@@ -131,13 +131,13 @@ impl Error {
 	pub fn is_bad_data(&self) -> bool {
 		// shorter to match on all the "not the block's fault" errors
 		match *self {
-			Error::Unfit(_) |
-			Error::Orphan |
-			Error::StoreErr(_, _) |
-			Error::SerErr(_) |
-			Error::SumTreeErr(_)|
-			Error::GenesisBlockRequired |
-			Error::Other(_) => false,
+			Error::Unfit(_)
+			| Error::Orphan
+			| Error::StoreErr(_, _)
+			| Error::SerErr(_)
+			| Error::SumTreeErr(_)
+			| Error::GenesisBlockRequired
+			| Error::Other(_) => false,
 			_ => true,
 		}
 	}

@@ -291,11 +291,19 @@ pub trait ChainStore: Send + Sync {
 	/// UTXO MMR. Used as an index for spending and pruning.
 	fn get_kernel_pos(&self, commit: &Commitment) -> Result<u64, store::Error>;
 
-	/// Saves information about the last written PMMR file positions for each committed block
-	fn save_block_pmmr_file_metadata(&self, h: &Hash, md: &PMMRFileMetadataCollection) -> Result<(), store::Error>;
+	/// Saves information about the last written PMMR file positions for each
+	/// committed block
+	fn save_block_pmmr_file_metadata(
+		&self,
+		h: &Hash,
+		md: &PMMRFileMetadataCollection,
+	) -> Result<(), store::Error>;
 
 	/// Retrieves stored pmmr file metadata information for a given block
-	fn get_block_pmmr_file_metadata(&self, h: &Hash) -> Result<PMMRFileMetadataCollection, store::Error>;
+	fn get_block_pmmr_file_metadata(
+		&self,
+		h: &Hash,
+	) -> Result<PMMRFileMetadataCollection, store::Error>;
 
 	/// Delete stored pmmr file metadata information for a given block
 	fn delete_block_pmmr_file_metadata(&self, h: &Hash) -> Result<(), store::Error>;
@@ -306,7 +314,8 @@ pub trait ChainStore: Send + Sync {
 	fn setup_height(&self, bh: &BlockHeader, old_tip: &Tip) -> Result<(), store::Error>;
 }
 
-/// Single serializable struct to hold metadata about all PMMR file position for a given block
+/// Single serializable struct to hold metadata about all PMMR file position
+/// for a given block
 #[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
 pub struct PMMRFileMetadataCollection {
 	/// file metadata for the utxo file

@@ -314,7 +323,7 @@ pub struct PMMRFileMetadataCollection {
 	/// file metadata for the rangeproof file
 	pub rproof_file_md: PMMRFileMetadata,
 	/// file metadata for the kernel file
-	pub kernel_file_md: PMMRFileMetadata
+	pub kernel_file_md: PMMRFileMetadata,
 }
 
 impl Writeable for PMMRFileMetadataCollection {

@@ -329,9 +338,9 @@ impl Writeable for PMMRFileMetadataCollection {
 impl Readable for PMMRFileMetadataCollection {
 	fn read(reader: &mut Reader) -> Result<PMMRFileMetadataCollection, ser::Error> {
 		Ok(PMMRFileMetadataCollection {
-			utxo_file_md : PMMRFileMetadata::read(reader)?,
-			rproof_file_md : PMMRFileMetadata::read(reader)?,
-			kernel_file_md : PMMRFileMetadata::read(reader)?,
+			utxo_file_md: PMMRFileMetadata::read(reader)?,
+			rproof_file_md: PMMRFileMetadata::read(reader)?,
+			kernel_file_md: PMMRFileMetadata::read(reader)?,
 		})
 	}
 }

@@ -347,11 +356,13 @@ impl PMMRFileMetadataCollection {
 	}
 
 	/// Helper to create a new collection
-	pub fn new(utxo_md: PMMRFileMetadata,
+	pub fn new(
+		utxo_md: PMMRFileMetadata,
 		rproof_md: PMMRFileMetadata,
-		kernel_md: PMMRFileMetadata) -> PMMRFileMetadataCollection {
-		PMMRFileMetadataCollection {
-			utxo_file_md : utxo_md,
+		kernel_md: PMMRFileMetadata,
+	) -> PMMRFileMetadataCollection {
+		PMMRFileMetadataCollection {
+			utxo_file_md: utxo_md,
 			rproof_file_md: rproof_md,
 			kernel_file_md: kernel_md,
 		}
@ -88,13 +88,8 @@ fn data_files() {
|
|||
let prev = chain.head_header().unwrap();
|
||||
let difficulty = consensus::next_difficulty(chain.difficulty_iter()).unwrap();
|
||||
let pk = keychain.derive_key_id(n as u32).unwrap();
|
||||
let mut b = core::core::Block::new(
|
||||
&prev,
|
||||
vec![],
|
||||
&keychain,
|
||||
&pk,
|
||||
difficulty.clone(),
|
||||
).unwrap();
|
||||
let mut b =
|
||||
core::core::Block::new(&prev, vec![], &keychain, &pk, difficulty.clone()).unwrap();
|
||||
b.header.timestamp = prev.timestamp + time::Duration::seconds(60);
|
||||
|
||||
b.header.difficulty = difficulty.clone(); // TODO: overwrite here? really?
|
||||
|
@ -109,17 +104,20 @@ fn data_files() {
|
|||
|
||||
let prev_bhash = b.header.previous;
|
||||
let bhash = b.hash();
|
||||
chain.process_block(b.clone(), chain::Options::MINE).unwrap();
|
||||
chain
|
||||
.process_block(b.clone(), chain::Options::MINE)
|
||||
.unwrap();
|
||||
|
||||
let head = Tip::from_block(&b.header);
|
||||
|
||||
// Check we have indexes for the last block and the block previous
|
||||
let cur_pmmr_md = chain.get_block_pmmr_file_metadata(&head.last_block_h)
|
||||
let cur_pmmr_md = chain
|
||||
.get_block_pmmr_file_metadata(&head.last_block_h)
|
||||
.expect("block pmmr file data doesn't exist");
|
||||
let pref_pmmr_md = chain.get_block_pmmr_file_metadata(&head.prev_block_h)
|
||||
let pref_pmmr_md = chain
|
||||
.get_block_pmmr_file_metadata(&head.prev_block_h)
|
||||
.expect("previous block pmmr file data doesn't exist");
|
||||
|
||||
|
||||
println!("Cur_pmmr_md: {:?}", cur_pmmr_md);
|
||||
chain.validate().unwrap();
|
||||
}
|
||||
|
@ -137,7 +135,13 @@ fn prepare_block(kc: &Keychain, prev: &BlockHeader, chain: &Chain, diff: u64) ->
|
|||
b
|
||||
}
|
||||
|
||||
fn prepare_block_tx(kc: &Keychain, prev: &BlockHeader, chain: &Chain, diff: u64, txs: Vec<&Transaction>) -> Block {
|
||||
fn prepare_block_tx(
|
||||
kc: &Keychain,
|
||||
prev: &BlockHeader,
|
||||
chain: &Chain,
|
||||
diff: u64,
|
||||
txs: Vec<&Transaction>,
|
||||
) -> Block {
|
||||
let mut b = prepare_block_nosum(kc, prev, diff, txs);
|
||||
chain.set_sumtree_roots(&mut b, false).unwrap();
|
||||
b
|
||||
|
@ -149,18 +153,29 @@ fn prepare_fork_block(kc: &Keychain, prev: &BlockHeader, chain: &Chain, diff: u6
|
|||
b
|
||||
}
|
||||
|
||||
fn prepare_fork_block_tx(kc: &Keychain, prev: &BlockHeader, chain: &Chain, diff: u64, txs: Vec<&Transaction>) -> Block {
|
||||
fn prepare_fork_block_tx(
|
||||
kc: &Keychain,
|
||||
prev: &BlockHeader,
|
||||
chain: &Chain,
|
||||
diff: u64,
|
||||
txs: Vec<&Transaction>,
|
||||
) -> Block {
|
||||
let mut b = prepare_block_nosum(kc, prev, diff, txs);
|
||||
chain.set_sumtree_roots(&mut b, true).unwrap();
|
||||
b
|
||||
}
|
||||
|
||||
fn prepare_block_nosum(kc: &Keychain, prev: &BlockHeader, diff: u64, txs: Vec<&Transaction>) -> Block {
|
||||
fn prepare_block_nosum(
|
||||
kc: &Keychain,
|
||||
prev: &BlockHeader,
|
||||
diff: u64,
|
||||
txs: Vec<&Transaction>,
|
||||
) -> Block {
|
||||
let key_id = kc.derive_key_id(diff as u32).unwrap();
|
||||
|
||||
let mut b = match core::core::Block::new(prev, txs, kc, &key_id, Difficulty::from_num(diff)) {
|
||||
Err(e) => panic!("{:?}",e),
|
||||
Ok(b) => b
|
||||
Err(e) => panic!("{:?}", e),
|
||||
Ok(b) => b,
|
||||
};
|
||||
b.header.timestamp = prev.timestamp + time::Duration::seconds(60);
|
||||
b.header.total_difficulty = Difficulty::from_num(diff);
|
||||
|
|
|
@ -26,7 +26,7 @@ use std::sync::Arc;
|
|||
|
||||
use chain::Chain;
|
||||
use chain::types::*;
|
||||
use core::core::{Block, BlockHeader, Transaction, OutputIdentifier, OutputFeatures, build};
|
||||
use core::core::{build, Block, BlockHeader, OutputFeatures, OutputIdentifier, Transaction};
|
||||
use core::core::hash::Hashed;
|
||||
use core::core::target::Difficulty;
|
||||
use core::consensus;
|
||||
|
@ -76,13 +76,8 @@ fn mine_empty_chain() {
|
|||
let prev = chain.head_header().unwrap();
|
||||
let difficulty = consensus::next_difficulty(chain.difficulty_iter()).unwrap();
|
||||
let pk = keychain.derive_key_id(n as u32).unwrap();
|
||||
let mut b = core::core::Block::new(
|
||||
&prev,
|
||||
vec![],
|
||||
&keychain,
|
||||
&pk,
|
||||
difficulty.clone(),
|
||||
).unwrap();
|
||||
let mut b =
|
||||
core::core::Block::new(&prev, vec![], &keychain, &pk, difficulty.clone()).unwrap();
|
||||
b.header.timestamp = prev.timestamp + time::Duration::seconds(60);
|
||||
|
||||
b.header.difficulty = difficulty.clone(); // TODO: overwrite here? really?
|
||||
|
@ -181,11 +176,13 @@ fn mine_losing_fork() {
|
|||
let bfork = prepare_block(&kc, &b1head, &chain, 3);
|
||||
|
||||
// add higher difficulty first, prepare its successor, then fork
|
||||
// with lower diff
|
||||
// with lower diff
|
||||
chain.process_block(b2, chain::Options::SKIP_POW).unwrap();
|
||||
assert_eq!(chain.head_header().unwrap().hash(), b2head.hash());
|
||||
let b3 = prepare_block(&kc, &b2head, &chain, 5);
|
||||
chain.process_block(bfork, chain::Options::SKIP_POW).unwrap();
|
||||
chain
|
||||
.process_block(bfork, chain::Options::SKIP_POW)
|
||||
.unwrap();
|
||||
|
||||
// adding the successor
|
||||
let b3head = b3.header.clone();
|
||||
|
@ -206,12 +203,14 @@ fn longer_fork() {
|
|||
// for the forked chain
|
||||
let mut prev = chain.head_header().unwrap();
|
||||
for n in 0..10 {
|
||||
let b = prepare_block(&kc, &prev, &chain, 2*n + 2);
|
||||
let b = prepare_block(&kc, &prev, &chain, 2 * n + 2);
|
||||
let bh = b.header.clone();
|
||||
|
||||
if n < 5 {
|
||||
let b_fork = b.clone();
|
||||
chain_fork.process_block(b_fork, chain::Options::SKIP_POW).unwrap();
|
||||
chain_fork
|
||||
.process_block(b_fork, chain::Options::SKIP_POW)
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
chain.process_block(b, chain::Options::SKIP_POW).unwrap();
|
||||
|
@ -227,13 +226,15 @@ fn longer_fork() {
|
|||
|
||||
let mut prev_fork = head_fork.clone();
|
||||
for n in 0..7 {
|
||||
let b_fork = prepare_block(&kc, &prev_fork, &chain_fork, 2*n + 11);
|
||||
let b_fork = prepare_block(&kc, &prev_fork, &chain_fork, 2 * n + 11);
|
||||
let bh_fork = b_fork.header.clone();
|
||||
|
||||
let b = b_fork.clone();
|
||||
chain.process_block(b, chain::Options::SKIP_POW).unwrap();
|
||||
|
||||
chain_fork.process_block(b_fork, chain::Options::SKIP_POW).unwrap();
|
||||
chain_fork
|
||||
.process_block(b_fork, chain::Options::SKIP_POW)
|
||||
.unwrap();
|
||||
prev_fork = bh_fork;
|
||||
}
|
||||
}
|
||||
|
@ -254,7 +255,9 @@ fn spend_in_fork() {
|
|||
let out_id = OutputIdentifier::from_output(&b.outputs[0]);
|
||||
assert!(out_id.features.contains(OutputFeatures::COINBASE_OUTPUT));
|
||||
fork_head = b.header.clone();
|
||||
chain.process_block(b.clone(), chain::Options::SKIP_POW).unwrap();
|
||||
chain
|
||||
.process_block(b.clone(), chain::Options::SKIP_POW)
|
||||
.unwrap();
|
||||
|
||||
let merkle_proof = chain.get_merkle_proof(&out_id, &b).unwrap();
|
||||
|
||||
|
@ -290,7 +293,9 @@ fn spend_in_fork() {
|
|||
|
||||
let next = prepare_block_tx(&kc, &fork_head, &chain, 7, vec![&tx1]);
|
||||
let prev_main = next.header.clone();
|
||||
chain.process_block(next.clone(), chain::Options::SKIP_POW).unwrap();
|
||||
chain
|
||||
.process_block(next.clone(), chain::Options::SKIP_POW)
|
||||
.unwrap();
|
||||
chain.validate().unwrap();
|
||||
|
||||
println!("tx 1 processed, should have 6 outputs or 396 bytes in file, first skipped");
|
||||
|
@ -310,7 +315,7 @@ fn spend_in_fork() {
|
|||
chain.validate().unwrap();
|
||||
|
||||
println!("tx 2 processed");
|
||||
/*panic!("Stop");*/
|
||||
/* panic!("Stop"); */
|
||||
|
||||
// mine 2 forked blocks from the first
|
||||
let fork = prepare_fork_block_tx(&kc, &fork_head, &chain, 6, vec![&tx1]);
|
||||
|
@ -319,28 +324,48 @@ fn spend_in_fork() {
|
|||
|
||||
let fork_next = prepare_fork_block_tx(&kc, &prev_fork, &chain, 8, vec![&tx2]);
|
||||
let prev_fork = fork_next.header.clone();
|
||||
chain.process_block(fork_next, chain::Options::SKIP_POW).unwrap();
|
||||
chain
|
||||
.process_block(fork_next, chain::Options::SKIP_POW)
|
||||
.unwrap();
|
||||
chain.validate().unwrap();
|
||||
|
||||
// check state
|
||||
let head = chain.head_header().unwrap();
|
||||
assert_eq!(head.height, 6);
|
||||
assert_eq!(head.hash(), prev_main.hash());
|
||||
assert!(chain.is_unspent(&OutputIdentifier::from_output(&tx2.outputs[0])).is_ok());
|
||||
assert!(chain.is_unspent(&OutputIdentifier::from_output(&tx1.outputs[0])).is_err());
|
||||
assert!(
|
||||
chain
|
||||
.is_unspent(&OutputIdentifier::from_output(&tx2.outputs[0]))
|
||||
.is_ok()
|
||||
);
|
||||
assert!(
|
||||
chain
|
||||
.is_unspent(&OutputIdentifier::from_output(&tx1.outputs[0]))
|
||||
.is_err()
|
||||
);
|
||||
|
||||
// make the fork win
|
||||
let fork_next = prepare_fork_block(&kc, &prev_fork, &chain, 10);
|
||||
let prev_fork = fork_next.header.clone();
|
||||
chain.process_block(fork_next, chain::Options::SKIP_POW).unwrap();
|
||||
chain
|
||||
.process_block(fork_next, chain::Options::SKIP_POW)
|
||||
.unwrap();
|
||||
chain.validate().unwrap();
|
||||
|
||||
// check state
|
||||
let head = chain.head_header().unwrap();
|
||||
assert_eq!(head.height, 7);
|
||||
assert_eq!(head.hash(), prev_fork.hash());
|
||||
assert!(chain.is_unspent(&OutputIdentifier::from_output(&tx2.outputs[0])).is_ok());
|
||||
assert!(chain.is_unspent(&OutputIdentifier::from_output(&tx1.outputs[0])).is_err());
|
||||
assert!(
|
||||
chain
|
||||
.is_unspent(&OutputIdentifier::from_output(&tx2.outputs[0]))
|
||||
.is_ok()
|
||||
);
|
||||
assert!(
|
||||
chain
|
||||
.is_unspent(&OutputIdentifier::from_output(&tx1.outputs[0]))
|
||||
.is_err()
|
||||
);
|
||||
}
|
||||
|
||||
fn prepare_block(kc: &Keychain, prev: &BlockHeader, chain: &Chain, diff: u64) -> Block {
|
||||
|
@ -349,7 +374,13 @@ fn prepare_block(kc: &Keychain, prev: &BlockHeader, chain: &Chain, diff: u64) ->
|
|||
b
|
||||
}
|
||||
|
||||
fn prepare_block_tx(kc: &Keychain, prev: &BlockHeader, chain: &Chain, diff: u64, txs: Vec<&Transaction>) -> Block {
|
||||
fn prepare_block_tx(
|
||||
kc: &Keychain,
|
||||
prev: &BlockHeader,
|
||||
chain: &Chain,
|
||||
diff: u64,
|
||||
txs: Vec<&Transaction>,
|
||||
) -> Block {
|
||||
let mut b = prepare_block_nosum(kc, prev, diff, txs);
|
||||
chain.set_sumtree_roots(&mut b, false).unwrap();
|
||||
b
|
||||
|
@ -361,18 +392,29 @@ fn prepare_fork_block(kc: &Keychain, prev: &BlockHeader, chain: &Chain, diff: u6
|
|||
b
|
||||
}
|
||||
|
||||
fn prepare_fork_block_tx(kc: &Keychain, prev: &BlockHeader, chain: &Chain, diff: u64, txs: Vec<&Transaction>) -> Block {
|
||||
fn prepare_fork_block_tx(
|
||||
kc: &Keychain,
|
||||
prev: &BlockHeader,
|
||||
chain: &Chain,
|
||||
diff: u64,
|
||||
txs: Vec<&Transaction>,
|
||||
) -> Block {
|
||||
let mut b = prepare_block_nosum(kc, prev, diff, txs);
|
||||
chain.set_sumtree_roots(&mut b, true).unwrap();
|
||||
b
|
||||
}
|
||||
|
||||
fn prepare_block_nosum(kc: &Keychain, prev: &BlockHeader, diff: u64, txs: Vec<&Transaction>) -> Block {
|
||||
fn prepare_block_nosum(
|
||||
kc: &Keychain,
|
||||
prev: &BlockHeader,
|
||||
diff: u64,
|
||||
txs: Vec<&Transaction>,
|
||||
) -> Block {
|
||||
let key_id = kc.derive_key_id(diff as u32).unwrap();
|
||||
|
||||
let mut b = match core::core::Block::new(prev, txs, kc, &key_id, Difficulty::from_num(diff)) {
|
||||
Err(e) => panic!("{:?}",e),
|
||||
Ok(b) => b
|
||||
Err(e) => panic!("{:?}", e),
|
||||
Ok(b) => b,
|
||||
};
|
||||
b.header.timestamp = prev.timestamp + time::Duration::seconds(60);
|
||||
b.header.total_difficulty = Difficulty::from_num(diff);
|
||||
|
|
|
@ -46,20 +46,23 @@ fn test_various_store_indices() {
|
|||
global::set_mining_mode(ChainTypes::AutomatedTesting);
|
||||
let genesis = pow::mine_genesis_block(None).unwrap();
|
||||
chain_store.save_block(&genesis).unwrap();
|
||||
chain_store.setup_height(&genesis.header, &Tip::new(genesis.hash())).unwrap();
|
||||
chain_store
|
||||
.setup_height(&genesis.header, &Tip::new(genesis.hash()))
|
||||
.unwrap();
|
||||
|
||||
let block = Block::new(
|
||||
&genesis.header,
|
||||
vec![],
|
||||
&keychain,
|
||||
&key_id,
|
||||
Difficulty::one()
|
||||
Difficulty::one(),
|
||||
).unwrap();
|
||||
let block_hash = block.hash();
|
||||
|
||||
chain_store.save_block(&block).unwrap();
|
||||
chain_store.setup_height(&block.header,
|
||||
&Tip::from_block(&block.header)).unwrap();
|
||||
chain_store
|
||||
.setup_height(&block.header, &Tip::from_block(&block.header))
|
||||
.unwrap();
|
||||
|
||||
let block_header = chain_store.get_block_header(&block_hash).unwrap();
|
||||
assert_eq!(block_header.hash(), block_hash);
|
||||
|
|
|
@ -76,13 +76,8 @@ fn test_coinbase_maturity() {
|
|||
let key_id3 = keychain.derive_key_id(3).unwrap();
|
||||
let key_id4 = keychain.derive_key_id(4).unwrap();
|
||||
|
||||
let mut block = core::core::Block::new(
|
||||
&prev,
|
||||
vec![],
|
||||
&keychain,
|
||||
&key_id1,
|
||||
Difficulty::one()
|
||||
).unwrap();
|
||||
let mut block =
|
||||
core::core::Block::new(&prev, vec![], &keychain, &key_id1, Difficulty::one()).unwrap();
|
||||
block.header.timestamp = prev.timestamp + time::Duration::seconds(60);
|
||||
|
||||
let difficulty = consensus::next_difficulty(chain.difficulty_iter()).unwrap();
|
||||
|
@ -109,7 +104,9 @@ fn test_coinbase_maturity() {
|
|||
// we will need this later when we want to spend the coinbase output
|
||||
let block_hash = block.hash();
|
||||
|
||||
chain.process_block(block.clone(), chain::Options::MINE).unwrap();
|
||||
chain
|
||||
.process_block(block.clone(), chain::Options::MINE)
|
||||
.unwrap();
|
||||
|
||||
let merkle_proof = chain.get_merkle_proof(&out_id, &block).unwrap();
|
||||
|
||||
|
@ -124,26 +121,20 @@ fn test_coinbase_maturity() {
|
|||
// this is not a valid tx as the coinbase output cannot be spent yet
|
||||
let coinbase_txn = build::transaction(
|
||||
vec![
|
||||
build::coinbase_input(
|
||||
amount,
|
||||
block_hash,
|
||||
merkle_proof.clone(),
|
||||
key_id1.clone(),
|
||||
),
|
||||
build::coinbase_input(amount, block_hash, merkle_proof.clone(), key_id1.clone()),
|
||||
build::output(amount - 2, key_id2.clone()),
|
||||
build::with_fee(2),
|
||||
],
|
||||
&keychain,
|
||||
).unwrap();
|
||||
|
||||
let mut block =
|
||||
core::core::Block::new(
|
||||
&prev,
|
||||
vec![&coinbase_txn],
|
||||
&keychain,
|
||||
&key_id3,
|
||||
Difficulty::one(),
|
||||
).unwrap();
|
||||
let mut block = core::core::Block::new(
|
||||
&prev,
|
||||
vec![&coinbase_txn],
|
||||
&keychain,
|
||||
&key_id3,
|
||||
Difficulty::one(),
|
||||
).unwrap();
|
||||
block.header.timestamp = prev.timestamp + time::Duration::seconds(60);
|
||||
|
||||
let difficulty = consensus::next_difficulty(chain.difficulty_iter()).unwrap();
|
||||
|
@ -169,13 +160,8 @@ fn test_coinbase_maturity() {
|
|||
let keychain = Keychain::from_random_seed().unwrap();
|
||||
let pk = keychain.derive_key_id(1).unwrap();
|
||||
|
||||
let mut block = core::core::Block::new(
|
||||
&prev,
|
||||
vec![],
|
||||
&keychain,
|
||||
&pk,
|
||||
Difficulty::one()
|
||||
).unwrap();
|
||||
let mut block =
|
||||
core::core::Block::new(&prev, vec![], &keychain, &pk, Difficulty::one()).unwrap();
|
||||
block.header.timestamp = prev.timestamp + time::Duration::seconds(60);
|
||||
|
||||
let difficulty = consensus::next_difficulty(chain.difficulty_iter()).unwrap();
|
||||
|
@ -196,12 +182,7 @@ fn test_coinbase_maturity() {
|
|||
|
||||
let coinbase_txn = build::transaction(
|
||||
vec![
|
||||
build::coinbase_input(
|
||||
amount,
|
||||
block_hash,
|
||||
merkle_proof.clone(),
|
||||
key_id1.clone(),
|
||||
),
|
||||
build::coinbase_input(amount, block_hash, merkle_proof.clone(), key_id1.clone()),
|
||||
build::output(amount - 2, key_id2.clone()),
|
||||
build::with_fee(2),
|
||||
],
|
||||
|
|
|
@ -121,8 +121,8 @@ impl GlobalConfig {
|
|||
}
|
||||
|
||||
// Try to parse the config file if it exists
|
||||
// explode if it does exist but something's wrong
|
||||
// with it
|
||||
// explode if it does exist but something's wrong
|
||||
// with it
|
||||
return_value.read_config()
|
||||
}
|
||||
|
||||
|
@ -164,9 +164,10 @@ impl GlobalConfig {
|
|||
match encoded {
|
||||
Ok(enc) => return Ok(enc),
|
||||
Err(e) => {
|
||||
return Err(ConfigError::SerializationError(
|
||||
String::from(format!("{}", e)),
|
||||
));
|
||||
return Err(ConfigError::SerializationError(String::from(format!(
|
||||
"{}",
|
||||
e
|
||||
))));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -20,7 +20,6 @@
|
|||
#![deny(unused_mut)]
|
||||
#![warn(missing_docs)]
|
||||
|
||||
|
||||
extern crate serde;
|
||||
#[macro_use]
|
||||
extern crate serde_derive;
|
||||
|
|
|
@ -45,8 +45,7 @@ impl fmt::Display for ConfigError {
|
|||
ConfigError::ParseError(ref file_name, ref message) => write!(
|
||||
f,
|
||||
"Error parsing configuration file at {} - {}",
|
||||
file_name,
|
||||
message
|
||||
file_name, message
|
||||
),
|
||||
ConfigError::FileIOError(ref file_name, ref message) => {
|
||||
write!(f, "{} {}", message, file_name)
|
||||
|
|
|
@ -29,7 +29,7 @@ impl Summable for TestElem {
|
|||
type Sum = u64;
|
||||
fn sum(&self) -> u64 {
|
||||
// sums are not allowed to overflow, so we use this simple
|
||||
// non-injective "sum" function that will still be homomorphic
|
||||
// non-injective "sum" function that will still be homomorphic
|
||||
self.0[0] as u64 * 0x1000 + self.0[1] as u64 * 0x100 + self.0[2] as u64 * 0x10
|
||||
+ self.0[3] as u64
|
||||
}
|
||||
|
@ -51,7 +51,7 @@ fn bench_small_tree(b: &mut Bencher) {
|
|||
let mut big_tree = SumTree::new();
|
||||
for i in 0..1000 {
|
||||
// To avoid RNG overflow we generate random elements that are small.
|
||||
// Though to avoid repeat elements they have to be reasonably big.
|
||||
// Though to avoid repeat elements they have to be reasonably big.
|
||||
let new_elem;
|
||||
let word1 = rng.gen::<u16>() as u32;
|
||||
let word2 = rng.gen::<u16>() as u32;
|
||||
|
|
|
@@ -91,8 +91,7 @@ pub const MAX_BLOCK_INPUTS: usize = 300_000; // soft fork down when too_high
/// Whether a block exceeds the maximum acceptable weight
pub fn exceeds_weight(input_len: usize, output_len: usize, kernel_len: usize) -> bool {
	input_len * BLOCK_INPUT_WEIGHT + output_len * BLOCK_OUTPUT_WEIGHT
		+ kernel_len * BLOCK_KERNEL_WEIGHT > MAX_BLOCK_WEIGHT
		|| input_len > MAX_BLOCK_INPUTS
		+ kernel_len * BLOCK_KERNEL_WEIGHT > MAX_BLOCK_WEIGHT || input_len > MAX_BLOCK_INPUTS
}

/// Fork every 250,000 blocks for first 2 years, simple number and just a
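
For context on the rule this hunk re-wraps: a block's weight is a linear combination of its input, output and kernel counts, capped alongside a hard limit on the input count alone. A minimal self-contained sketch of the same shape of check, with illustrative constants rather than grin's real consensus values:

	// Sketch only; the weights below are placeholders, not grin's constants.
	const BLOCK_INPUT_WEIGHT: usize = 1;
	const BLOCK_OUTPUT_WEIGHT: usize = 10;
	const BLOCK_KERNEL_WEIGHT: usize = 2;
	const MAX_BLOCK_WEIGHT: usize = 80_000;
	const MAX_BLOCK_INPUTS: usize = 300_000;

	fn exceeds_weight(input_len: usize, output_len: usize, kernel_len: usize) -> bool {
		// weight grows linearly with the number of inputs, outputs and kernels
		let weight = input_len * BLOCK_INPUT_WEIGHT + output_len * BLOCK_OUTPUT_WEIGHT
			+ kernel_len * BLOCK_KERNEL_WEIGHT;
		weight > MAX_BLOCK_WEIGHT || input_len > MAX_BLOCK_INPUTS
	}

	fn main() {
		assert!(!exceeds_weight(1, 2, 1));
		assert!(exceeds_weight(300_001, 0, 0));
	}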
@@ -186,21 +185,24 @@ where
	// Get the difficulty sum for averaging later
	// Which in this case is the sum of the last
	// DIFFICULTY_ADJUST_WINDOW elements
	let diff_sum = diff_data.iter()
	let diff_sum = diff_data
		.iter()
		.skip(MEDIAN_TIME_WINDOW as usize)
		.take(DIFFICULTY_ADJUST_WINDOW as usize)
		.fold(Difficulty::zero(), |sum, d| sum + d.clone().unwrap().1);

	// Obtain the median window for the earlier time period
	// which is just the first MEDIAN_TIME_WINDOW elements
	let mut window_earliest: Vec<u64> = diff_data.iter()
	let mut window_earliest: Vec<u64> = diff_data
		.iter()
		.take(MEDIAN_TIME_WINDOW as usize)
		.map(|n| n.clone().unwrap().0)
		.collect();

	// Obtain the median window for the latest time period
	// i.e. the last MEDIAN_TIME_WINDOW elements
	let mut window_latest: Vec<u64> = diff_data.iter()
	let mut window_latest: Vec<u64> = diff_data
		.iter()
		.skip(DIFFICULTY_ADJUST_WINDOW as usize)
		.map(|n| n.clone().unwrap().0)
		.collect();

@@ -212,15 +214,14 @@ where
	let earliest_ts = window_earliest[MEDIAN_TIME_INDEX as usize];

	// Calculate the average difficulty
	let diff_avg = diff_sum.into_num() /
		Difficulty::from_num(DIFFICULTY_ADJUST_WINDOW).into_num();
	let diff_avg = diff_sum.into_num() / Difficulty::from_num(DIFFICULTY_ADJUST_WINDOW).into_num();

	// Actual undampened time delta
	let ts_delta = latest_ts - earliest_ts;

	// Apply dampening
	let ts_damp = match diff_avg {
		n if n >= DAMP_FACTOR => ((DAMP_FACTOR-1) * BLOCK_TIME_WINDOW + ts_delta) / DAMP_FACTOR,
		n if n >= DAMP_FACTOR => ((DAMP_FACTOR - 1) * BLOCK_TIME_WINDOW + ts_delta) / DAMP_FACTOR,
		_ => ts_delta,
	};

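
As a reading aid for the adjustment logic above: the observed timespan is pulled toward the target window before the average difficulty is rescaled, so difficulty tracks hash rate without overreacting to noise. A hedged sketch with illustrative constants (the real code also skips dampening below a minimum average difficulty, as the match above shows):

	// Sketch only; grin's actual parameters live in core/src/consensus.rs.
	const BLOCK_TIME_WINDOW: u64 = 60 * 23; // target seconds across the window
	const DAMP_FACTOR: u64 = 3;

	/// Dampened timespan: a weighted mean of the target window and the
	/// observed delta.
	fn damp(ts_delta: u64) -> u64 {
		((DAMP_FACTOR - 1) * BLOCK_TIME_WINDOW + ts_delta) / DAMP_FACTOR
	}

	/// Scale average difficulty by target-time over (dampened) actual-time.
	fn next_difficulty(diff_avg: u64, ts_delta: u64) -> u64 {
		std::cmp::max(diff_avg * BLOCK_TIME_WINDOW / damp(ts_delta), 1)
	}

	fn main() {
		// blocks arrived twice as fast as targeted: difficulty rises,
		// but by less than 2x thanks to dampening
		assert_eq!(next_difficulty(1000, BLOCK_TIME_WINDOW / 2), 1200);
	}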
@@ -233,8 +234,7 @@ where
		ts_damp
	};

	let difficulty =
		diff_avg * Difficulty::from_num(BLOCK_TIME_WINDOW).into_num()
	let difficulty = diff_avg * Difficulty::from_num(BLOCK_TIME_WINDOW).into_num()
		/ Difficulty::from_num(adj_ts).into_num();

	Ok(max(Difficulty::from_num(difficulty), Difficulty::one()))

@@ -18,26 +18,15 @@ use time;
use rand::{thread_rng, Rng};
use std::collections::HashSet;

use core::{
	Committed,
	Input,
	Output,
	ShortId,
	SwitchCommitHash,
	Proof,
	ProofMessageElements,
	TxKernel,
	Transaction,
	OutputFeatures,
	KernelFeatures
};
use core::{Committed, Input, KernelFeatures, Output, OutputFeatures, Proof, ProofMessageElements,
           ShortId, SwitchCommitHash, Transaction, TxKernel};
use consensus;
use consensus::{exceeds_weight, reward, REWARD, VerifySortOrder};
use consensus::{exceeds_weight, reward, VerifySortOrder, REWARD};
use core::hash::{Hash, Hashed, ZERO_HASH};
use core::id::ShortIdentifiable;
use core::target::Difficulty;
use core::transaction;
use ser::{self, Readable, Writeable, Reader, Writer, WriteableSorted, read_and_verify_sorted};
use ser::{self, read_and_verify_sorted, Readable, Reader, Writeable, WriteableSorted, Writer};
use global;
use keychain;
use keychain::BlindingFactor;

@@ -61,7 +50,8 @@ pub enum Error {
	KernelLockHeight(u64),
	/// Underlying tx related error
	Transaction(transaction::Error),
	/// Underlying Secp256k1 error (signature validation or invalid public key typically)
	/// Underlying Secp256k1 error (signature validation or invalid public key
	/// typically)
	Secp(secp::Error),
	/// Underlying keychain related error
	Keychain(keychain::Error),

@@ -69,15 +59,17 @@ pub enum Error {
	Consensus(consensus::Error),
	/// Coinbase has not yet matured and cannot be spent (1,000 blocks)
	ImmatureCoinbase {
		/// The height of the block containing the input spending the coinbase output
		/// The height of the block containing the input spending the coinbase
		/// output
		height: u64,
		/// The lock_height needed to be reached for the coinbase output to mature
		/// The lock_height needed to be reached for the coinbase output to
		/// mature
		lock_height: u64,
	},
	/// Underlying Merkle proof error
	MerkleProof,
	/// Other unspecified error condition
	Other(String)
	Other(String),
}

impl From<transaction::Error> for Error {

@@ -129,7 +121,8 @@ pub struct BlockHeader {
	pub difficulty: Difficulty,
	/// Total accumulated difficulty since genesis block
	pub total_difficulty: Difficulty,
	/// The single aggregate "offset" that needs to be applied for all commitments to sum
	/// The single aggregate "offset" that needs to be applied for all
	/// commitments to sum
	pub kernel_offset: BlindingFactor,
}

@@ -229,7 +222,8 @@ pub struct CompactBlock {
	pub out_full: Vec<Output>,
	/// List of full kernels - specifically the coinbase kernel(s)
	pub kern_full: Vec<TxKernel>,
	/// List of transaction kernels, excluding those in the full list (short_ids)
	/// List of transaction kernels, excluding those in the full list
	/// (short_ids)
	pub kern_ids: Vec<ShortId>,
}

@@ -254,7 +248,8 @@ impl Writeable for CompactBlock {
		let mut kern_full = self.kern_full.clone();
		let mut kern_ids = self.kern_ids.clone();

		// Consensus rule that everything is sorted in lexicographical order on the wire.
		// Consensus rule that everything is sorted in lexicographical order on the
		// wire.
		try!(out_full.write_sorted(writer));
		try!(kern_full.write_sorted(writer));
		try!(kern_ids.write_sorted(writer));

@@ -298,7 +293,8 @@ pub struct Block {
	pub inputs: Vec<Input>,
	/// List of transaction outputs
	pub outputs: Vec<Output>,
	/// List of kernels with associated proofs (note these are offset from tx_kernels)
	/// List of kernels with associated proofs (note these are offset from
	/// tx_kernels)
	pub kernels: Vec<TxKernel>,
}

@@ -321,7 +317,8 @@ impl Writeable for Block {
		let mut outputs = self.outputs.clone();
		let mut kernels = self.kernels.clone();

		// Consensus rule that everything is sorted in lexicographical order on the wire.
		// Consensus rule that everything is sorted in lexicographical order on the
		// wire.
		try!(inputs.write_sorted(writer));
		try!(outputs.write_sorted(writer));
		try!(kernels.write_sorted(writer));

@@ -394,12 +391,8 @@ impl Block {
		difficulty: Difficulty,
	) -> Result<Block, Error> {
		let fees = txs.iter().map(|tx| tx.fee()).sum();
		let (reward_out, reward_proof) = Block::reward_output(
			keychain,
			key_id,
			fees,
			prev.height + 1,
		)?;
		let (reward_out, reward_proof) =
			Block::reward_output(keychain, key_id, fees, prev.height + 1)?;
		let block = Block::with_reward(prev, txs, reward_out, reward_proof, difficulty)?;
		Ok(block)
	}

@@ -544,9 +537,7 @@ impl Block {
			.iter()
			.cloned()
			.filter(|x| *x != BlindingFactor::zero())
			.filter_map(|x| {
				x.secret_key(&secp).ok()
			})
			.filter_map(|x| x.secret_key(&secp).ok())
			.collect::<Vec<_>>();
		if keys.is_empty() {
			BlindingFactor::zero()

@@ -557,25 +548,22 @@ impl Block {
			}
		};

		Ok(
			Block {
				header: BlockHeader {
					height: prev.height + 1,
					timestamp: time::Tm {
						tm_nsec: 0,
						..time::now_utc()
					},
					previous: prev.hash(),
					total_difficulty: difficulty +
						prev.total_difficulty.clone(),
					kernel_offset: kernel_offset,
					..Default::default()
		Ok(Block {
			header: BlockHeader {
				height: prev.height + 1,
				timestamp: time::Tm {
					tm_nsec: 0,
					..time::now_utc()
				},
				inputs: inputs,
				outputs: outputs,
				kernels: kernels,
			}.cut_through(),
		)
				previous: prev.hash(),
				total_difficulty: difficulty + prev.total_difficulty.clone(),
				kernel_offset: kernel_offset,
				..Default::default()
			},
			inputs: inputs,
			outputs: outputs,
			kernels: kernels,
		}.cut_through())
	}

	/// Blockhash, computed using only the header

@@ -702,10 +690,7 @@ impl Block {

		// sum all kernels commitments
		let kernel_sum = {
			let mut kernel_commits = self.kernels
				.iter()
				.map(|x| x.excess)
				.collect::<Vec<_>>();
			let mut kernel_commits = self.kernels.iter().map(|x| x.excess).collect::<Vec<_>>();

			let secp = static_secp_instance();
			let secp = secp.lock().unwrap();

@@ -763,10 +748,7 @@ impl Block {
				cb_outs.iter().map(|x| x.commitment()).collect(),
				vec![over_commit],
			)?;
			kerns_sum = secp.commit_sum(
				cb_kerns.iter().map(|x| x.excess).collect(),
				vec![],
			)?;
			kerns_sum = secp.commit_sum(cb_kerns.iter().map(|x| x.excess).collect(), vec![])?;
		}

		if kerns_sum != out_adjust_sum {

@@ -775,7 +757,8 @@ impl Block {
		Ok(())
	}

	/// Builds the blinded output and related signature proof for the block reward.
	/// Builds the blinded output and related signature proof for the block
	/// reward.
	pub fn reward_output(
		keychain: &keychain::Keychain,
		key_id: &keychain::Identifier,

@@ -784,11 +767,8 @@ impl Block {
	) -> Result<(Output, TxKernel), keychain::Error> {
		let commit = keychain.commit(reward(fees), key_id)?;
		let switch_commit = keychain.switch_commit(key_id)?;
		let switch_commit_hash = SwitchCommitHash::from_switch_commit(
			switch_commit,
			keychain,
			key_id,
		);
		let switch_commit_hash =
			SwitchCommitHash::from_switch_commit(switch_commit, keychain, key_id);

		trace!(
			LOGGER,

@@ -803,11 +783,15 @@ impl Block {
		);

		let value = reward(fees);
		let msg = (ProofMessageElements {
			value: value
		}).to_proof_message();
		let msg = (ProofMessageElements { value: value }).to_proof_message();

		let rproof = keychain.range_proof(value, key_id, commit, Some(switch_commit_hash.as_ref().to_vec()), msg)?;
		let rproof = keychain.range_proof(
			value,
			key_id,
			commit,
			Some(switch_commit_hash.as_ref().to_vec()),
			msg,
		)?;

		let output = Output {
			features: OutputFeatures::COINBASE_OUTPUT,

@@ -826,7 +810,8 @@ impl Block {
		// For a coinbase output the fee is 0 and the lock_height is
		// the lock_height of the coinbase output itself,
		// not the lock_height of the tx (there is no tx for a coinbase output).
		// This output will not be spendable earlier than lock_height (and we sign this here).
		// This output will not be spendable earlier than lock_height (and we sign this
		// here).
		let msg = secp::Message::from_slice(&kernel_sig_msg(0, height))?;
		let sig = keychain.aggsig_sign_from_key_id(&msg, &key_id)?;

@@ -850,7 +835,7 @@ mod test {
	use core::build::{self, input, output, with_fee};
	use core::test::{tx1i2o, tx2i1o};
	use keychain::{Identifier, Keychain};
	use consensus::{MAX_BLOCK_WEIGHT, BLOCK_OUTPUT_WEIGHT};
	use consensus::{BLOCK_OUTPUT_WEIGHT, MAX_BLOCK_WEIGHT};
	use std::time::Instant;

	use util::secp;

@@ -864,7 +849,7 @@ mod test {
			txs,
			keychain,
			&key_id,
			Difficulty::one()
			Difficulty::one(),
		).unwrap()
	}

@@ -901,8 +886,7 @@ mod test {

		let now = Instant::now();
		parts.append(&mut vec![input(500000, pks.pop().unwrap()), with_fee(2)]);
		let mut tx = build::transaction(parts, &keychain)
			.unwrap();
		let mut tx = build::transaction(parts, &keychain).unwrap();
		println!("Build tx: {}", now.elapsed().as_secs());

		let b = new_block(vec![&mut tx], &keychain);

@@ -924,7 +908,6 @@ mod test {
			b.verify_coinbase(),
			Err(Error::Secp(secp::Error::IncorrectCommitSum))
		);

	}

	#[test]

@@ -989,19 +972,19 @@ mod test {
		let keychain = Keychain::from_random_seed().unwrap();
		let mut b = new_block(vec![], &keychain);

		assert!(b.outputs[0].features.contains(OutputFeatures::COINBASE_OUTPUT));
		b.outputs[0].features.remove(OutputFeatures::COINBASE_OUTPUT);

		assert_eq!(
			b.verify_coinbase(),
			Err(Error::CoinbaseSumMismatch)
		assert!(
			b.outputs[0]
				.features
				.contains(OutputFeatures::COINBASE_OUTPUT)
		);
		b.outputs[0]
			.features
			.remove(OutputFeatures::COINBASE_OUTPUT);

		assert_eq!(b.verify_coinbase(), Err(Error::CoinbaseSumMismatch));
		assert_eq!(b.verify_kernels(), Ok(()));

		assert_eq!(
			b.validate(),
			Err(Error::CoinbaseSumMismatch)
		);
		assert_eq!(b.validate(), Err(Error::CoinbaseSumMismatch));
	}

	#[test]

@@ -1011,8 +994,14 @@ mod test {
		let keychain = Keychain::from_random_seed().unwrap();
		let mut b = new_block(vec![], &keychain);

		assert!(b.kernels[0].features.contains(KernelFeatures::COINBASE_KERNEL));
		b.kernels[0].features.remove(KernelFeatures::COINBASE_KERNEL);
		assert!(
			b.kernels[0]
				.features
				.contains(KernelFeatures::COINBASE_KERNEL)
		);
		b.kernels[0]
			.features
			.remove(KernelFeatures::COINBASE_KERNEL);

		assert_eq!(
			b.verify_coinbase(),

@@ -1047,10 +1036,7 @@ mod test {
		let mut vec = Vec::new();
		ser::serialize(&mut vec, &b).expect("serialization failed");
		let target_len = 1_256;
		assert_eq!(
			vec.len(),
			target_len,
		);
		assert_eq!(vec.len(), target_len,);
	}

	#[test]

@@ -1061,10 +1047,7 @@ mod test {
		let mut vec = Vec::new();
		ser::serialize(&mut vec, &b).expect("serialization failed");
		let target_len = 2_900;
		assert_eq!(
			vec.len(),
			target_len,
		);
		assert_eq!(vec.len(), target_len,);
	}

	#[test]

@@ -1074,10 +1057,7 @@ mod test {
		let mut vec = Vec::new();
		ser::serialize(&mut vec, &b.as_compact_block()).expect("serialization failed");
		let target_len = 1_264;
		assert_eq!(
			vec.len(),
			target_len,
		);
		assert_eq!(vec.len(), target_len,);
	}

	#[test]

@@ -1088,10 +1068,7 @@ mod test {
		let mut vec = Vec::new();
		ser::serialize(&mut vec, &b.as_compact_block()).expect("serialization failed");
		let target_len = 1_270;
		assert_eq!(
			vec.len(),
			target_len,
		);
		assert_eq!(vec.len(), target_len,);
	}

	#[test]

@@ -1104,17 +1081,11 @@ mod test {
			txs.push(tx);
		}

		let b = new_block(
			txs.iter().collect(),
			&keychain,
		);
		let b = new_block(txs.iter().collect(), &keychain);
		let mut vec = Vec::new();
		ser::serialize(&mut vec, &b).expect("serialization failed");
		let target_len = 17_696;
		assert_eq!(
			vec.len(),
			target_len,
		);
		assert_eq!(vec.len(), target_len,);
	}

	#[test]

@@ -1127,17 +1098,11 @@ mod test {
			txs.push(tx);
		}

		let b = new_block(
			txs.iter().collect(),
			&keychain,
		);
		let b = new_block(txs.iter().collect(), &keychain);
		let mut vec = Vec::new();
		ser::serialize(&mut vec, &b.as_compact_block()).expect("serialization failed");
		let target_len = 1_324;
		assert_eq!(
			vec.len(),
			target_len,
		);
		assert_eq!(vec.len(), target_len,);
	}

	#[test]

@@ -1158,8 +1123,14 @@ mod test {

		// check we can identify the specified kernel from the short_id
		// correctly in both of the compact_blocks
		assert_eq!(cb1.kern_ids[0], tx.kernels[0].short_id(&cb1.hash(), cb1.nonce));
		assert_eq!(cb2.kern_ids[0], tx.kernels[0].short_id(&cb2.hash(), cb2.nonce));
		assert_eq!(
			cb1.kern_ids[0],
			tx.kernels[0].short_id(&cb1.hash(), cb1.nonce)
		);
		assert_eq!(
			cb2.kern_ids[0],
			tx.kernels[0].short_id(&cb2.hash(), cb2.nonce)
		);
	}

	#[test]

@@ -25,13 +25,14 @@
//! build::transaction(vec![input_rand(75), output_rand(42), output_rand(32),
//! with_fee(1)])

use util::{secp, kernel_sig_msg};
use util::{kernel_sig_msg, secp};

use core::{Transaction, TxKernel, Input, Output, OutputFeatures, ProofMessageElements, SwitchCommitHash};
use core::{Input, Output, OutputFeatures, ProofMessageElements, SwitchCommitHash, Transaction,
           TxKernel};
use core::hash::Hash;
use core::pmmr::MerkleProof;
use keychain;
use keychain::{Keychain, BlindSum, BlindingFactor, Identifier};
use keychain::{BlindSum, BlindingFactor, Identifier, Keychain};
use util::LOGGER;

/// Context information available to transaction combinators.

@@ -41,7 +42,8 @@ pub struct Context<'a> {

/// Function type returned by the transaction combinators. Transforms a
/// (Transaction, BlindSum) pair into another, provided some context.
pub type Append = for<'a> Fn(&'a mut Context, (Transaction, TxKernel, BlindSum)) -> (Transaction, TxKernel, BlindSum);
pub type Append = for<'a> Fn(&'a mut Context, (Transaction, TxKernel, BlindSum))
	-> (Transaction, TxKernel, BlindSum);

/// Adds an input with the provided value and blinding key to the transaction
/// being built.

@@ -52,25 +54,22 @@ fn build_input(
	merkle_proof: Option<MerkleProof>,
	key_id: Identifier,
) -> Box<Append> {
	Box::new(move |build, (tx, kern, sum)| -> (Transaction, TxKernel, BlindSum) {
		let commit = build.keychain.commit(value, &key_id).unwrap();
		let input = Input::new(
			features,
			commit,
			block_hash.clone(),
			merkle_proof.clone(),
		);
		(tx.with_input(input), kern, sum.sub_key_id(key_id.clone()))
	})
	Box::new(
		move |build, (tx, kern, sum)| -> (Transaction, TxKernel, BlindSum) {
			let commit = build.keychain.commit(value, &key_id).unwrap();
			let input = Input::new(features, commit, block_hash.clone(), merkle_proof.clone());
			(tx.with_input(input), kern, sum.sub_key_id(key_id.clone()))
		},
	)
}

/// Adds an input with the provided value and blinding key to the transaction
/// being built.
pub fn input(
	value: u64,
	key_id: Identifier,
) -> Box<Append> {
	debug!(LOGGER, "Building input (spending regular output): {}, {}", value, key_id);
pub fn input(value: u64, key_id: Identifier) -> Box<Append> {
	debug!(
		LOGGER,
		"Building input (spending regular output): {}, {}", value, key_id
	);
	build_input(value, OutputFeatures::DEFAULT_OUTPUT, None, None, key_id)
}

@@ -82,90 +81,105 @@ pub fn coinbase_input(
	merkle_proof: MerkleProof,
	key_id: Identifier,
) -> Box<Append> {
	debug!(LOGGER, "Building input (spending coinbase): {}, {}", value, key_id);
	build_input(value, OutputFeatures::COINBASE_OUTPUT, Some(block_hash), Some(merkle_proof), key_id)
	debug!(
		LOGGER,
		"Building input (spending coinbase): {}, {}", value, key_id
	);
	build_input(
		value,
		OutputFeatures::COINBASE_OUTPUT,
		Some(block_hash),
		Some(merkle_proof),
		key_id,
	)
}

/// Adds an output with the provided value and key identifier from the
/// keychain.
pub fn output(value: u64, key_id: Identifier) -> Box<Append> {
	Box::new(move |build, (tx, kern, sum)| -> (Transaction, TxKernel, BlindSum) {
		debug!(
			LOGGER,
			"Building an output: {}, {}",
			value,
			key_id,
		);
	Box::new(
		move |build, (tx, kern, sum)| -> (Transaction, TxKernel, BlindSum) {
			debug!(LOGGER, "Building an output: {}, {}", value, key_id,);

		let commit = build.keychain.commit(value, &key_id).unwrap();
		let switch_commit = build.keychain.switch_commit(&key_id).unwrap();
		let switch_commit_hash = SwitchCommitHash::from_switch_commit(
			switch_commit,
			build.keychain,
			&key_id,
		);
		trace!(
			LOGGER,
			"Builder - Pedersen Commit is: {:?}, Switch Commit is: {:?}",
			commit,
			switch_commit,
		);
		trace!(
			LOGGER,
			"Builder - Switch Commit Hash is: {:?}",
			switch_commit_hash
		);
			let commit = build.keychain.commit(value, &key_id).unwrap();
			let switch_commit = build.keychain.switch_commit(&key_id).unwrap();
			let switch_commit_hash =
				SwitchCommitHash::from_switch_commit(switch_commit, build.keychain, &key_id);
			trace!(
				LOGGER,
				"Builder - Pedersen Commit is: {:?}, Switch Commit is: {:?}",
				commit,
				switch_commit,
			);
			trace!(
				LOGGER,
				"Builder - Switch Commit Hash is: {:?}",
				switch_commit_hash
			);

		let msg = (ProofMessageElements {
			value: value,
		}).to_proof_message();
			let msg = (ProofMessageElements { value: value }).to_proof_message();

		let rproof = build
			.keychain
			.range_proof(value, &key_id, commit, Some(switch_commit_hash.as_ref().to_vec()), msg)
			.unwrap();
			let rproof = build
				.keychain
				.range_proof(
					value,
					&key_id,
					commit,
					Some(switch_commit_hash.as_ref().to_vec()),
					msg,
				)
				.unwrap();

		(
			tx.with_output(Output {
				features: OutputFeatures::DEFAULT_OUTPUT,
				commit: commit,
				switch_commit_hash: switch_commit_hash,
				proof: rproof,
			}),
			kern,
			sum.add_key_id(key_id.clone()),
		)
	})
			(
				tx.with_output(Output {
					features: OutputFeatures::DEFAULT_OUTPUT,
					commit: commit,
					switch_commit_hash: switch_commit_hash,
					proof: rproof,
				}),
				kern,
				sum.add_key_id(key_id.clone()),
			)
		},
	)
}

/// Sets the fee on the transaction being built.
pub fn with_fee(fee: u64) -> Box<Append> {
	Box::new(move |_build, (tx, kern, sum)| -> (Transaction, TxKernel, BlindSum) {
		(tx, kern.with_fee(fee), sum)
	})
	Box::new(
		move |_build, (tx, kern, sum)| -> (Transaction, TxKernel, BlindSum) {
			(tx, kern.with_fee(fee), sum)
		},
	)
}

/// Sets the lock_height on the transaction being built.
pub fn with_lock_height(lock_height: u64) -> Box<Append> {
	Box::new(move |_build, (tx, kern, sum)| -> (Transaction, TxKernel, BlindSum) {
		(tx, kern.with_lock_height(lock_height), sum)
	})
	Box::new(
		move |_build, (tx, kern, sum)| -> (Transaction, TxKernel, BlindSum) {
			(tx, kern.with_lock_height(lock_height), sum)
		},
	)
}

/// Adds a known excess value on the transaction being built. Usually used in
/// combination with the initial_tx function when a new transaction is built
/// by adding to a pre-existing one.
pub fn with_excess(excess: BlindingFactor) -> Box<Append> {
	Box::new(move |_build, (tx, kern, sum)| -> (Transaction, TxKernel, BlindSum) {
		(tx, kern, sum.add_blinding_factor(excess.clone()))
	})
	Box::new(
		move |_build, (tx, kern, sum)| -> (Transaction, TxKernel, BlindSum) {
			(tx, kern, sum.add_blinding_factor(excess.clone()))
		},
	)
}

/// Sets a known tx "offset". Used in final step of tx construction.
pub fn with_offset(offset: BlindingFactor) -> Box<Append> {
	Box::new(move |_build, (tx, kern, sum)| -> (Transaction, TxKernel, BlindSum) {
		(tx.with_offset(offset), kern, sum)
	})
	Box::new(
		move |_build, (tx, kern, sum)| -> (Transaction, TxKernel, BlindSum) {
			(tx.with_offset(offset), kern, sum)
		},
	)
}

/// Sets an initial transaction to add to when building a new transaction.

@@ -173,9 +187,11 @@ pub fn with_offset(offset: BlindingFactor) -> Box<Append> {
pub fn initial_tx(mut tx: Transaction) -> Box<Append> {
	assert_eq!(tx.kernels.len(), 1);
	let kern = tx.kernels.remove(0);
	Box::new(move |_build, (_, _, sum)| -> (Transaction, TxKernel, BlindSum) {
		(tx.clone(), kern.clone(), sum)
	})
	Box::new(
		move |_build, (_, _, sum)| -> (Transaction, TxKernel, BlindSum) {
			(tx.clone(), kern.clone(), sum)
		},
	)
}

/// Builds a new transaction by combining all the combinators provided in a

@@ -177,7 +177,9 @@ impl HashWriter {

impl Default for HashWriter {
	fn default() -> HashWriter {
		HashWriter { state: Blake2b::new(32) }
		HashWriter {
			state: Blake2b::new(32),
		}
	}
}

@@ -225,9 +227,10 @@ impl<T: Writeable> consensus::VerifySortOrder<T> for Vec<T> {
			.map(|item| item.hash())
			.collect::<Vec<_>>()
			.windows(2)
			.any(|pair| pair[0] > pair[1]) {
			true => Err(consensus::Error::SortError),
			false => Ok(()),
		}
			.any(|pair| pair[0] > pair[1])
		{
			true => Err(consensus::Error::SortError),
			false => Ok(()),
		}
	}
}
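
The hunk above only re-wraps the match; the underlying check is simple enough to state directly: hash each item, then fail if any adjacent pair is out of order. A minimal sketch of the same windows-based check over plain integers (illustrative only, without grin's Hashed machinery):

	#[derive(Debug, PartialEq)]
	enum SortError {
		NotSorted,
	}

	/// Err if any adjacent pair of items is out of order.
	fn verify_sort_order(items: &[u64]) -> Result<(), SortError> {
		match items.windows(2).any(|pair| pair[0] > pair[1]) {
			true => Err(SortError::NotSorted),
			false => Ok(()),
		}
	}

	fn main() {
		assert_eq!(verify_sort_order(&[1, 2, 3]), Ok(()));
		assert_eq!(verify_sort_order(&[2, 1]), Err(SortError::NotSorted));
	}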
@@ -16,15 +16,14 @@

use std::cmp::min;

use byteorder::{LittleEndian, ByteOrder};
use byteorder::{ByteOrder, LittleEndian};
use siphasher::sip::SipHasher24;

use core::hash::{Hash, Hashed};
use ser;
use ser::{Reader, Readable, Writer, Writeable};
use ser::{Readable, Reader, Writeable, Writer};
use util;


/// The size of a short id used to identify inputs|outputs|kernels (6 bytes)
pub const SHORT_ID_SIZE: usize = 6;

@@ -62,7 +61,8 @@ impl<H: Hashed> ShortIdentifiable for H {
		sip_hasher.write(&self.hash().to_vec()[..]);
		let res = sip_hasher.finish();

		// construct a short_id from the resulting bytes (dropping the 2 most significant bytes)
		// construct a short_id from the resulting bytes (dropping the 2 most
		// significant bytes)
		let mut buf = [0; 8];
		LittleEndian::write_u64(&mut buf, res);
		ShortId::from_bytes(&buf[0..6])
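
For context on the scheme touched above: a short id is the keyed SipHash-2-4 of an object's hash, truncated to its 6 least significant bytes. A self-contained sketch assuming the siphasher crate (which this file already imports); the function name and keys here are illustrative:

	extern crate siphasher;

	use std::hash::Hasher;
	use siphasher::sip::SipHasher24;

	/// Compute a 6-byte short id for `data` under keys (k0, k1).
	/// grin derives the keys from a block hash and nonce.
	fn short_id(k0: u64, k1: u64, data: &[u8]) -> [u8; 6] {
		let mut hasher = SipHasher24::new_with_keys(k0, k1);
		hasher.write(data);
		let res = hasher.finish();
		// keep the 6 low bytes (little-endian), dropping the 2 most significant
		let bytes = res.to_le_bytes();
		let mut out = [0u8; 6];
		out.copy_from_slice(&bytes[0..6]);
		out
	}

	fn main() {
		println!("{:?}", short_id(1, 2, b"kernel-hash-bytes"));
	}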
@ -131,7 +131,6 @@ mod test {
|
|||
use super::*;
|
||||
use ser::{Writeable, Writer};
|
||||
|
||||
|
||||
#[test]
|
||||
fn test_short_id() {
|
||||
// minimal struct for testing
|
||||
|
@ -152,7 +151,10 @@ mod test {
|
|||
assert_eq!(foo.hash(), expected_hash);
|
||||
|
||||
let other_hash = Hash::zero();
|
||||
assert_eq!(foo.short_id(&other_hash, foo.0), ShortId::from_hex("4cc808b62476").unwrap());
|
||||
assert_eq!(
|
||||
foo.short_id(&other_hash, foo.0),
|
||||
ShortId::from_hex("4cc808b62476").unwrap()
|
||||
);
|
||||
|
||||
let foo = Foo(5);
|
||||
let expected_hash = Hash::from_hex(
|
||||
|
@ -161,7 +163,10 @@ mod test {
|
|||
assert_eq!(foo.hash(), expected_hash);
|
||||
|
||||
let other_hash = Hash::zero();
|
||||
assert_eq!(foo.short_id(&other_hash, foo.0), ShortId::from_hex("02955a094534").unwrap());
|
||||
assert_eq!(
|
||||
foo.short_id(&other_hash, foo.0),
|
||||
ShortId::from_hex("02955a094534").unwrap()
|
||||
);
|
||||
|
||||
let foo = Foo(5);
|
||||
let expected_hash = Hash::from_hex(
|
||||
|
@ -172,6 +177,9 @@ mod test {
|
|||
let other_hash = Hash::from_hex(
|
||||
"81e47a19e6b29b0a65b9591762ce5143ed30d0261e5d24a3201752506b20f15c",
|
||||
).unwrap();
|
||||
assert_eq!(foo.short_id(&other_hash, foo.0), ShortId::from_hex("3e9cde72a687").unwrap());
|
||||
assert_eq!(
|
||||
foo.short_id(&other_hash, foo.0),
|
||||
ShortId::from_hex("3e9cde72a687").unwrap()
|
||||
);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -264,10 +264,7 @@ mod test {
|
|||
let mut vec = Vec::new();
|
||||
ser::serialize(&mut vec, &tx).expect("serialization failed");
|
||||
let target_len = 986;
|
||||
assert_eq!(
|
||||
vec.len(),
|
||||
target_len,
|
||||
);
|
||||
assert_eq!(vec.len(), target_len,);
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
@ -392,8 +389,8 @@ mod test {
|
|||
let btx = tx2i1o();
|
||||
assert!(btx.validate().is_ok());
|
||||
|
||||
// Ignored for bullet proofs, because calling range_proof_info
|
||||
// with a bullet proof causes painful errors
|
||||
// Ignored for bullet proofs, because calling range_proof_info
|
||||
// with a bullet proof causes painful errors
|
||||
|
||||
// checks that the range proof on our blind output is sufficiently hiding
|
||||
let Output { proof, .. } = btx.outputs[0];
|
||||
|
@ -423,12 +420,13 @@ mod test {
|
|||
// let key_id2 = keychain.derive_key_id(2).unwrap();
|
||||
// let key_id3 = keychain.derive_key_id(3).unwrap();
|
||||
// let key_id4 = keychain.derive_key_id(4).unwrap();
|
||||
//
|
||||
//
|
||||
// let (tx_alice, blind_sum) = {
|
||||
// // Alice gets 2 of her pre-existing outputs to send 5 coins to Bob, they
|
||||
// // become inputs in the new transaction
|
||||
// let (in1, in2) = (input(4, ZERO_HASH, key_id1), input(3, ZERO_HASH, key_id2));
|
||||
//
|
||||
// let (in1, in2) = (input(4, ZERO_HASH, key_id1), input(3, ZERO_HASH,
|
||||
// key_id2));
|
||||
//
|
||||
// // Alice builds her transaction, with change, which also produces the sum
|
||||
// // of blinding factors before they're obscured.
|
||||
// let (tx, sum) = build::partial_transaction(
|
||||
|
@ -436,21 +434,21 @@ mod test {
|
|||
// with_fee(2)],
|
||||
// &keychain,
|
||||
// ).unwrap();
|
||||
//
|
||||
//
|
||||
// (tx, sum)
|
||||
// };
|
||||
//
|
||||
//
|
||||
// let blind = blind_sum.secret_key(&keychain.secp())?;
|
||||
// keychain.aggsig_create_context(blind);
|
||||
// let (pub_excess, pub_nonce) = keychain.aggsig_get_public_keys();
|
||||
//
|
||||
//
|
||||
// let sig_part = keychain.aggsig_calculate_partial_sig(
|
||||
// &pub_nonce,
|
||||
// tx.fee(),
|
||||
// tx.lock_height(),
|
||||
// ).unwrap();
|
||||
//
|
||||
//
|
||||
//
|
||||
//
|
||||
// // From now on, Bob only has the obscured transaction and the sum of
|
||||
// // blinding factors. He adds his output, finalizes the transaction so it's
|
||||
// // ready for broadcast.
|
||||
|
@ -462,9 +460,9 @@ mod test {
|
|||
// ],
|
||||
// &keychain,
|
||||
// ).unwrap();
|
||||
//
|
||||
//
|
||||
// tx_final.validate().unwrap();
|
||||
//
|
||||
//
|
||||
// }
|
||||
|
||||
/// Simulate the standard exchange between 2 parties when creating a basic
|
||||
|
@ -485,8 +483,7 @@ mod test {
|
|||
// Alice builds her transaction, with change, which also produces the sum
|
||||
// of blinding factors before they're obscured.
|
||||
let (tx, sum) = build::partial_transaction(
|
||||
vec![in1, in2, output(1, key_id3),
|
||||
with_fee(2)],
|
||||
vec![in1, in2, output(1, key_id3), with_fee(2)],
|
||||
&keychain,
|
||||
).unwrap();
|
||||
|
||||
|
@ -567,8 +564,8 @@ mod test {
|
|||
let key_id2 = keychain.derive_key_id(2).unwrap();
|
||||
let key_id3 = keychain.derive_key_id(3).unwrap();
|
||||
|
||||
// first check we can add a timelocked tx where lock height matches current block height
|
||||
// and that the resulting block is valid
|
||||
// first check we can add a timelocked tx where lock height matches current
|
||||
// block height and that the resulting block is valid
|
||||
let tx1 = build::transaction(
|
||||
vec![
|
||||
input(5, key_id1.clone()),
|
||||
|
@ -588,7 +585,8 @@ mod test {
|
|||
).unwrap();
|
||||
b.validate().unwrap();
|
||||
|
||||
// now try adding a timelocked tx where lock height is greater than current block height
|
||||
// now try adding a timelocked tx where lock height is greater than current
|
||||
// block height
|
||||
let tx1 = build::transaction(
|
||||
vec![
|
||||
input(5, key_id1.clone()),
|
||||
|
|
|
@ -30,10 +30,10 @@
|
|||
//! binary operations, they're extremely fast. For more information, see the
|
||||
//! doc on bintree_jump_left_sibling.
|
||||
//! 2. The implementation of a prunable MMR tree using the above. Each leaf
|
||||
//! is required to be Writeable (which implements Hashed). Tree roots can be trivially and
|
||||
//! efficiently calculated without materializing the full tree. The underlying
|
||||
//! Hashes are stored in a Backend implementation that can either be
|
||||
//! a simple Vec or a database.
|
||||
//! is required to be Writeable (which implements Hashed). Tree roots can be
|
||||
//! trivially and efficiently calculated without materializing the full tree.
|
||||
//! The underlying Hashes are stored in a Backend implementation that can
|
||||
//! either be a simple Vec or a database.
|
||||
|
||||
use std::clone::Clone;
|
||||
use std::marker::PhantomData;
|
||||
|
@@ -48,8 +48,10 @@ use util::LOGGER;
/// The PMMR itself does not need the Backend to be accurate on the existence
/// of an element (i.e. remove could be a no-op) but layers above can
/// depend on an accurate Backend to check existence.
pub trait Backend<T> where
T:PMMRable {
pub trait Backend<T>
where
T: PMMRable,
{
/// Append the provided Hashes to the backend storage, and optionally an associated
/// data element to flatfile storage (for leaf nodes only). The position of the
/// first element of the Vec in the MMR is provided to help the implementation.

@@ -65,7 +67,8 @@ pub trait Backend<T> where
/// also return the associated data element
fn get(&self, position: u64, include_data: bool) -> Option<(Hash, Option<T>)>;

/// Get a Hash/Element by original insertion position (ignoring the remove list).
/// Get a Hash/Element by original insertion position (ignoring the remove
/// list).
fn get_from_file(&self, position: u64) -> Option<Hash>;

/// Remove HashSums by insertion position. An index is also provided so the

@@ -97,7 +100,8 @@ pub struct MerkleProof {
pub peaks: Vec<Hash>,
/// The siblings along the path of the tree as we traverse from node to peak
pub path: Vec<Hash>,
/// Order of siblings (left vs right) matters, so track this here for each path element
/// Order of siblings (left vs right) matters, so track this here for each
/// path element
pub left_right: Vec<bool>,
}

@@ -108,7 +112,6 @@ impl Writeable for MerkleProof {
[write_fixed_bytes, &self.root],
[write_fixed_bytes, &self.node],
[write_u64, self.peaks.len() as u64],

// note: path length used for both path and left_right vecs
[write_u64, self.path.len() as u64]
);

@@ -134,8 +137,7 @@ impl Readable for MerkleProof {
let root = Hash::read(reader)?;
let node = Hash::read(reader)?;

let (peaks_len, path_len) =
ser_multiread!(reader, read_u64, read_u64);
let (peaks_len, path_len) = ser_multiread!(reader, read_u64, read_u64);

let mut peaks = Vec::with_capacity(peaks_len as usize);
for _ in 0..peaks_len {

@@ -148,15 +150,13 @@ impl Readable for MerkleProof {

let left_right_bytes = reader.read_fixed_bytes(path_len as usize)?;
let left_right = left_right_bytes.iter().map(|&x| x == 1).collect();
Ok(
MerkleProof {
root,
node,
peaks,
path,
left_right,
}
)
Ok(MerkleProof {
root,
node,
peaks,
path,
left_right,
})
}
}

@@ -223,7 +223,8 @@ impl MerkleProof {
let sibling = path.remove(0);
let mut left_right = self.left_right.clone();

// hash our node and sibling together (noting left/right position of the sibling)
// hash our node and sibling together (noting left/right position of the
// sibling)
let parent = if left_right.remove(0) {
self.node.hash_with(sibling)
} else {
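Order matters when the two hashes are combined, which is why the proof carries one boolean per path element. A hedged standalone sketch of the fold (the flag semantics and the `combine` mixer are illustrative assumptions, not this module's exact API):

// Sketch: fold a leaf hash up a path of (sibling, node_goes_first) pairs.
fn combine(a: u64, b: u64) -> u64 {
	// placeholder order-sensitive mixer, standing in for hash_with
	a.wrapping_mul(1_000_003).wrapping_add(b)
}

fn fold_to_peak(node: u64, path: &[(u64, bool)]) -> u64 {
	path.iter().fold(node, |acc, &(sibling, node_first)| {
		if node_first {
			combine(acc, sibling)
		} else {
			combine(sibling, acc)
		}
	})
}

fn main() {
	// node first on the first step, sibling first on the second
	let peak = fold_to_peak(1, &[(7, true), (3, false)]);
	assert_eq!(peak, combine(3, combine(1, 7)));
}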
@@ -242,7 +243,6 @@ impl MerkleProof {
}
}

/// Prunable Merkle Mountain Range implementation. All positions within the tree
/// start at 1 as they're postorder tree traversal positions rather than array
/// indices.

@@ -290,7 +290,8 @@ where
/// tree and "bags" them to get a single peak.
pub fn root(&self) -> Hash {
let peaks_pos = peaks(self.last_pos);
let peaks: Vec<Option<(Hash, Option<T>)>> = peaks_pos.into_iter()
let peaks: Vec<Option<(Hash, Option<T>)>> = peaks_pos
.into_iter()
.map(|pi| self.backend.get(pi, false))
.collect();

@@ -307,7 +308,10 @@ where

/// Build a Merkle proof for the element at the given position in the MMR
pub fn merkle_proof(&self, pos: u64) -> Result<MerkleProof, String> {
debug!(LOGGER, "merkle_proof (via rewind) - {}, last_pos {}", pos, self.last_pos);
debug!(
LOGGER,
"merkle_proof (via rewind) - {}, last_pos {}", pos, self.last_pos
);

if !is_leaf(pos) {
return Err(format!("not a leaf at pos {}", pos));

@@ -320,10 +324,7 @@ where
.0;

let family_branch = family_branch(pos, self.last_pos);
let left_right = family_branch
.iter()
.map(|x| x.2)
.collect::<Vec<_>>();
let left_right = family_branch.iter().map(|x| x.2).collect::<Vec<_>>();

let path = family_branch
.iter()

@@ -370,9 +371,9 @@ where
// creation of another parent.
while bintree_postorder_height(pos + 1) > height {
let left_sibling = bintree_jump_left_sibling(pos);
let left_elem = self.backend.get(left_sibling, false).expect(
"missing left sibling in tree, should not have been pruned",
);
let left_elem = self.backend
.get(left_sibling, false)
.expect("missing left sibling in tree, should not have been pruned");
current_hash = left_elem.0 + current_hash;

to_append.push((current_hash.clone(), None));

@@ -498,16 +499,18 @@ where
if bintree_postorder_height(n) > 0 {
if let Some(hs) = self.get(n, false) {
// take the left and right children, if they exist
let left_pos = bintree_move_down_left(n)
.ok_or(format!("left_pos not found"))?;
let left_pos = bintree_move_down_left(n).ok_or(format!("left_pos not found"))?;
let right_pos = bintree_jump_right_sibling(left_pos);

if let Some(left_child_hs) = self.get(left_pos, false) {
if let Some(right_child_hs) = self.get(right_pos, false) {
// add hashes and compare
if left_child_hs.0+right_child_hs.0 != hs.0 {
return Err(format!("Invalid MMR, hash of parent at {} does \
not match children.", n));
if left_child_hs.0 + right_child_hs.0 != hs.0 {
return Err(format!(
"Invalid MMR, hash of parent at {} does \
not match children.",
n
));
}
}
}

@@ -575,7 +578,9 @@ pub struct PruneList {
impl PruneList {
/// Instantiate a new empty prune list
pub fn new() -> PruneList {
PruneList { pruned_nodes: vec![] }
PruneList {
pruned_nodes: vec![],
}
}

/// Computes by how many positions a node at pos should be shifted given the

@@ -602,7 +607,6 @@ impl PruneList {
/// given leaf. Helpful if, for instance, data for each leaf is being stored
/// separately in a continuous flat-file
pub fn get_leaf_shift(&self, pos: u64) -> Option<u64> {

// get the position where the node at pos would fit in the pruned list, if
// it's already pruned, nothing to skip
match self.pruned_pos(pos) {

@@ -716,12 +720,13 @@ pub fn peaks(num: u64) -> Vec<u64> {
/// The number of leaves nodes in a MMR of the provided size. Uses peaks to
/// get the positions of all full binary trees and uses the height of these
pub fn n_leaves(mut sz: u64) -> u64 {
while bintree_postorder_height(sz+1) > 0 {
while bintree_postorder_height(sz + 1) > 0 {
sz += 1;
}
peaks(sz).iter().map(|n| {
(1 << bintree_postorder_height(*n)) as u64
}).sum()
peaks(sz)
.iter()
.map(|n| (1 << bintree_postorder_height(*n)) as u64)
.sum()
}

/// The height of a node in a full binary tree from its postorder traversal
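For intuition, the peak-based leaf count above can be checked by hand: each peak of height h covers 2^h leaves. A small hedged sketch of the same formula (standalone, not this module's code):

// Sketch: sum 2^height over all peaks of an MMR.
// A size-4 MMR has peaks at pos 3 (height 1) and pos 4 (height 0),
// so it holds 2 + 1 = 3 leaves, matching the n_leaves(4) == 3 test below.
fn leaves_from_peak_heights(peak_heights: &[u64]) -> u64 {
	peak_heights.iter().map(|h| 1u64 << h).sum()
}

fn main() {
	assert_eq!(leaves_from_peak_heights(&[1, 0]), 3);
}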
@@ -909,23 +914,26 @@ fn most_significant_pos(num: u64) -> u64 {
#[cfg(test)]
mod test {
use super::*;
use ser::{Writeable, Readable, Error};
use core::{Writer, Reader};
use core::hash::{Hash};
use ser::{Error, Readable, Writeable};
use core::{Reader, Writer};
use core::hash::Hash;

/// Simple MMR backend implementation based on a Vector. Pruning does not
/// compact the Vec itself.
#[derive(Clone)]
pub struct VecBackend<T>
where T:PMMRable {
where
T: PMMRable,
{
/// Backend elements
pub elems: Vec<Option<(Hash, Option<T>)>>,
/// Positions of removed elements
pub remove_list: Vec<u64>,
}

impl <T> Backend <T> for VecBackend<T>
where T: PMMRable
impl<T> Backend<T> for VecBackend<T>
where
T: PMMRable,
{
fn append(&mut self, _position: u64, data: Vec<(Hash, Option<T>)>) -> Result<(), String> {
self.elems.append(&mut map_vec!(data, |d| Some(d.clone())));

@@ -965,8 +973,9 @@ mod test {
}
}

impl <T> VecBackend <T>
where T:PMMRable
impl<T> VecBackend<T>
where
T: PMMRable,
{
/// Instantiates a new VecBackend<T>
pub fn new() -> VecBackend<T> {

@@ -990,14 +999,13 @@ mod test {
}

#[test]
fn test_leaf_index(){
assert_eq!(n_leaves(1),1);
assert_eq!(n_leaves(2),2);
assert_eq!(n_leaves(4),3);
assert_eq!(n_leaves(5),4);
assert_eq!(n_leaves(8),5);
assert_eq!(n_leaves(9),6);

fn test_leaf_index() {
assert_eq!(n_leaves(1), 1);
assert_eq!(n_leaves(2), 2);
assert_eq!(n_leaves(4), 3);
assert_eq!(n_leaves(5), 4);
assert_eq!(n_leaves(8), 5);
assert_eq!(n_leaves(9), 6);
}

#[test]

@@ -1044,7 +1052,8 @@ mod test {
#[test]
fn various_n_leaves() {
assert_eq!(n_leaves(1), 1);
// 2 is not a valid size for a tree, but n_leaves rounds up to next valid tree size
// 2 is not a valid size for a tree, but n_leaves rounds up to next valid tree
// size
assert_eq!(n_leaves(2), 2);
assert_eq!(n_leaves(3), 2);
assert_eq!(n_leaves(7), 4);

@@ -1076,7 +1085,8 @@ mod test {
// leaf node in a larger tree of 7 nodes (height 2)
assert_eq!(family_branch(1, 7), [(3, 2, true), (7, 6, true)]);

// note these only go as far up as the local peak, not necessarily the single root
// note these only go as far up as the local peak, not necessarily the single
// root
assert_eq!(family_branch(1, 4), [(3, 2, true)]);
// pos 4 in a tree of size 4 is a local peak
assert_eq!(family_branch(4, 4), []);

@@ -1089,9 +1099,10 @@ mod test {

// ok now for a more realistic one, a tree with over a million nodes in it
// find the "family path" back up the tree from a leaf node at 0
// Note: the first two entries in the branch are consistent with a small 7 node tree
// Note: each sibling is on the left branch, this is an example of the largest possible
// list of peaks before we start combining them into larger peaks.
// Note: the first two entries in the branch are consistent with a small 7 node
// tree Note: each sibling is on the left branch, this is an example of the
// largest possible list of peaks before we start combining them into larger
// peaks.
assert_eq!(
family_branch(1, 1_049_000),
[

@@ -1139,34 +1150,19 @@ mod test {
assert_eq!(peaks(42), [31, 38, 41, 42]);

// large realistic example with almost 1.5 million nodes
// note the distance between peaks decreases toward the right (trees get smaller)
// note the distance between peaks decreases toward the right (trees get
// smaller)
assert_eq!(
peaks(1048555),
[
524287,
786430,
917501,
983036,
1015803,
1032186,
1040377,
1044472,
1046519,
1047542,
1048053,
1048308,
1048435,
1048498,
1048529,
1048544,
1048551,
1048554,
524287, 786430, 917501, 983036, 1015803, 1032186, 1040377, 1044472, 1046519,
1047542, 1048053, 1048308, 1048435, 1048498, 1048529, 1048544, 1048551, 1048554,
1048555,
],
);
}

#[derive(Copy, Clone, Debug, PartialEq, Eq)]
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
struct TestElem([u32; 4]);

impl PMMRable for TestElem {

@@ -1186,14 +1182,12 @@ mod test {

impl Readable for TestElem {
fn read(reader: &mut Reader) -> Result<TestElem, Error> {
Ok(TestElem (
[
reader.read_u32()?,
reader.read_u32()?,
reader.read_u32()?,
reader.read_u32()?,
]
))
Ok(TestElem([
reader.read_u32()?,
reader.read_u32()?,
reader.read_u32()?,
reader.read_u32()?,
]))
}
}

@@ -1237,7 +1231,10 @@ mod test {
assert!(proof2.verify());

// check that we cannot generate a merkle proof for pos 3 (not a leaf node)
assert_eq!(pmmr.merkle_proof(3).err(), Some(format!("not a leaf at pos 3")));
assert_eq!(
pmmr.merkle_proof(3).err(),
Some(format!("not a leaf at pos 3"))
);

let proof4 = pmmr.merkle_proof(4).unwrap();
assert_eq!(proof4.peaks.len(), 2);

@@ -1309,10 +1306,7 @@ mod test {
// one element
pmmr.push(elems[0]).unwrap();
let node_hash = elems[0].hash();
assert_eq!(
pmmr.root(),
node_hash,
);
assert_eq!(pmmr.root(), node_hash,);
assert_eq!(pmmr.unpruned_size(), 1);
pmmr.dump(false);

@@ -1347,8 +1341,7 @@ mod test {

// six elements
pmmr.push(elems[5]).unwrap();
let sum6 = sum4 +
(elems[4].hash() + elems[5].hash());
let sum6 = sum4 + (elems[4].hash() + elems[5].hash());
assert_eq!(pmmr.root(), sum6.clone());
assert_eq!(pmmr.unpruned_size(), 10);

@@ -1360,9 +1353,8 @@ mod test {

// eight elements
pmmr.push(elems[7]).unwrap();
let sum8 = sum4 +
((elems[4].hash() + elems[5].hash()) +
(elems[6].hash() + elems[7].hash()));
let sum8 =
sum4 + ((elems[4].hash() + elems[5].hash()) + (elems[6].hash() + elems[7].hash()));
assert_eq!(pmmr.root(), sum8);
assert_eq!(pmmr.unpruned_size(), 15);

@@ -1411,9 +1403,7 @@ mod test {
pmmr.push(elems[3]).unwrap();

let res = pmmr.get_last_n_insertions(19);
assert!(
res.len() == 4
);
assert!(res.len() == 4);

pmmr.push(elems[5]).unwrap();
pmmr.push(elems[6]).unwrap();

@@ -1421,9 +1411,7 @@ mod test {
pmmr.push(elems[8]).unwrap();

let res = pmmr.get_last_n_insertions(7);
assert!(
res.len() == 7
);
assert!(res.len() == 7);
}

#[test]

@@ -1455,7 +1443,7 @@ mod test {

// pruning a leaf with no parent should do nothing
{
let mut pmmr:PMMR<TestElem, _> = PMMR::at(&mut ba, sz);
let mut pmmr: PMMR<TestElem, _> = PMMR::at(&mut ba, sz);
pmmr.prune(16, 0).unwrap();
assert_eq!(orig_root, pmmr.root());
}

@@ -1463,14 +1451,14 @@ mod test {

// pruning leaves with no shared parent just removes 1 element
{
let mut pmmr:PMMR<TestElem, _> = PMMR::at(&mut ba, sz);
let mut pmmr: PMMR<TestElem, _> = PMMR::at(&mut ba, sz);
pmmr.prune(2, 0).unwrap();
assert_eq!(orig_root, pmmr.root());
}
assert_eq!(ba.used_size(), 15);

{
let mut pmmr:PMMR<TestElem, _> = PMMR::at(&mut ba, sz);
let mut pmmr: PMMR<TestElem, _> = PMMR::at(&mut ba, sz);
pmmr.prune(4, 0).unwrap();
assert_eq!(orig_root, pmmr.root());
}

@@ -1478,7 +1466,7 @@ mod test {

// pruning a non-leaf node has no effect
{
let mut pmmr:PMMR<TestElem, _> = PMMR::at(&mut ba, sz);
let mut pmmr: PMMR<TestElem, _> = PMMR::at(&mut ba, sz);
pmmr.prune(3, 0).unwrap_err();
assert_eq!(orig_root, pmmr.root());
}

@@ -1486,7 +1474,7 @@ mod test {

// pruning sibling removes subtree
{
let mut pmmr:PMMR<TestElem, _> = PMMR::at(&mut ba, sz);
let mut pmmr: PMMR<TestElem, _> = PMMR::at(&mut ba, sz);
pmmr.prune(5, 0).unwrap();
assert_eq!(orig_root, pmmr.root());
}

@@ -1494,7 +1482,7 @@ mod test {

// pruning all leaves under level >1 removes all subtree
{
let mut pmmr:PMMR<TestElem, _> = PMMR::at(&mut ba, sz);
let mut pmmr: PMMR<TestElem, _> = PMMR::at(&mut ba, sz);
pmmr.prune(1, 0).unwrap();
assert_eq!(orig_root, pmmr.root());
}

@@ -1502,7 +1490,7 @@ mod test {

// pruning everything should only leave us the peaks
{
let mut pmmr:PMMR<TestElem, _> = PMMR::at(&mut ba, sz);
let mut pmmr: PMMR<TestElem, _> = PMMR::at(&mut ba, sz);
for n in 1..16 {
let _ = pmmr.prune(n, 0);
}

@@ -1550,7 +1538,6 @@ mod test {
assert_eq!(pl.get_shift(17), Some(11));
}

#[test]
fn n_size_check() {
assert_eq!(n_leaves(1), 1);
@@ -29,7 +29,6 @@ use core::hash::Hash;
use ser::{self, Readable, Reader, Writeable, Writer};
use core::global;

/// The difficulty is defined as the maximum target divided by the block hash.
#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord)]
pub struct Difficulty {

@@ -63,7 +62,9 @@ impl Difficulty {
let mut in_vec = h.to_vec();
in_vec.truncate(8);
let num = BigEndian::read_u64(&in_vec);
Difficulty { num: max_target / num }
Difficulty {
num: max_target / num,
}
}

/// Converts the difficulty into a u64

@@ -81,28 +82,36 @@ impl fmt::Display for Difficulty {
impl Add<Difficulty> for Difficulty {
type Output = Difficulty;
fn add(self, other: Difficulty) -> Difficulty {
Difficulty { num: self.num + other.num }
Difficulty {
num: self.num + other.num,
}
}
}

impl Sub<Difficulty> for Difficulty {
type Output = Difficulty;
fn sub(self, other: Difficulty) -> Difficulty {
Difficulty { num: self.num - other.num }
Difficulty {
num: self.num - other.num,
}
}
}

impl Mul<Difficulty> for Difficulty {
type Output = Difficulty;
fn mul(self, other: Difficulty) -> Difficulty {
Difficulty { num: self.num * other.num }
Difficulty {
num: self.num * other.num,
}
}
}

impl Div<Difficulty> for Difficulty {
type Output = Difficulty;
fn div(self, other: Difficulty) -> Difficulty {
Difficulty { num: self.num / other.num }
Difficulty {
num: self.num / other.num,
}
}
}
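A quick usage note, purely illustrative and based only on the operator impls above: wrapping the u64 lets difficulty arithmetic read naturally. A minimal self-contained sketch of the same pattern:

// Sketch of the Add impl above on a plain wrapper type.
use std::ops::Add;

struct Difficulty {
	num: u64,
}

impl Add for Difficulty {
	type Output = Difficulty;
	fn add(self, other: Difficulty) -> Difficulty {
		Difficulty { num: self.num + other.num }
	}
}

fn main() {
	let total = Difficulty { num: 10 } + Difficulty { num: 32 };
	assert_eq!(total.num, 42);
}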
@@ -157,7 +166,9 @@ impl<'de> de::Visitor<'de> for DiffVisitor {
&"a value number",
));
};
Ok(Difficulty { num: num_in.unwrap() })
Ok(Difficulty {
num: num_in.unwrap(),
})
}

fn visit_u64<E>(self, value: u64) -> Result<Self::Value, E>
@@ -15,9 +15,9 @@
//! Transactions
use blake2::blake2b::blake2b;
use util::secp::{self, Message, Signature};
use util::{static_secp_instance, kernel_sig_msg};
use util::secp::pedersen::{Commitment, RangeProof, ProofMessage};
use std::cmp::{min, max};
use util::{kernel_sig_msg, static_secp_instance};
use util::secp::pedersen::{Commitment, ProofMessage, RangeProof};
use std::cmp::{max, min};
use std::cmp::Ordering;
use std::{error, fmt};

@@ -29,8 +29,9 @@ use core::BlockHeader;
use core::hash::{Hash, Hashed, ZERO_HASH};
use core::pmmr::MerkleProof;
use keychain;
use keychain::{Identifier, Keychain, BlindingFactor};
use ser::{self, read_and_verify_sorted, PMMRable, Readable, Reader, Writeable, WriteableSorted, Writer, ser_vec};
use keychain::{BlindingFactor, Identifier, Keychain};
use ser::{self, read_and_verify_sorted, ser_vec, PMMRable, Readable, Reader, Writeable,
WriteableSorted, Writer};
use std::io::Cursor;
use util;
use util::LOGGER;

@@ -38,7 +39,8 @@ use util::LOGGER;
/// The size of the blake2 hash of a switch commitment (256 bits)
pub const SWITCH_COMMIT_HASH_SIZE: usize = 32;

/// The size of the secret key used in to generate blake2 switch commitment hash (256 bits)
/// The size of the secret key used in to generate blake2 switch commitment
/// hash (256 bits)
pub const SWITCH_COMMIT_KEY_SIZE: usize = 32;

bitflags! {

@@ -81,7 +83,8 @@ pub enum Error {
OddFee,
/// Kernel fee can't be odd, due to half fee burning
OddKernelFee,
/// Underlying Secp256k1 error (signature validation or invalid public key typically)
/// Underlying Secp256k1 error (signature validation or invalid public key
/// typically)
Secp(secp::Error),
/// Underlying keychain related error
Keychain(keychain::Error),

@@ -100,7 +103,8 @@ pub enum Error {
RangeProof,
/// Error originating from an invalid Merkle proof
MerkleProof,
/// Error originating from an input attempting to spend an immature coinbase output
/// Error originating from an input attempting to spend an immature
/// coinbase output
ImmatureCoinbase,
}

@@ -138,7 +142,6 @@ impl From<keychain::Error> for Error {
}
}

/// A proof that a transaction sums to zero. Includes both the transaction's
/// Pedersen commitment and the signature, that guarantees that the commitments
/// amount to zero.

@@ -164,7 +167,8 @@ pub struct TxKernel {

hashable_ord!(TxKernel);

/// TODO - no clean way to bridge core::hash::Hash and std::hash::Hash implementations?
/// TODO - no clean way to bridge core::hash::Hash and std::hash::Hash
/// implementations?
impl ::std::hash::Hash for TxKernel {
fn hash<H: ::std::hash::Hasher>(&self, state: &mut H) {
let mut vec = Vec::new();

@@ -189,9 +193,8 @@ impl Writeable for TxKernel {

impl Readable for TxKernel {
fn read(reader: &mut Reader) -> Result<TxKernel, ser::Error> {
let features = KernelFeatures::from_bits(reader.read_u8()?).ok_or(
ser::Error::CorruptedData,
)?;
let features =
KernelFeatures::from_bits(reader.read_u8()?).ok_or(ser::Error::CorruptedData)?;
Ok(TxKernel {
features: features,
fee: reader.read_u64()?,

@@ -246,8 +249,7 @@ impl TxKernel {
impl PMMRable for TxKernel {
fn len() -> usize {
17 + // features plus fee and lock_height
secp::constants::PEDERSEN_COMMITMENT_SIZE +
secp::constants::AGG_SIGNATURE_SIZE
secp::constants::PEDERSEN_COMMITMENT_SIZE + secp::constants::AGG_SIGNATURE_SIZE
}
}

@@ -277,7 +279,8 @@ impl Writeable for Transaction {
[write_u64, self.kernels.len() as u64]
);

// Consensus rule that everything is sorted in lexicographical order on the wire.
// Consensus rule that everything is sorted in lexicographical order on the
// wire.
let mut inputs = self.inputs.clone();
let mut outputs = self.outputs.clone();
let mut kernels = self.kernels.clone();

@@ -344,11 +347,7 @@ impl Transaction {

/// Creates a new transaction initialized with
/// the provided inputs, outputs, kernels
pub fn new(
inputs: Vec<Input>,
outputs: Vec<Output>,
kernels: Vec<TxKernel>,
) -> Transaction {
pub fn new(inputs: Vec<Input>, outputs: Vec<Output>, kernels: Vec<TxKernel>) -> Transaction {
Transaction {
offset: BlindingFactor::zero(),
inputs: inputs,

@@ -397,7 +396,9 @@ impl Transaction {

/// Lock height of a transaction is the max lock height of the kernels.
pub fn lock_height(&self) -> u64 {
self.kernels.iter().fold(0, |acc, ref x| max(acc, x.lock_height))
self.kernels
.iter()
.fold(0, |acc, ref x| max(acc, x.lock_height))
}

/// To verify transaction kernels we check that -
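The fold above simply takes the maximum lock_height across a transaction's kernels (0 if there are none). An equivalent hedged standalone sketch:

// Sketch: max lock_height over kernels, defaulting to 0.
struct Kernel {
	lock_height: u64,
}

fn lock_height(kernels: &[Kernel]) -> u64 {
	kernels.iter().fold(0, |acc, k| acc.max(k.lock_height))
}

fn main() {
	let ks = [Kernel { lock_height: 0 }, Kernel { lock_height: 100 }];
	assert_eq!(lock_height(&ks), 100);
}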
@@ -419,10 +420,7 @@ impl Transaction {

// sum all kernels commitments
let kernel_sum = {
let mut kernel_commits = self.kernels
.iter()
.map(|x| x.excess)
.collect::<Vec<_>>();
let mut kernel_commits = self.kernels.iter().map(|x| x.excess).collect::<Vec<_>>();

let secp = static_secp_instance();
let secp = secp.lock().unwrap();

@@ -508,7 +506,7 @@ impl Transaction {
/// But also information required to verify coinbase maturity through
/// the lock_height hashed in the switch_commit_hash.
#[derive(Debug, Clone)]
pub struct Input{
pub struct Input {
/// The features of the output being spent.
/// We will check maturity for coinbase output.
pub features: OutputFeatures,

@@ -524,7 +522,8 @@ pub struct Input{

hashable_ord!(Input);

/// TODO - no clean way to bridge core::hash::Hash and std::hash::Hash implementations?
/// TODO - no clean way to bridge core::hash::Hash and std::hash::Hash
/// implementations?
impl ::std::hash::Hash for Input {
fn hash<H: ::std::hash::Hasher>(&self, state: &mut H) {
let mut vec = Vec::new();

@@ -558,28 +557,17 @@ impl Writeable for Input {
/// an Input from a binary stream.
impl Readable for Input {
fn read(reader: &mut Reader) -> Result<Input, ser::Error> {
let features = OutputFeatures::from_bits(reader.read_u8()?).ok_or(
ser::Error::CorruptedData,
)?;
let features =
OutputFeatures::from_bits(reader.read_u8()?).ok_or(ser::Error::CorruptedData)?;

let commit = Commitment::read(reader)?;

if features.contains(OutputFeatures::COINBASE_OUTPUT) {
let block_hash = Some(Hash::read(reader)?);
let merkle_proof = Some(MerkleProof::read(reader)?);
Ok(Input::new(
features,
commit,
block_hash,
merkle_proof,
))
Ok(Input::new(features, commit, block_hash, merkle_proof))
} else {
Ok(Input::new(
features,
commit,
None,
None,
))
Ok(Input::new(features, commit, None, None))
}
}
}

@@ -589,7 +577,8 @@ impl Readable for Input {
/// Input must also provide the original output features and the hash of the block
/// the output originated from.
impl Input {
/// Build a new input from the data required to identify and verify an output being spent.
/// Build a new input from the data required to identify and verify an
/// output being spent.
pub fn new(
features: OutputFeatures,
commit: Commitment,

@@ -701,7 +690,7 @@ bitflags! {

/// Definition of the switch commitment hash
#[derive(Debug, Copy, Clone, PartialEq, Serialize, Deserialize)]
pub struct SwitchCommitHashKey ([u8; SWITCH_COMMIT_KEY_SIZE]);
pub struct SwitchCommitHashKey([u8; SWITCH_COMMIT_KEY_SIZE]);

impl SwitchCommitHashKey {
/// We use a zero value key for regular transactions.

@@ -712,14 +701,18 @@ impl SwitchCommitHashKey {
/// Generate a switch commit hash key from the provided keychain and key id.
pub fn from_keychain(keychain: &Keychain, key_id: &Identifier) -> SwitchCommitHashKey {
SwitchCommitHashKey(
keychain.switch_commit_hash_key(key_id)
.expect("failed to derive switch commit hash key")
keychain
.switch_commit_hash_key(key_id)
.expect("failed to derive switch commit hash key"),
)
}

/// Reconstructs a switch commit hash key from a byte slice.
pub fn from_bytes(bytes: &[u8]) -> SwitchCommitHashKey {
assert!(bytes.len() == 32, "switch_commit_hash_key requires 32 bytes");
assert!(
bytes.len() == 32,
"switch_commit_hash_key requires 32 bytes"
);

let mut key = [0; SWITCH_COMMIT_KEY_SIZE];
for i in 0..min(SWITCH_COMMIT_KEY_SIZE, bytes.len()) {

@@ -833,7 +826,8 @@ pub struct Output {

hashable_ord!(Output);

/// TODO - no clean way to bridge core::hash::Hash and std::hash::Hash implementations?
/// TODO - no clean way to bridge core::hash::Hash and std::hash::Hash
/// implementations?
impl ::std::hash::Hash for Output {
fn hash<H: ::std::hash::Hasher>(&self, state: &mut H) {
let mut vec = Vec::new();

@@ -866,9 +860,8 @@ impl Writeable for Output {
/// an Output from a binary stream.
impl Readable for Output {
fn read(reader: &mut Reader) -> Result<Output, ser::Error> {
let features = OutputFeatures::from_bits(reader.read_u8()?).ok_or(
ser::Error::CorruptedData,
)?;
let features =
OutputFeatures::from_bits(reader.read_u8()?).ok_or(ser::Error::CorruptedData)?;

Ok(Output {
features: features,

@@ -899,7 +892,12 @@ impl Output {
pub fn verify_proof(&self) -> Result<(), secp::Error> {
let secp = static_secp_instance();
let secp = secp.lock().unwrap();
match Keychain::verify_range_proof(&secp, self.commit, self.proof, Some(self.switch_commit_hash.as_ref().to_vec())){
match Keychain::verify_range_proof(
&secp,
self.commit,
self.proof,
Some(self.switch_commit_hash.as_ref().to_vec()),
) {
Ok(_) => Ok(()),
Err(e) => Err(e),
}

@@ -908,10 +906,16 @@ impl Output {
/// Given the original blinding factor we can recover the
/// value from the range proof and the commitment
pub fn recover_value(&self, keychain: &Keychain, key_id: &Identifier) -> Option<u64> {
match keychain.rewind_range_proof(key_id, self.commit, Some(self.switch_commit_hash.as_ref().to_vec()), self.proof) {
match keychain.rewind_range_proof(
key_id,
self.commit,
Some(self.switch_commit_hash.as_ref().to_vec()),
self.proof,
) {
Ok(proof_info) => {
if proof_info.success {
let elements = ProofMessageElements::from_proof_message(proof_info.message).unwrap();
let elements =
ProofMessageElements::from_proof_message(proof_info.message).unwrap();
Some(elements.value)
} else {
None

@@ -920,7 +924,6 @@ impl Output {
Err(_) => None,
}
}

}

/// An output_identifier can be build from either an input _or_ an output and

@@ -980,9 +983,8 @@ impl Writeable for OutputIdentifier {

impl Readable for OutputIdentifier {
fn read(reader: &mut Reader) -> Result<OutputIdentifier, ser::Error> {
let features = OutputFeatures::from_bits(reader.read_u8()?).ok_or(
ser::Error::CorruptedData,
)?;
let features =
OutputFeatures::from_bits(reader.read_u8()?).ok_or(ser::Error::CorruptedData)?;
Ok(OutputIdentifier {
commit: Commitment::read(reader)?,
features: features,

@@ -1015,7 +1017,7 @@ impl OutputStoreable {

/// Return a regular output
pub fn to_output(self, rproof: RangeProof) -> Output {
Output{
Output {
features: self.features,
commit: self.commit,
switch_commit_hash: self.switch_commit_hash,

@@ -1043,9 +1045,8 @@ impl Writeable for OutputStoreable {

impl Readable for OutputStoreable {
fn read(reader: &mut Reader) -> Result<OutputStoreable, ser::Error> {
let features = OutputFeatures::from_bits(reader.read_u8()?).ok_or(
ser::Error::CorruptedData,
)?;
let features =
OutputFeatures::from_bits(reader.read_u8()?).ok_or(ser::Error::CorruptedData)?;
Ok(OutputStoreable {
commit: Commitment::read(reader)?,
switch_commit_hash: SwitchCommitHash::read(reader)?,

@@ -1083,13 +1084,14 @@ impl Readable for ProofMessageElements {

impl ProofMessageElements {
/// Serialise and return a ProofMessage
pub fn to_proof_message(&self)->ProofMessage {
pub fn to_proof_message(&self) -> ProofMessage {
ProofMessage::from_bytes(&ser_vec(self).unwrap())
}

/// Deserialise and return the message elements
pub fn from_proof_message(proof_message:ProofMessage)
-> Result<ProofMessageElements, ser::Error> {
pub fn from_proof_message(
proof_message: ProofMessage,
) -> Result<ProofMessageElements, ser::Error> {
let mut c = Cursor::new(proof_message.as_bytes());
ser::deserialize::<ProofMessageElements>(&mut c)
}

@@ -1109,7 +1111,7 @@ mod test {
let commit = keychain.commit(5, &key_id).unwrap();

// just some bytes for testing ser/deser
let sig = secp::Signature::from_raw_data(&[0;64]).unwrap();
let sig = secp::Signature::from_raw_data(&[0; 64]).unwrap();

let kernel = TxKernel {
features: KernelFeatures::DEFAULT_KERNEL,

@@ -1153,13 +1155,18 @@ mod test {
let key_id = keychain.derive_key_id(1).unwrap();
let commit = keychain.commit(5, &key_id).unwrap();
let switch_commit = keychain.switch_commit(&key_id).unwrap();
let switch_commit_hash = SwitchCommitHash::from_switch_commit(
switch_commit,
&keychain,
&key_id,
);
let switch_commit_hash =
SwitchCommitHash::from_switch_commit(switch_commit, &keychain, &key_id);
let msg = secp::pedersen::ProofMessage::empty();
let proof = keychain.range_proof(5, &key_id, commit, Some(switch_commit_hash.as_ref().to_vec()), msg).unwrap();
let proof = keychain
.range_proof(
5,
&key_id,
commit,
Some(switch_commit_hash.as_ref().to_vec()),
msg,
)
.unwrap();

let out = Output {
features: OutputFeatures::DEFAULT_OUTPUT,

@@ -1185,16 +1192,19 @@ mod test {

let commit = keychain.commit(value, &key_id).unwrap();
let switch_commit = keychain.switch_commit(&key_id).unwrap();
let switch_commit_hash = SwitchCommitHash::from_switch_commit(
switch_commit,
&keychain,
&key_id,
);
let msg = (ProofMessageElements {
value: value,
}).to_proof_message();
let switch_commit_hash =
SwitchCommitHash::from_switch_commit(switch_commit, &keychain, &key_id);
let msg = (ProofMessageElements { value: value }).to_proof_message();

let proof = keychain.range_proof(value, &key_id, commit, Some(switch_commit_hash.as_ref().to_vec()), msg).unwrap();
let proof = keychain
.range_proof(
value,
&key_id,
commit,
Some(switch_commit_hash.as_ref().to_vec()),
msg,
)
.unwrap();

let output = Output {
features: OutputFeatures::DEFAULT_OUTPUT,

@@ -1212,7 +1222,8 @@ mod test {
return;
}

// Bulletproofs message unwind will just be gibberish given the wrong blinding factor
// Bulletproofs message unwind will just be gibberish given the wrong blinding
// factor
}

#[test]

@@ -1257,8 +1268,8 @@ mod test {
let short_id = input.short_id(&block_hash, nonce);
assert_eq!(short_id, ShortId::from_hex("28fea5a693af").unwrap());

// now generate the short_id for a *very* similar output (single feature flag different)
// and check it generates a different short_id
// now generate the short_id for a *very* similar output (single feature flag
// different) and check it generates a different short_id
let input = Input {
features: OutputFeatures::COINBASE_OUTPUT,
commit: commit,
@@ -59,13 +59,13 @@ pub fn genesis_testnet1() -> core::Block {
..time::empty_tm()
},
nonce: 28205,
pow: core::Proof::new(vec![0x21e, 0x7a2, 0xeae, 0x144e, 0x1b1c, 0x1fbd,
0x203a, 0x214b, 0x293b, 0x2b74, 0x2bfa, 0x2c26,
0x32bb, 0x346a, 0x34c7, 0x37c5, 0x4164, 0x42cc,
0x4cc3, 0x55af, 0x5a70, 0x5b14, 0x5e1c, 0x5f76,
0x6061, 0x60f9, 0x61d7, 0x6318, 0x63a1, 0x63fb,
0x649b, 0x64e5, 0x65a1, 0x6b69, 0x70f8, 0x71c7,
0x71cd, 0x7492, 0x7b11, 0x7db8, 0x7f29, 0x7ff8]),
pow: core::Proof::new(vec![
0x21e, 0x7a2, 0xeae, 0x144e, 0x1b1c, 0x1fbd, 0x203a, 0x214b, 0x293b, 0x2b74,
0x2bfa, 0x2c26, 0x32bb, 0x346a, 0x34c7, 0x37c5, 0x4164, 0x42cc, 0x4cc3, 0x55af,
0x5a70, 0x5b14, 0x5e1c, 0x5f76, 0x6061, 0x60f9, 0x61d7, 0x6318, 0x63a1, 0x63fb,
0x649b, 0x64e5, 0x65a1, 0x6b69, 0x70f8, 0x71c7, 0x71cd, 0x7492, 0x7b11, 0x7db8,
0x7f29, 0x7ff8,
]),
..Default::default()
},
inputs: vec![],

@@ -93,13 +93,14 @@ pub fn genesis_testnet2() -> core::Block {
difficulty: Difficulty::from_num(global::initial_block_difficulty()),
total_difficulty: Difficulty::from_num(global::initial_block_difficulty()),
nonce: 70081,
pow: core::Proof::new(vec![0x43ee48, 0x18d5a49, 0x2b76803, 0x3181a29, 0x39d6a8a, 0x39ef8d8,
0x478a0fb, 0x69c1f9e, 0x6da4bca, 0x6f8782c, 0x9d842d7, 0xa051397,
0xb56934c, 0xbf1f2c7, 0xc992c89, 0xce53a5a, 0xfa87225, 0x1070f99e,
0x107b39af, 0x1160a11b, 0x11b379a8, 0x12420e02, 0x12991602, 0x12cc4a71,
0x13d91075, 0x15c950d0, 0x1659b7be, 0x1682c2b4, 0x1796c62f, 0x191cf4c9,
0x19d71ac0, 0x1b812e44, 0x1d150efe, 0x1d15bd77, 0x1d172841, 0x1d51e967,
0x1ee1de39, 0x1f35c9b3, 0x1f557204, 0x1fbf884f, 0x1fcf80bf, 0x1fd59d40]),
pow: core::Proof::new(vec![
0x43ee48, 0x18d5a49, 0x2b76803, 0x3181a29, 0x39d6a8a, 0x39ef8d8, 0x478a0fb,
0x69c1f9e, 0x6da4bca, 0x6f8782c, 0x9d842d7, 0xa051397, 0xb56934c, 0xbf1f2c7,
0xc992c89, 0xce53a5a, 0xfa87225, 0x1070f99e, 0x107b39af, 0x1160a11b, 0x11b379a8,
0x12420e02, 0x12991602, 0x12cc4a71, 0x13d91075, 0x15c950d0, 0x1659b7be, 0x1682c2b4,
0x1796c62f, 0x191cf4c9, 0x19d71ac0, 0x1b812e44, 0x1d150efe, 0x1d15bd77, 0x1d172841,
0x1d51e967, 0x1ee1de39, 0x1f35c9b3, 0x1f557204, 0x1fbf884f, 0x1fcf80bf, 0x1fd59d40,
]),
..Default::default()
},
inputs: vec![],
@@ -25,8 +25,8 @@ use std::sync::RwLock;
use consensus::PROOFSIZE;
use consensus::DEFAULT_SIZESHIFT;
use consensus::COINBASE_MATURITY;
use consensus::{MEDIAN_TIME_WINDOW, INITIAL_DIFFICULTY,
BLOCK_TIME_SEC, DIFFICULTY_ADJUST_WINDOW, CUT_THROUGH_HORIZON};
use consensus::{BLOCK_TIME_SEC, CUT_THROUGH_HORIZON, DIFFICULTY_ADJUST_WINDOW, INITIAL_DIFFICULTY,
MEDIAN_TIME_WINDOW};
use core::target::Difficulty;
use consensus::TargetError;

@@ -64,8 +64,9 @@ pub const TESTNET2_INITIAL_DIFFICULTY: u64 = 1;

/// The target is the 32-bytes hash block hashes must be lower than.
pub const MAX_PROOF_TARGET: [u8; 8] = [0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff];

/// We want to slow this right down for user testing at cuckoo 16, so pick a smaller max

/// We want to slow this right down for user testing at cuckoo 16, so pick a
/// smaller max
pub const MAX_PROOF_TARGET_TESTING: [u8; 8] = [0x05, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff];

/// Types of chain a server can run with, dictates the genesis block and

@@ -78,13 +79,13 @@ pub enum ChainTypes {
/// For User testing
UserTesting,

/// First test network
/// First test network
Testnet1,

/// Second test network
Testnet2,

/// Main production network
/// Main production network
Mainnet,
}

@@ -193,9 +194,8 @@ pub fn is_user_testing_mode() -> bool {
/// Are we in production mode (a live public network)?
pub fn is_production_mode() -> bool {
let param_ref = CHAIN_TYPE.read().unwrap();
ChainTypes::Testnet1 == *param_ref ||
ChainTypes::Testnet2 == *param_ref ||
ChainTypes::Mainnet == *param_ref
ChainTypes::Testnet1 == *param_ref || ChainTypes::Testnet2 == *param_ref
|| ChainTypes::Mainnet == *param_ref
}

/// Helper function to get a nonce known to create a valid POW on

@@ -210,22 +210,21 @@ pub fn get_genesis_nonce() -> u64 {
// Magic nonce for current genesis block at cuckoo16
ChainTypes::UserTesting => 27944,
// Magic nonce for genesis block for testnet2 (cuckoo30)

_ => panic!("Pre-set"),
}
}

/// Converts an iterator of block difficulty data to more a more mangeable vector and pads
/// Converts an iterator of block difficulty data to more a more mangeable vector and pads
/// if needed (which will) only be needed for the first few blocks after genesis

pub fn difficulty_data_to_vector<T>(cursor: T) -> Vec<Result<(u64, Difficulty), TargetError>>
where
T: IntoIterator<Item = Result<(u64, Difficulty), TargetError>> {
where
T: IntoIterator<Item = Result<(u64, Difficulty), TargetError>>,
{
// Convert iterator to vector, so we can append to it if necessary
let needed_block_count = (MEDIAN_TIME_WINDOW + DIFFICULTY_ADJUST_WINDOW) as usize;
let mut last_n: Vec<Result<(u64, Difficulty), TargetError>> = cursor.into_iter()
.take(needed_block_count)
.collect();
let mut last_n: Vec<Result<(u64, Difficulty), TargetError>> =
cursor.into_iter().take(needed_block_count).collect();

// Sort blocks from earliest to latest (to keep conceptually easier)
last_n.reverse();

@@ -235,18 +234,19 @@ pub fn difficulty_data_to_vector<T>(cursor: T) -> Vec<Result<(u64, Difficulty),
let block_count_difference = needed_block_count - last_n.len();
if block_count_difference > 0 {
// Collect any real data we have
let mut live_intervals:Vec<(u64, Difficulty)> = last_n.iter()
let mut live_intervals: Vec<(u64, Difficulty)> = last_n
.iter()
.map(|b| (b.clone().unwrap().0, b.clone().unwrap().1))
.collect();
for i in (1..live_intervals.len()).rev() {
// prevents issues with very fast automated test chains
if live_intervals[i-1].0 > live_intervals[i].0 {
if live_intervals[i - 1].0 > live_intervals[i].0 {
live_intervals[i].0 = 0;
} else {
live_intervals[i].0=live_intervals[i].0-live_intervals[i-1].0;
live_intervals[i].0 = live_intervals[i].0 - live_intervals[i - 1].0;
}
}
//
//
// Remove genesis "interval"
if live_intervals.len() > 1 {
live_intervals.remove(0);
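The interval rewrite above walks the window from newest to oldest, turning absolute timestamps into per-block deltas while the earlier entries are still absolute. A hedged standalone sketch of the same idea (illustrative names; saturating_sub stands in for the out-of-order guard):

// Sketch: convert absolute timestamps to per-block intervals in place,
// iterating in reverse so ts[i - 1] is still absolute when ts[i] is rewritten.
fn to_intervals(ts: &mut Vec<u64>) {
	for i in (1..ts.len()).rev() {
		// clamps to 0 when timestamps are out of order (fast test chains)
		ts[i] = ts[i].saturating_sub(ts[i - 1]);
	}
}

fn main() {
	let mut ts = vec![100, 160, 220];
	to_intervals(&mut ts);
	assert_eq!(ts, vec![100, 60, 60]);
}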
@@ -266,7 +266,7 @@ pub fn difficulty_data_to_vector<T>(cursor: T) -> Vec<Result<(u64, Difficulty),
let last_diff = &live_intervals[interval_index].1;
last_n.insert(0, Ok((last_ts, last_diff.clone())));
interval_index = match interval_index {
0 => live_intervals.len()-1,
0 => live_intervals.len() - 1,
_ => interval_index - 1,
};
}
@@ -22,20 +22,16 @@
use std::{cmp, error, fmt};
use std::io::{self, Read, Write};
use byteorder::{BigEndian, ByteOrder, ReadBytesExt};
use keychain::{Identifier, BlindingFactor, IDENTIFIER_SIZE};
use keychain::{BlindingFactor, Identifier, IDENTIFIER_SIZE};
use consensus;
use consensus::VerifySortOrder;
use core::hash::Hashed;
use core::transaction::{SWITCH_COMMIT_HASH_SIZE, SwitchCommitHash};
use core::transaction::{SwitchCommitHash, SWITCH_COMMIT_HASH_SIZE};
use util::secp::pedersen::Commitment;
use util::secp::pedersen::RangeProof;
use util::secp::Signature;
use util::secp::constants::{
MAX_PROOF_SIZE,
PEDERSEN_COMMITMENT_SIZE,
AGG_SIGNATURE_SIZE,
SECRET_KEY_SIZE,
};
use util::secp::constants::{AGG_SIGNATURE_SIZE, MAX_PROOF_SIZE, PEDERSEN_COMMITMENT_SIZE,
SECRET_KEY_SIZE};

/// Possible errors deriving from serializing or deserializing.
#[derive(Debug)]

@@ -207,7 +203,8 @@ pub trait Writeable {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), Error>;
}

/// Trait to allow a collection of Writeables to be written in lexicographical sort order.
/// Trait to allow a collection of Writeables to be written in lexicographical
/// sort order.
pub trait WriteableSorted {
/// Write the data but sort it first.
fn write_sorted<W: Writer>(&mut self, writer: &mut W) -> Result<(), Error>;

@@ -403,7 +400,6 @@ impl Writeable for Signature {
}
}

/// Utility wrapper for an underlying byte Writer. Defines higher level methods
/// to write numbers, byte vectors, hashes, etc.
struct BinWriter<'a> {
@@ -20,9 +20,14 @@ use core::core::target::Difficulty;
use core::global;
use core::consensus::*;

// Builds an iterator for next difficulty calculation with the provided
// constant time interval, difficulty and total length.
fn repeat(interval: u64, diff: u64, len: u64, cur_time:Option<u64>) -> Vec<Result<(u64, Difficulty), TargetError>> {
// Builds an iterator for next difficulty calculation with the provided
// constant time interval, difficulty and total length.
fn repeat(
interval: u64,
diff: u64,
len: u64,
cur_time: Option<u64>,
) -> Vec<Result<(u64, Difficulty), TargetError>> {
let cur_time = match cur_time {
Some(t) => t,
None => time::get_time().sec as u64,

@@ -39,25 +44,32 @@ fn repeat(interval: u64, diff: u64, len: u64, cur_time:Option<u64>) -> Vec<Resul

// Creates a new chain with a genesis at a simulated difficulty
fn create_chain_sim(diff: u64) -> Vec<Result<(u64, Difficulty), TargetError>> {
vec![Ok((time::get_time().sec as u64, Difficulty::from_num(diff)))]
vec![
Ok((time::get_time().sec as u64, Difficulty::from_num(diff))),
]
}

// Adds another 'block' to the iterator, so to speak, with difficulty calculated
// from the difficulty adjustment at interval seconds from the previous block
fn add_block(interval: u64, chain_sim: Vec<Result<(u64, Difficulty), TargetError>>)
-> Vec<Result<(u64, Difficulty), TargetError>> {
fn add_block(
interval: u64,
chain_sim: Vec<Result<(u64, Difficulty), TargetError>>,
) -> Vec<Result<(u64, Difficulty), TargetError>> {
let mut return_chain = chain_sim.clone();
// get last interval
let last_elem = chain_sim.first().as_ref().unwrap().as_ref().unwrap();
return_chain.insert(0, Ok((last_elem.0+interval, last_elem.clone().1)));
return_chain.insert(0, Ok((last_elem.0 + interval, last_elem.clone().1)));
let diff = next_difficulty(return_chain.clone()).unwrap();
return_chain[0]=Ok((last_elem.0+interval, diff));
return_chain[0] = Ok((last_elem.0 + interval, diff));
return_chain
}

// Adds another n 'blocks' to the iterator, with difficulty calculated
fn add_block_repeated(interval: u64, chain_sim: Vec<Result<(u64, Difficulty), TargetError>>, iterations: usize)
-> Vec<Result<(u64, Difficulty), TargetError>> {
fn add_block_repeated(
interval: u64,
chain_sim: Vec<Result<(u64, Difficulty), TargetError>>,
iterations: usize,
) -> Vec<Result<(u64, Difficulty), TargetError>> {
let mut return_chain = chain_sim.clone();
for _ in 0..iterations {
return_chain = add_block(interval, return_chain.clone());

@@ -65,19 +77,23 @@ fn add_block_repeated(interval: u64, chain_sim: Vec<Result<(u64, Difficulty), Ta
return_chain
}

// Prints the contents of the iterator and its difficulties.. useful for tweaking
fn print_chain_sim(chain_sim: &Vec<Result<(u64, Difficulty), TargetError>>) {
let mut chain_sim=chain_sim.clone();
// Prints the contents of the iterator and its difficulties.. useful for
// tweaking
fn print_chain_sim(chain_sim: &Vec<Result<(u64, Difficulty), TargetError>>) {
let mut chain_sim = chain_sim.clone();
chain_sim.reverse();
let mut last_time=0;
chain_sim.iter()
.enumerate()
.for_each(|(i, b)| {
let block = b.as_ref().unwrap();
println!("Height: {}, Time: {}, Interval: {}, Next network difficulty:{}",
i, block.0, block.0-last_time, block.1);
last_time=block.0;
});
let mut last_time = 0;
chain_sim.iter().enumerate().for_each(|(i, b)| {
let block = b.as_ref().unwrap();
println!(
"Height: {}, Time: {}, Interval: {}, Next network difficulty:{}",
i,
block.0,
block.0 - last_time,
block.1
);
last_time = block.0;
});
}

fn repeat_offs(

@@ -86,10 +102,13 @@ fn repeat_offs(
diff: u64,
len: u64,
) -> Vec<Result<(u64, Difficulty), TargetError>> {
map_vec!(repeat(interval, diff, len, Some(from)), |e| match e.clone() {
Err(e) => Err(e),
Ok((t, d)) => Ok((t, d)),
})
map_vec!(
repeat(interval, diff, len, Some(from)),
|e| match e.clone() {
Err(e) => Err(e),
Ok((t, d)) => Ok((t, d)),
}
)
}

/// Checks different next_target adjustments and difficulty boundaries

@@ -123,7 +142,7 @@ fn adjustment_scenarios() {
println!("*********************************************************");
let just_enough = (DIFFICULTY_ADJUST_WINDOW + MEDIAN_TIME_WINDOW) as usize;

// Steady difficulty for a good while, then a sudden drop
// Steady difficulty for a good while, then a sudden drop
let chain_sim = create_chain_sim(global::initial_block_difficulty());
let chain_sim = add_block_repeated(10, chain_sim, just_enough as usize);
let chain_sim = add_block_repeated(600, chain_sim, 10);

@@ -135,7 +154,7 @@ fn adjustment_scenarios() {
print_chain_sim(&chain_sim);
println!("*********************************************************");

// Sudden increase
// Sudden increase
let chain_sim = create_chain_sim(global::initial_block_difficulty());
let chain_sim = add_block_repeated(60, chain_sim, just_enough as usize);
let chain_sim = add_block_repeated(10, chain_sim, 10);

@@ -147,7 +166,7 @@ fn adjustment_scenarios() {
print_chain_sim(&chain_sim);
println!("*********************************************************");

// Oscillations
// Oscillations
let chain_sim = create_chain_sim(global::initial_block_difficulty());
let chain_sim = add_block_repeated(60, chain_sim, just_enough as usize);
let chain_sim = add_block_repeated(10, chain_sim, 10);

@@ -166,7 +185,7 @@ fn adjustment_scenarios() {
#[test]
fn next_target_adjustment() {
global::set_mining_mode(global::ChainTypes::AutomatedTesting);
let cur_time = time::get_time().sec as u64;
let cur_time = time::get_time().sec as u64;

assert_eq!(
next_difficulty(vec![Ok((cur_time, Difficulty::one()))]).unwrap(),

@@ -194,7 +213,12 @@ fn next_target_adjustment() {
// checking averaging works
let sec = DIFFICULTY_ADJUST_WINDOW / 2 + MEDIAN_TIME_WINDOW;
let mut s1 = repeat(60, 500, sec, Some(cur_time));
let mut s2 = repeat_offs(cur_time+(sec * 60) as u64, 60, 1500, DIFFICULTY_ADJUST_WINDOW / 2);
let mut s2 = repeat_offs(
cur_time + (sec * 60) as u64,
60,
1500,
DIFFICULTY_ADJUST_WINDOW / 2,
);
s2.append(&mut s1);
assert_eq!(next_difficulty(s2).unwrap(), Difficulty::from_num(1000));
@@ -15,7 +15,7 @@
use std::fs::File;
use std::net::SocketAddr;
use std::ops::Deref;
use std::sync::{Arc, Weak, RwLock};
use std::sync::{Arc, RwLock, Weak};
use std::sync::atomic::{AtomicBool, Ordering};
use rand;
use rand::Rng;

@@ -88,8 +88,8 @@ impl p2p::ChainAdapter for NetToChainAdapter {
b.header.height,
addr,
);
self.process_block(b, addr)
}
self.process_block(b, addr)
}

fn compact_block_received(&self, cb: core::CompactBlock, addr: SocketAddr) -> bool {
let bhash = cb.hash();

@@ -105,7 +105,7 @@ impl p2p::ChainAdapter for NetToChainAdapter {
let block = core::Block::hydrate_from(cb, vec![]);

// push the freshly hydrated block through the chain pipeline
self.process_block(block, addr)
self.process_block(block, addr)
} else {
// TODO - do we need to validate the header here?

@@ -114,11 +114,7 @@ impl p2p::ChainAdapter for NetToChainAdapter {
tx_pool.retrieve_transactions(&cb)
};

debug!(
LOGGER,
"adapter: txs from tx pool - {}",
txs.len(),
);
debug!(LOGGER, "adapter: txs from tx pool - {}", txs.len(),);

// TODO - 3 scenarios here -
// 1) we hydrate a valid block (good to go)

@@ -131,7 +127,10 @@ impl p2p::ChainAdapter for NetToChainAdapter {
debug!(LOGGER, "adapter: successfully hydrated block from tx pool!");
self.process_block(block, addr)
} else {
debug!(LOGGER, "adapter: block invalid after hydration, requesting full block");
debug!(
LOGGER,
"adapter: block invalid after hydration, requesting full block"
);
self.request_block(&cb.header, &addr);
true
}

@@ -142,10 +141,7 @@ impl p2p::ChainAdapter for NetToChainAdapter {
let bhash = bh.hash();
debug!(
LOGGER,
"Received block header {} at {} from {}, going to process.",
bhash,
bh.height,
addr,
"Received block header {} at {} from {}, going to process.", bhash, bh.height, addr,
);

// pushing the new block header through the header chain pipeline

@@ -155,7 +151,10 @@ impl p2p::ChainAdapter for NetToChainAdapter {
if let &Err(ref e) = &res {
debug!(LOGGER, "Block header {} refused by chain: {:?}", bhash, e);
if e.is_bad_data() {
debug!(LOGGER, "header_received: {} is a bad header, resetting header head", bhash);
debug!(
LOGGER,
"header_received: {} is a bad header, resetting header head", bhash
);
let _ = w(&self.chain).reset_head();
return false;
} else {

@@ -226,22 +225,14 @@ impl p2p::ChainAdapter for NetToChainAdapter {
}

fn locate_headers(&self, locator: Vec<Hash>) -> Vec<core::BlockHeader> {
debug!(
LOGGER,
"locate_headers: {:?}",
locator,
);
debug!(LOGGER, "locate_headers: {:?}", locator,);

let header = match self.find_common_header(locator) {
Some(header) => header,
None => return vec![],
};

debug!(
LOGGER,
"locate_headers: common header: {:?}",
header.hash(),
);
debug!(LOGGER, "locate_headers: common header: {:?}", header.hash(),);

// looks like we know one, getting as many following headers as allowed
let hh = header.height;

@@ -287,8 +278,10 @@ impl p2p::ChainAdapter for NetToChainAdapter {
reader: read,
}),
Err(e) => {
warn!(LOGGER, "Couldn't produce sumtrees data for block {}: {:?}",
h, e);
warn!(
LOGGER,
"Couldn't produce sumtrees data for block {}: {:?}", h, e
);
None
}
}

@@ -307,9 +300,9 @@ impl p2p::ChainAdapter for NetToChainAdapter {
_peer_addr: SocketAddr,
) -> bool {
// TODO check whether we should accept any sumtree now
if let Err(e) = w(&self.chain).
sumtrees_write(h, rewind_to_output, rewind_to_kernel, sumtree_data) {

if let Err(e) =
w(&self.chain).sumtrees_write(h, rewind_to_output, rewind_to_kernel, sumtree_data)
{
error!(LOGGER, "Failed to save sumtree archive: {:?}", e);
!e.is_bad_data()
} else {

@@ -362,10 +355,10 @@ impl NetToChainAdapter {
} else {
self.find_common_header(locator[1..].to_vec())
}
},
}
Err(chain::Error::StoreErr(store::Error::NotFoundErr, _)) => {
self.find_common_header(locator[1..].to_vec())
},
}
Err(e) => {
error!(LOGGER, "Could not build header locator: {:?}", e);
None

@@ -375,31 +368,37 @@ impl NetToChainAdapter {

// pushing the new block through the chain pipeline
// remembering to reset the head if we have a bad block
fn process_block(&self, b: core::Block, addr: SocketAddr) -> bool {
let prev_hash = b.header.previous;
let bhash = b.hash();
let chain = w(&self.chain);
match chain.process_block(b, self.chain_opts()) {
Ok(_) => true,
Err(chain::Error::Orphan) => {
// make sure we did not miss the parent block
if !self.currently_syncing.load(Ordering::Relaxed) && !chain.is_orphan(&prev_hash) {
debug!(LOGGER, "adapter: process_block: received an orphan block, checking the parent: {:}", prev_hash);
self.request_block_by_hash(prev_hash, &addr)
}
true
}
Err(ref e) if e.is_bad_data() => {
debug!(LOGGER, "adapter: process_block: {} is a bad block, resetting head", bhash);
let _ = chain.reset_head();
false
}
Err(e) => {
debug!(LOGGER, "adapter: process_block :block {} refused by chain: {:?}", bhash, e);
true
}
}
}
fn process_block(&self, b: core::Block, addr: SocketAddr) -> bool {
let prev_hash = b.header.previous;
let bhash = b.hash();
let chain = w(&self.chain);
match chain.process_block(b, self.chain_opts()) {
Ok(_) => true,
Err(chain::Error::Orphan) => {
// make sure we did not miss the parent block
if !self.currently_syncing.load(Ordering::Relaxed) && !chain.is_orphan(&prev_hash) {
debug!(LOGGER, "adapter: process_block: received an orphan block, checking the parent: {:}", prev_hash);
self.request_block_by_hash(prev_hash, &addr)
}
true
}
Err(ref e) if e.is_bad_data() => {
debug!(
LOGGER,
"adapter: process_block: {} is a bad block, resetting head", bhash
);
let _ = chain.reset_head();
false
}
Err(e) => {
debug!(
LOGGER,
"adapter: process_block :block {} refused by chain: {:?}", bhash, e
);
true
}
}
}

// After receiving a compact block if we cannot successfully hydrate
// it into a full block then fallback to requesting the full block

@@ -408,12 +407,12 @@ impl NetToChainAdapter {
// TODO - currently only request block from a single peer
// consider additional peers for redundancy?
fn request_block(&self, bh: &BlockHeader, addr: &SocketAddr) {
self.request_block_by_hash(bh.hash(), addr)
self.request_block_by_hash(bh.hash(), addr)
}

fn request_block_by_hash(&self, h: Hash, addr: &SocketAddr) {
self.send_block_request_to_peer(h, addr, |peer, h| peer.send_block_request(h))
}
self.send_block_request_to_peer(h, addr, |peer, h| peer.send_block_request(h))
}

// After we have received a block header in "header first" propagation
// we need to go request the block (compact representation) from the

@@ -422,12 +421,16 @@ impl NetToChainAdapter {
// TODO - currently only request block from a single peer
// consider additional peers for redundancy?
fn request_compact_block(&self, bh: &BlockHeader, addr: &SocketAddr) {
self.send_block_request_to_peer(bh.hash(), addr, |peer, h| peer.send_compact_block_request(h))
self.send_block_request_to_peer(bh.hash(), addr, |peer, h| {
peer.send_compact_block_request(h)
})
}

fn send_block_request_to_peer<F>(&self, h: Hash, addr: &SocketAddr, f: F)
where F: Fn(&p2p::Peer, Hash) -> Result<(), p2p::Error> {
match w(&self.chain).block_exists(h) {
fn send_block_request_to_peer<F>(&self, h: Hash, addr: &SocketAddr, f: F)
where
F: Fn(&p2p::Peer, Hash) -> Result<(), p2p::Error>,
{
match w(&self.chain).block_exists(h) {
Ok(false) => {
match wo(&self.peers).get_connected_peer(addr) {
None => debug!(LOGGER, "send_block_request_to_peer: can't send request to peer {:?}, not connected", addr),

@@ -446,7 +449,7 @@ impl NetToChainAdapter {
Ok(true) => debug!(LOGGER, "send_block_request_to_peer: block {} already known", h),
Err(e) => error!(LOGGER, "send_block_request_to_peer: failed to check block exists: {:?}", e)
}
}
}

/// Prepare options for the chain pipeline
fn chain_opts(&self) -> chain::Options {

@@ -490,7 +493,6 @@ impl ChainAdapter for ChainToPoolAndNetAdapter {
// but broadcast full block if we have no txs
let cb = b.as_compact_block();
if cb.kern_ids.is_empty() {

// in the interest of testing all code paths
// randomly decide how we send an empty block out
// TODO - lock this down once we are comfortable it works...

@@ -574,13 +576,11 @@ impl PoolToChainAdapter {
|
||||
impl pool::BlockChain for PoolToChainAdapter {
|
||||
fn is_unspent(&self, output_ref: &OutputIdentifier) -> Result<Hash, pool::PoolError> {
|
||||
wo(&self.chain)
|
||||
.is_unspent(output_ref)
|
||||
.map_err(|e| match e {
|
||||
chain::types::Error::OutputNotFound => pool::PoolError::OutputNotFound,
|
||||
chain::types::Error::OutputSpent => pool::PoolError::OutputSpent,
|
||||
_ => pool::PoolError::GenericPoolError,
|
||||
})
|
||||
wo(&self.chain).is_unspent(output_ref).map_err(|e| match e {
|
||||
chain::types::Error::OutputNotFound => pool::PoolError::OutputNotFound,
|
||||
chain::types::Error::OutputSpent => pool::PoolError::OutputSpent,
|
||||
_ => pool::PoolError::GenericPoolError,
|
||||
})
|
||||
}
|
||||
|
||||
fn is_matured(&self, input: &Input, height: u64) -> Result<(), pool::PoolError> {
|
||||
|
@ -590,7 +590,7 @@ impl pool::BlockChain for PoolToChainAdapter {
|
|||
chain::types::Error::OutputNotFound => pool::PoolError::OutputNotFound,
|
||||
_ => pool::PoolError::GenericPoolError,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
fn head_header(&self) -> Result<BlockHeader, pool::PoolError> {
|
||||
wo(&self.chain)
|
||||
|
|
|
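The closure-parameterized request helper above is the interesting shape in these hunks: one guard (skip the request when the block is already known locally) serves both the full-block and compact-block paths. A minimal standalone sketch of that pattern, with stand-in types rather than the real chain/p2p APIs:

struct Peer;

impl Peer {
	fn send_block_request(&self, h: u64) -> Result<(), ()> {
		println!("requesting full block {}", h);
		Ok(())
	}
	fn send_compact_block_request(&self, h: u64) -> Result<(), ()> {
		println!("requesting compact block {}", h);
		Ok(())
	}
}

// the same guard serves both request kinds; the send step is passed in as a closure
fn send_block_request_to_peer<F>(peer: &Peer, h: u64, block_known: bool, f: F)
where
	F: Fn(&Peer, u64) -> Result<(), ()>,
{
	if !block_known {
		let _ = f(peer, h);
	}
}

fn main() {
	let peer = Peer;
	send_block_request_to_peer(&peer, 42, false, |p, h| p.send_block_request(h));
	send_block_request_to_peer(&peer, 42, false, |p, h| p.send_compact_block_request(h));
}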
@ -48,7 +48,6 @@ use pow::plugin::PluginMiner;

use itertools::Itertools;

// Max number of transactions this miner will assemble in a block
const MAX_TX: u32 = 5000;

@ -202,10 +201,13 @@ impl Miner {
if let Some(s) = job_handle.get_solution() {
let proof = Proof::new(s.solution_nonces.to_vec());
let proof_diff = proof.clone().to_difficulty();
trace!(LOGGER, "Found cuckoo solution for nonce {} of difficulty {} (difficulty target {})",
trace!(
LOGGER,
"Found cuckoo solution for nonce {} of difficulty {} (difficulty target {})",
s.get_nonce_as_u64(),
proof_diff.into_num(),
difficulty.into_num());
difficulty.into_num()
);
if proof_diff >= b.header.difficulty {
sol = Some(proof);
b.header.nonce = s.get_nonce_as_u64();

@ -218,8 +220,11 @@ impl Miner {
let stats = job_handle.get_stats(i);
if let Ok(stat_vec) = stats {
for s in stat_vec {
if s.in_use == 0 {continue;}
let last_solution_time_secs = s.last_solution_time as f64 / 1000000000.0;
if s.in_use == 0 {
continue;
}
let last_solution_time_secs =
s.last_solution_time as f64 / 1000000000.0;
let last_hashes_per_sec = 1.0 / last_solution_time_secs;
let status = match s.has_errored {
0 => "OK",

@ -274,8 +279,8 @@ impl Miner {
latest_hash: &mut Hash,
) -> Option<Proof> {
// look for a pow for at most attempt_time_per_block sec on the same block (to
// give a chance to new
// transactions) and as long as the head hasn't changed
// give a chance to new
// transactions) and as long as the head hasn't changed
let deadline = time::get_time().sec + attempt_time_per_block as i64;
let stat_check_interval = 3;
let mut next_stat_check = time::get_time().sec + stat_check_interval;

@ -306,10 +311,13 @@ impl Miner {
let pow_hash = b.hash();
if let Ok(proof) = plugin_miner.mine(&pow_hash[..]) {
let proof_diff = proof.clone().to_difficulty();
trace!(LOGGER, "Found cuckoo solution for nonce {} of difficulty {} (difficulty target {})",
trace!(
LOGGER,
"Found cuckoo solution for nonce {} of difficulty {} (difficulty target {})",
b.header.nonce,
proof_diff.into_num(),
b.header.difficulty.into_num());
b.header.difficulty.into_num()
);
if proof_diff >= b.header.difficulty {
sol = Some(proof);
break;

@ -319,7 +327,9 @@ impl Miner {
if time::get_time().sec >= next_stat_check {
let stats_vec = plugin_miner.get_stats(0).unwrap();
for s in stats_vec.into_iter() {
if s.in_use == 0 {continue;}
if s.in_use == 0 {
continue;
}
let last_solution_time_secs = s.last_solution_time as f64 / 1000000000.0;
let last_hashes_per_sec = 1.0 / last_solution_time_secs;
let status = match s.has_errored {

@ -336,7 +346,10 @@ impl Miner {
3,
last_hashes_per_sec
);
info!(LOGGER, "Mining at {} graphs per second", last_hashes_per_sec);
info!(
LOGGER,
"Mining at {} graphs per second", last_hashes_per_sec
);
}
next_stat_check = time::get_time().sec + stat_check_interval;
}

@ -443,8 +456,7 @@ impl Miner {
pub fn run_loop(&self, miner_config: MinerConfig, cuckoo_size: u32, proof_size: usize) {
info!(
LOGGER,
"(Server ID: {}) Starting miner loop.",
self.debug_output_id
"(Server ID: {}) Starting miner loop.", self.debug_output_id
);
let mut plugin_miner = None;
let mut miner = None;

@ -555,8 +567,7 @@ impl Miner {
} else {
debug!(
LOGGER,
"setting pubkey in miner to pubkey from block_fees - {:?}",
block_fees
"setting pubkey in miner to pubkey from block_fees - {:?}", block_fees
);
key_id = block_fees.key_id();
}

@ -574,7 +585,6 @@ impl Miner {
head: &core::BlockHeader,
key_id: Option<Identifier>,
) -> Result<(core::Block, BlockFees), Error> {

// prepare the block header timestamp
let mut now_sec = time::get_time().sec;
let head_sec = head.timestamp.to_timespec().sec;

@ -637,7 +647,10 @@ impl Miner {

//Some other issue, possibly duplicate kernel
Err(e) => {
error!(LOGGER, "Error setting sumtree root to build a block: {:?}", e);
error!(
LOGGER,
"Error setting sumtree root to build a block: {:?}", e
);
Err(Error::Chain(chain::Error::Other(format!("{:?}", e))))
}
}

@ -652,12 +665,9 @@ impl Miner {
) -> Result<(core::Output, core::TxKernel, BlockFees), Error> {
let keychain = Keychain::from_random_seed().unwrap();
let key_id = keychain.derive_key_id(1).unwrap();
let (out, kernel) = core::Block::reward_output(
&keychain,
&key_id,
block_fees.fees,
block_fees.height,
).unwrap();
let (out, kernel) =
core::Block::reward_output(&keychain, &key_id, block_fees.fees, block_fees.height)
.unwrap();
Ok((out, kernel, block_fees))
}
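For reference, the stats arithmetic the mining loop performs above: last_solution_time is reported in nanoseconds, so the rate it logs is simply the reciprocal of the solution time in seconds. A small sketch of just that step (the field name mirrors the plugin stats struct; everything else is illustrative):

fn graphs_per_sec(last_solution_time_ns: u64) -> f64 {
	// convert nanoseconds to seconds, then invert: one solution per N seconds
	let last_solution_time_secs = last_solution_time_ns as f64 / 1_000_000_000.0;
	1.0 / last_solution_time_secs
}

fn main() {
	// a solution every 2 seconds corresponds to 0.5 graphs per second
	assert!((graphs_per_sec(2_000_000_000) - 0.5).abs() < 1e-12);
	println!("{} graphs/sec", graphs_per_sec(2_000_000_000));
}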
@ -19,7 +19,7 @@
use std::io::Read;
use std::net::SocketAddr;
use std::str;
use std::sync::{Arc, mpsc};
use std::sync::{mpsc, Arc};
use std::sync::atomic::{AtomicBool, Ordering};
use std::time::Duration;
use std::thread;

@ -41,7 +41,6 @@ pub fn connect_and_monitor(
seed_list: Box<Fn() -> Vec<SocketAddr> + Send>,
stop: Arc<AtomicBool>,
) {

let _ = thread::Builder::new()
.name("seed".to_string())
.spawn(move || {

@ -106,7 +105,7 @@ fn monitor_peers(
debug!(
LOGGER,
"monitor_peers: unbanned {} after {} seconds", x.addr, interval
);
);
} else {
banned_count += 1;
}

@ -123,7 +122,7 @@ fn monitor_peers(
healthy_count,
banned_count,
defunct_count,
);
);

// maintenance step first, clean up p2p server peers
peers.clean_peers(PEER_MAX_COUNT as usize);

@ -160,7 +159,6 @@ fn connect_to_seeds(
tx: mpsc::Sender<SocketAddr>,
seed_list: Box<Fn() -> Vec<SocketAddr>>,
) {

// check if we have some peers in db
let peers = peers.find_peers(p2p::State::Healthy, p2p::Capabilities::FULL_HIST, 100);

@ -190,7 +188,6 @@ fn listen_for_addrs(
capab: p2p::Capabilities,
rx: &mpsc::Receiver<SocketAddr>,
) {

let pc = peers.peer_count();
for addr in rx.try_iter() {
if pc < PEER_MAX_COUNT {

@ -201,11 +198,11 @@ fn listen_for_addrs(
if let Ok(p) = p.try_read() {
let _ = p.send_peer_request(capab);
}
},
}
Err(e) => {
debug!(LOGGER, "connect_and_req: {} is Defunct; {:?}", addr, e);
let _ = peers.update_state(addr, p2p::State::Defunct);
},
}
}
}
}

@ -219,12 +216,16 @@ pub fn web_seeds() -> Box<Fn() -> Vec<SocketAddr> + Send> {
debug!(LOGGER, "Retrieving seed nodes from {}", &SEEDS_URL);

// http get, filtering out non 200 results
let mut res = client.get(SEEDS_URL).send().expect("Failed to resolve seeds.");
let mut res = client
.get(SEEDS_URL)
.send()
.expect("Failed to resolve seeds.");
if res.status != hyper::Ok {
panic!("Failed to resolve seeds, got status {}.", res.status);
}
let mut buf = vec![];
res.read_to_end(&mut buf).expect("Could not read seed list.");
res.read_to_end(&mut buf)
.expect("Could not read seed list.");

let text = str::from_utf8(&buf[..]).expect("Corrupted seed list.");
let addrs = text.split_whitespace()
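web_seeds() above fetches SEEDS_URL and splits the body on whitespace; a minimal sketch of that parsing step, assuming the body is a whitespace-separated list of ip:port entries (unparseable entries are skipped here, which may differ from the real filtering):

use std::net::SocketAddr;

fn parse_seed_list(text: &str) -> Vec<SocketAddr> {
	// each whitespace-separated token is expected to be "ip:port"
	text.split_whitespace()
		.filter_map(|s| s.parse().ok())
		.collect()
}

fn main() {
	let body = "10.0.0.1:13414 10.0.0.2:13414 not-an-addr";
	println!("{:?}", parse_seed_list(body));
}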
@ -25,7 +25,7 @@ use std::time;
use adapters::*;
use api;
use chain;
use core::{global, genesis};
use core::{genesis, global};
use miner;
use p2p;
use pool;

@ -85,11 +85,7 @@ impl Server {
//global::ChainTypes::Testnet2 => genesis::genesis_testnet2(),
_ => pow::mine_genesis_block(config.mining_config.clone())?,
};
info!(
LOGGER,
"Starting server, genesis block: {}",
genesis.hash(),
);
info!(LOGGER, "Starting server, genesis block: {}", genesis.hash(),);

let shared_chain = Arc::new(chain::Chain::init(
config.db_root.clone(),

@ -122,22 +118,24 @@ impl Server {
net_adapter.init(Arc::downgrade(&p2p_server.peers));

if config.seeding_type.clone() != Seeding::Programmatic {

let seeder = match config.seeding_type.clone() {
Seeding::None => {
warn!(LOGGER, "No seed configured, will stay solo until connected to");
warn!(
LOGGER,
"No seed configured, will stay solo until connected to"
);
seed::predefined_seeds(vec![])
}
Seeding::List => {
seed::predefined_seeds(config.seeds.as_mut().unwrap().clone())
}
Seeding::WebStatic => {
seed::web_seeds()
}
Seeding::List => seed::predefined_seeds(config.seeds.as_mut().unwrap().clone()),
Seeding::WebStatic => seed::web_seeds(),
_ => unreachable!(),
};
seed::connect_and_monitor(
p2p_server.clone(), config.capabilities, seeder, stop.clone());
p2p_server.clone(),
config.capabilities,
seeder,
stop.clone(),
);
}

// Defaults to None (optional) in config file.

@ -164,9 +162,9 @@ impl Server {
);

let p2p_inner = p2p_server.clone();
let _ = thread::Builder::new().name("p2p-server".to_string()).spawn(move || {
p2p_inner.listen()
});
let _ = thread::Builder::new()
.name("p2p-server".to_string())
.spawn(move || p2p_inner.listen());

info!(LOGGER, "Starting rest apis at: {}", &config.api_http_addr);

@ -207,7 +205,11 @@ impl Server {
let currently_syncing = self.currently_syncing.clone();

let mut miner = miner::Miner::new(
config.clone(), self.chain.clone(), self.tx_pool.clone(), self.stop.clone());
config.clone(),
self.chain.clone(),
self.tx_pool.clone(),
self.stop.clone(),
);
miner.set_debug_output_id(format!("Port {}", self.config.p2p_config.port));
let _ = thread::Builder::new()
.name("miner".to_string())
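The seeder match above maps each Seeding variant to a boxed closure that yields seed addresses. A simplified sketch of that strategy-selection pattern (String stands in for SocketAddr, and the web case just returns a placeholder instead of fetching):

enum Seeding {
	None,
	List(Vec<String>),
	WebStatic,
}

fn make_seeder(s: Seeding) -> Box<dyn Fn() -> Vec<String>> {
	match s {
		// no seeds configured: stay solo until someone connects to us
		Seeding::None => Box::new(|| vec![]),
		// a fixed, user-supplied list
		Seeding::List(seeds) => Box::new(move || seeds.clone()),
		// the real WebStatic case fetches and parses a seed list over HTTP
		Seeding::WebStatic => Box::new(|| vec!["192.0.2.1:13414".to_string()]),
	}
}

fn main() {
	let seeder = make_seeder(Seeding::List(vec!["10.0.0.1:13414".to_string()]));
	println!("{:?}", seeder());
	let none = make_seeder(Seeding::None);
	assert!(none().is_empty());
	let web = make_seeder(Seeding::WebStatic);
	println!("{:?}", web());
}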
grin/src/sync.rs

@ -63,50 +63,52 @@ pub fn run_sync(

// in archival nodes (no fast sync) we just consider we have the whole
// state already
let have_sumtrees = !fast_sync || head.height > 0 &&
header_head.height.saturating_sub(head.height) <= horizon;
let have_sumtrees = !fast_sync
|| head.height > 0 && header_head.height.saturating_sub(head.height) <= horizon;

let syncing = needs_syncing(
currently_syncing.clone(), peers.clone(), chain.clone(), !have_sumtrees);
currently_syncing.clone(),
peers.clone(),
chain.clone(),
!have_sumtrees,
);

let current_time = time::now_utc();
if syncing {

// run the header sync every 10s
if current_time - prev_header_sync > time::Duration::seconds(10) {
header_sync(
peers.clone(),
chain.clone(),
);
header_sync(peers.clone(), chain.clone());
prev_header_sync = current_time;
}

// run the body_sync every 5s
if have_sumtrees && current_time - prev_body_sync > time::Duration::seconds(5) {
body_sync(
peers.clone(),
chain.clone(),
);
body_sync(peers.clone(), chain.clone());
prev_body_sync = current_time;
}

} else if !have_sumtrees &&
current_time - prev_state_sync > time::Duration::seconds(5*60) {

} else if !have_sumtrees
&& current_time - prev_state_sync > time::Duration::seconds(5 * 60)
{
if let Some(peer) = peers.most_work_peer() {
if let Ok(p) = peer.try_read() {
debug!(LOGGER, "Header head before sumtree request: {} / {}",
header_head.height, header_head.last_block_h);
debug!(
LOGGER,
"Header head before sumtree request: {} / {}",
header_head.height,
header_head.last_block_h
);

// just to handle corner case of a too early start
if header_head.height > horizon {

// ask for sumtree at horizon
let mut sumtree_head = chain.get_block_header(&header_head.prev_block_h).unwrap();
for _ in 0..horizon-2 {
sumtree_head = chain.get_block_header(&sumtree_head.previous).unwrap();
let mut sumtree_head =
chain.get_block_header(&header_head.prev_block_h).unwrap();
for _ in 0..horizon - 2 {
sumtree_head =
chain.get_block_header(&sumtree_head.previous).unwrap();
}
p.send_sumtrees_request(sumtree_head.height, sumtree_head.hash()).unwrap();
p.send_sumtrees_request(sumtree_head.height, sumtree_head.hash())
.unwrap();
prev_state_sync = current_time;
}
}

@ -122,7 +124,6 @@ pub fn run_sync(
}

fn body_sync(peers: Arc<Peers>, chain: Arc<chain::Chain>) {

let body_head: chain::Tip = chain.head().unwrap();
let header_head: chain::Tip = chain.get_header_head().unwrap();
let sync_head: chain::Tip = chain.get_sync_head().unwrap();

@ -143,7 +144,6 @@ fn body_sync(peers: Arc<Peers>, chain: Arc<chain::Chain>) {
if header_head.total_difficulty > body_head.total_difficulty {
let mut current = chain.get_block_header(&header_head.last_block_h);
while let Ok(header) = current {

// break out of the while loop when we find a header common
// between the this chain and the current chain
if let Ok(_) = chain.is_on_current_chain(&header) {

@ -156,8 +156,8 @@ fn body_sync(peers: Arc<Peers>, chain: Arc<chain::Chain>) {
}
hashes.reverse();

// if we have 5 peers to sync from then ask for 50 blocks total (peer_count * 10)
// max will be 80 if all 8 peers are advertising more work
// if we have 5 peers to sync from then ask for 50 blocks total (peer_count *
// 10) max will be 80 if all 8 peers are advertising more work
let peer_count = cmp::min(peers.more_work_peers().len(), 10);
let block_count = peer_count * 10;

@ -180,10 +180,11 @@ fn body_sync(peers: Arc<Peers>, chain: Arc<chain::Chain>) {
header_head.height,
hashes_to_get,
peer_count,
);
);

for hash in hashes_to_get.clone() {
// TODO - Is there a threshold where we sync from most_work_peer (not more_work_peer)?
// TODO - Is there a threshold where we sync from most_work_peer (not
// more_work_peer)?
let peer = peers.more_work_peer();
if let Some(peer) = peer {
if let Ok(peer) = peer.try_read() {

@ -202,10 +203,7 @@ pub fn header_sync(peers: Arc<Peers>, chain: Arc<chain::Chain>) {
if let Ok(p) = peer.try_read() {
let peer_difficulty = p.info.total_difficulty.clone();
if peer_difficulty > difficulty {
let _ = request_headers(
peer.clone(),
chain.clone(),
);
let _ = request_headers(peer.clone(), chain.clone());
}
}
}

@ -213,17 +211,12 @@ pub fn header_sync(peers: Arc<Peers>, chain: Arc<chain::Chain>) {
}

/// Request some block headers from a peer to advance us.
fn request_headers(
peer: Arc<RwLock<Peer>>,
chain: Arc<chain::Chain>,
) -> Result<(), Error> {
fn request_headers(peer: Arc<RwLock<Peer>>, chain: Arc<chain::Chain>) -> Result<(), Error> {
let locator = get_locator(chain)?;
if let Ok(peer) = peer.try_read() {
debug!(
LOGGER,
"sync: request_headers: asking {} for headers, {:?}",
peer.info.addr,
locator,
"sync: request_headers: asking {} for headers, {:?}", peer.info.addr, locator,
);
let _ = peer.send_header_request(locator);
} else {

@ -236,15 +229,14 @@ fn request_headers(
Ok(())
}

/// Whether we're currently syncing the chain or we're fully caught up and
/// just receiving blocks through gossip.
pub fn needs_syncing(
currently_syncing: Arc<AtomicBool>,
peers: Arc<Peers>,
chain: Arc<chain::Chain>,
header_only: bool) -> bool {

header_only: bool,
) -> bool {
let local_diff = if header_only {
chain.total_header_difficulty().unwrap()
} else {

@ -252,15 +244,22 @@ pub fn needs_syncing(
};
let peer = peers.most_work_peer();

// if we're already syncing, we're caught up if no peer has a higher
// difficulty than us
if currently_syncing.load(Ordering::Relaxed) {
if let Some(peer) = peer {
if let Ok(peer) = peer.try_read() {
debug!(LOGGER, "needs_syncing {} {} {}", local_diff, peer.info.total_difficulty, header_only);
debug!(
LOGGER,
"needs_syncing {} {} {}", local_diff, peer.info.total_difficulty, header_only
);
if peer.info.total_difficulty <= local_diff {
info!(LOGGER, "synchronized at {:?} @ {:?}", local_diff, chain.head().unwrap().height);
info!(
LOGGER,
"synchronized at {:?} @ {:?}",
local_diff,
chain.head().unwrap().height
);
currently_syncing.store(false, Ordering::Relaxed);
if !header_only {
let _ = chain.reset_head();

@ -327,11 +326,7 @@ fn get_locator_heights(height: u64) -> Vec<u64> {
while current > 0 {
heights.push(current);
let next = 2u64.pow(heights.len() as u32);
current = if current > next {
current - next
} else {
0
}
current = if current > next { current - next } else { 0 }
}
heights.push(0);
heights

@ -353,10 +348,13 @@ mod test {
get_locator_heights(1000),
vec![1000, 998, 994, 986, 970, 938, 874, 746, 490, 0]
);
// check the locator is still a manageable length, even for large numbers of headers
// check the locator is still a manageable length, even for large numbers of
// headers
assert_eq!(
get_locator_heights(10000),
vec![10000, 9998, 9994, 9986, 9970, 9938, 9874, 9746, 9490, 8978, 7954, 5906, 1810, 0]
vec![
10000, 9998, 9994, 9986, 9970, 9938, 9874, 9746, 9490, 8978, 7954, 5906, 1810, 0
]
);
}
}
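The locator scheme tested above is worth seeing end to end: the gap between requested header heights doubles at each step, so the locator stays logarithmic in chain length. A self-contained restatement assembled from the loop in the hunk above (the initializations of current and heights fall outside the hunk and are assumed here); it reproduces the test vector:

fn get_locator_heights(height: u64) -> Vec<u64> {
	let mut current = height;
	let mut heights = vec![];
	while current > 0 {
		heights.push(current);
		// gap doubles with every entry: 2, 4, 8, 16, ...
		let next = 2u64.pow(heights.len() as u32);
		current = if current > next { current - next } else { 0 }
	}
	heights.push(0);
	heights
}

fn main() {
	assert_eq!(
		get_locator_heights(1000),
		vec![1000, 998, 994, 986, 970, 938, 874, 746, 490, 0]
	);
	println!("{:?}", get_locator_heights(1000));
}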
@ -17,13 +17,13 @@ extern crate slog;

extern crate grin_api as api;
extern crate grin_chain as chain;
extern crate grin_config as config;
extern crate grin_core as core;
extern crate grin_grin as grin;
extern crate grin_p2p as p2p;
extern crate grin_pow as pow;
extern crate grin_util as util;
extern crate grin_wallet as wallet;
extern crate grin_config as config;

mod framework;

@ -33,7 +33,7 @@ use std::sync::{Arc, Mutex};
use core::global;
use core::global::ChainTypes;

use framework::{LocalServerContainer,LocalServerContainerConfig};
use framework::{LocalServerContainer, LocalServerContainerConfig};
use util::{init_test_logger, LOGGER};

#[test]

@ -47,9 +47,11 @@ fn simple_server_wallet() {
// Run a separate coinbase wallet for coinbase transactions
let mut coinbase_config = LocalServerContainerConfig::default();
coinbase_config.name = String::from("coinbase_wallet_api");
coinbase_config.wallet_validating_node_url=String::from("http://127.0.0.1:40001");
coinbase_config.wallet_validating_node_url = String::from("http://127.0.0.1:40001");
coinbase_config.wallet_port = 50002;
let coinbase_wallet = Arc::new(Mutex::new(LocalServerContainer::new(coinbase_config).unwrap()));
let coinbase_wallet = Arc::new(Mutex::new(
LocalServerContainer::new(coinbase_config).unwrap(),
));

let _ = thread::spawn(move || {
let mut w = coinbase_wallet.lock().unwrap();

@ -62,11 +64,8 @@ fn simple_server_wallet() {
server_config.api_server_port = 40001;
server_config.start_miner = true;
server_config.start_wallet = false;
server_config.coinbase_wallet_address = String::from(format!(
"http://{}:{}",
server_config.base_addr,
50002
));
server_config.coinbase_wallet_address =
String::from(format!("http://{}:{}", server_config.base_addr, 50002));
let mut server_one = LocalServerContainer::new(server_config.clone()).unwrap();

// Spawn server and let it run for a bit

@ -90,26 +89,29 @@ fn simple_server_wallet() {
// Be sure that at least a block is mined by Travis
let mut current_tip = get_tip(&base_addr, api_server_port).unwrap();
while current_tip.height == 0 {
thread::sleep(time::Duration::from_millis(1000));
current_tip = get_tip(&base_addr, api_server_port).unwrap();
thread::sleep(time::Duration::from_millis(1000));
current_tip = get_tip(&base_addr, api_server_port).unwrap();
}

warn!(LOGGER, "Testing block handler");
let last_block_by_height = get_block_by_height(&base_addr, api_server_port, current_tip.height);
assert!(last_block_by_height.is_ok());
let last_block_by_height_compact = get_block_by_height_compact(&base_addr, api_server_port, current_tip.height);
let last_block_by_height_compact =
get_block_by_height_compact(&base_addr, api_server_port, current_tip.height);
assert!(last_block_by_height_compact.is_ok());

let block_hash = current_tip.last_block_pushed;
let last_block_by_hash = get_block_by_hash(&base_addr, api_server_port, &block_hash);
assert!(last_block_by_hash.is_ok());
let last_block_by_hash_compact = get_block_by_hash_compact(&base_addr, api_server_port, &block_hash);
let last_block_by_hash_compact =
get_block_by_hash_compact(&base_addr, api_server_port, &block_hash);
assert!(last_block_by_hash_compact.is_ok());

warn!(LOGGER, "Testing chain utxo handler");
let start_height = 0;
let end_height = current_tip.height;
let utxos_by_height = get_utxos_by_height(&base_addr, api_server_port, start_height, end_height);
let utxos_by_height =
get_utxos_by_height(&base_addr, api_server_port, start_height, end_height);
assert!(utxos_by_height.is_ok());
let ids = get_ids_from_block_outputs(utxos_by_height.unwrap());
let utxos_by_ids1 = get_utxos_by_ids1(&base_addr, api_server_port, ids.clone());

@ -169,7 +171,10 @@ fn test_p2p() {
server_config_two.start_wallet = false;
server_config_two.is_seeding = false;
let mut server_two = LocalServerContainer::new(server_config_two.clone()).unwrap();
server_two.add_peer(format!("{}:{}", server_config_one.base_addr, server_config_one.p2p_server_port));
server_two.add_peer(format!(
"{}:{}",
server_config_one.base_addr, server_config_one.p2p_server_port
));
let _ = thread::spawn(move || server_two.run_server(120));

// Let them do the handshake

@ -191,7 +196,10 @@ fn test_p2p() {
assert_eq!(peers_all.unwrap().len(), 1);

// Check that the peer status is Healthy
let addr = format!("{}:{}", server_config_two.base_addr, server_config_two.p2p_server_port);
let addr = format!(
"{}:{}",
server_config_two.base_addr, server_config_two.p2p_server_port
);
let peer = get_peer(&base_addr, api_server_port, &addr);
assert!(peer.is_ok());
assert_eq!(peer.unwrap().flags, p2p::State::Healthy);

@ -239,85 +247,171 @@ fn get_status(base_addr: &String, api_server_port: u16) -> Result<api::Status, E
}

// Block handler functions
fn get_block_by_height(base_addr: &String, api_server_port: u16, height: u64) -> Result<api::BlockPrintable, Error> {
let url = format!("http://{}:{}/v1/blocks/{}", base_addr, api_server_port, height);
fn get_block_by_height(
base_addr: &String,
api_server_port: u16,
height: u64,
) -> Result<api::BlockPrintable, Error> {
let url = format!(
"http://{}:{}/v1/blocks/{}",
base_addr, api_server_port, height
);
api::client::get::<api::BlockPrintable>(url.as_str()).map_err(|e| Error::API(e))
}

fn get_block_by_height_compact(base_addr: &String, api_server_port: u16, height: u64) -> Result<api::CompactBlockPrintable, Error> {
let url = format!("http://{}:{}/v1/blocks/{}?compact", base_addr, api_server_port, height);
fn get_block_by_height_compact(
base_addr: &String,
api_server_port: u16,
height: u64,
) -> Result<api::CompactBlockPrintable, Error> {
let url = format!(
"http://{}:{}/v1/blocks/{}?compact",
base_addr, api_server_port, height
);
api::client::get::<api::CompactBlockPrintable>(url.as_str()).map_err(|e| Error::API(e))
}

fn get_block_by_hash(base_addr: &String, api_server_port: u16, block_hash: &String) -> Result<api::BlockPrintable, Error> {
let url = format!("http://{}:{}/v1/blocks/{}", base_addr, api_server_port, block_hash);
fn get_block_by_hash(
base_addr: &String,
api_server_port: u16,
block_hash: &String,
) -> Result<api::BlockPrintable, Error> {
let url = format!(
"http://{}:{}/v1/blocks/{}",
base_addr, api_server_port, block_hash
);
api::client::get::<api::BlockPrintable>(url.as_str()).map_err(|e| Error::API(e))
}

fn get_block_by_hash_compact(base_addr: &String, api_server_port: u16, block_hash: &String) -> Result<api::CompactBlockPrintable, Error> {
let url = format!("http://{}:{}/v1/blocks/{}?compact", base_addr, api_server_port, block_hash);
fn get_block_by_hash_compact(
base_addr: &String,
api_server_port: u16,
block_hash: &String,
) -> Result<api::CompactBlockPrintable, Error> {
let url = format!(
"http://{}:{}/v1/blocks/{}?compact",
base_addr, api_server_port, block_hash
);
api::client::get::<api::CompactBlockPrintable>(url.as_str()).map_err(|e| Error::API(e))
}

// Chain utxo handler functions
fn get_utxos_by_ids1(base_addr: &String, api_server_port: u16, ids: Vec<String>) -> Result<Vec<api::Utxo>, Error> {
let url = format!("http://{}:{}/v1/chain/utxos/byids?id={}", base_addr, api_server_port, ids.join(","));
fn get_utxos_by_ids1(
base_addr: &String,
api_server_port: u16,
ids: Vec<String>,
) -> Result<Vec<api::Utxo>, Error> {
let url = format!(
"http://{}:{}/v1/chain/utxos/byids?id={}",
base_addr,
api_server_port,
ids.join(",")
);
api::client::get::<Vec<api::Utxo>>(url.as_str()).map_err(|e| Error::API(e))
}

fn get_utxos_by_ids2(base_addr: &String, api_server_port: u16, ids: Vec<String>) -> Result<Vec<api::Utxo>, Error> {
fn get_utxos_by_ids2(
base_addr: &String,
api_server_port: u16,
ids: Vec<String>,
) -> Result<Vec<api::Utxo>, Error> {
let mut ids_string: String = String::from("");
for id in ids {
ids_string = ids_string + "?id=" + &id;
}
let ids_string = String::from(&ids_string[1..ids_string.len()]);
println!("{}", ids_string);
let url = format!("http://{}:{}/v1/chain/utxos/byids?{}", base_addr, api_server_port, ids_string);
let url = format!(
"http://{}:{}/v1/chain/utxos/byids?{}",
base_addr, api_server_port, ids_string
);
api::client::get::<Vec<api::Utxo>>(url.as_str()).map_err(|e| Error::API(e))
}

fn get_utxos_by_height(base_addr: &String, api_server_port: u16, start_height: u64, end_height: u64) -> Result<Vec<api::BlockOutputs>, Error> {
let url = format!("http://{}:{}/v1/chain/utxos/byheight?start_height={}&end_height={}", base_addr, api_server_port, start_height, end_height);
fn get_utxos_by_height(
base_addr: &String,
api_server_port: u16,
start_height: u64,
end_height: u64,
) -> Result<Vec<api::BlockOutputs>, Error> {
let url = format!(
"http://{}:{}/v1/chain/utxos/byheight?start_height={}&end_height={}",
base_addr, api_server_port, start_height, end_height
);
api::client::get::<Vec<api::BlockOutputs>>(url.as_str()).map_err(|e| Error::API(e))
}

// Sumtree handler functions
fn get_sumtree_roots(base_addr: &String, api_server_port: u16) -> Result<api::SumTrees, Error> {
let url = format!("http://{}:{}/v1/pmmrtrees/roots", base_addr, api_server_port);
fn get_sumtree_roots(base_addr: &String, api_server_port: u16) -> Result<api::SumTrees, Error> {
let url = format!(
"http://{}:{}/v1/pmmrtrees/roots",
base_addr, api_server_port
);
api::client::get::<api::SumTrees>(url.as_str()).map_err(|e| Error::API(e))
}

fn get_sumtree_lastutxos(base_addr: &String, api_server_port: u16, n: u64) -> Result<Vec<api::PmmrTreeNode>, Error> {
fn get_sumtree_lastutxos(
base_addr: &String,
api_server_port: u16,
n: u64,
) -> Result<Vec<api::PmmrTreeNode>, Error> {
let url: String;
if n == 0 {
url = format!("http://{}:{}/v1/pmmrtrees/lastutxos", base_addr, api_server_port);
url = format!(
"http://{}:{}/v1/pmmrtrees/lastutxos",
base_addr, api_server_port
);
} else {
url = format!("http://{}:{}/v1/pmmrtrees/lastutxos?n={}", base_addr, api_server_port, n);
url = format!(
"http://{}:{}/v1/pmmrtrees/lastutxos?n={}",
base_addr, api_server_port, n
);
}
api::client::get::<Vec<api::PmmrTreeNode>>(url.as_str()).map_err(|e| Error::API(e))
}

fn get_sumtree_lastrangeproofs(base_addr: &String, api_server_port: u16, n: u64) -> Result<Vec<api::PmmrTreeNode>, Error> {
fn get_sumtree_lastrangeproofs(
base_addr: &String,
api_server_port: u16,
n: u64,
) -> Result<Vec<api::PmmrTreeNode>, Error> {
let url: String;
if n == 0 {
url = format!("http://{}:{}/v1/pmmrtrees/lastrangeproofs", base_addr, api_server_port);
url = format!(
"http://{}:{}/v1/pmmrtrees/lastrangeproofs",
base_addr, api_server_port
);
} else {
url = format!("http://{}:{}/v1/pmmrtrees/lastrangeproofs?n={}", base_addr, api_server_port, n);
url = format!(
"http://{}:{}/v1/pmmrtrees/lastrangeproofs?n={}",
base_addr, api_server_port, n
);
}
api::client::get::<Vec<api::PmmrTreeNode>>(url.as_str()).map_err(|e| Error::API(e))
}

fn getsumtree_lastkernels(base_addr: &String, api_server_port: u16, n: u64) -> Result<Vec<api::PmmrTreeNode>, Error> {
fn getsumtree_lastkernels(
base_addr: &String,
api_server_port: u16,
n: u64,
) -> Result<Vec<api::PmmrTreeNode>, Error> {
let url: String;
if n == 0 {
url = format!("http://{}:{}/v1/pmmrtrees/lastkernels", base_addr, api_server_port);
url = format!(
"http://{}:{}/v1/pmmrtrees/lastkernels",
base_addr, api_server_port
);
} else {
url = format!("http://{}:{}/v1/pmmrtrees/lastkernels?n={}", base_addr, api_server_port, n);
url = format!(
"http://{}:{}/v1/pmmrtrees/lastkernels?n={}",
base_addr, api_server_port, n
);
}
api::client::get::<Vec<api::PmmrTreeNode>>(url.as_str()).map_err(|e| Error::API(e))
}

// Helper function to get a vec of commitment output ids from a vec of block outputs
// Helper function to get a vec of commitment output ids from a vec of block
// outputs
fn get_ids_from_block_outputs(block_outputs: Vec<api::BlockOutputs>) -> Vec<String> {
let mut ids: Vec<String> = Vec::new();
for block_output in block_outputs {

@ -331,32 +425,51 @@ fn get_ids_from_block_outputs(block_outputs: Vec<api::BlockOutputs>) -> Vec<Stri

pub fn ban_peer(base_addr: &String, api_server_port: u16, peer_addr: &String) -> Result<(), Error> {
let url = format!(
"http://{}:{}/v1/peers/{}/ban", base_addr, api_server_port, peer_addr
"http://{}:{}/v1/peers/{}/ban",
base_addr, api_server_port, peer_addr
);
api::client::post(url.as_str(), &"").map_err(|e| Error::API(e))
}

pub fn unban_peer(base_addr: &String, api_server_port: u16, peer_addr: &String) -> Result<(), Error> {
pub fn unban_peer(
base_addr: &String,
api_server_port: u16,
peer_addr: &String,
) -> Result<(), Error> {
let url = format!(
"http://{}:{}/v1/peers/{}/unban",
base_addr,
api_server_port,
peer_addr
base_addr, api_server_port, peer_addr
);
api::client::post(url.as_str(), &"").map_err(|e| Error::API(e))
}

pub fn get_peer(base_addr: &String, api_server_port: u16, peer_addr: &String) -> Result<p2p::PeerData, Error> {
let url = format!("http://{}:{}/v1/peers/{}", base_addr, api_server_port, peer_addr);
pub fn get_peer(
base_addr: &String,
api_server_port: u16,
peer_addr: &String,
) -> Result<p2p::PeerData, Error> {
let url = format!(
"http://{}:{}/v1/peers/{}",
base_addr, api_server_port, peer_addr
);
api::client::get::<p2p::PeerData>(url.as_str()).map_err(|e| Error::API(e))
}

pub fn get_connected_peers(base_addr: &String, api_server_port: u16) -> Result<Vec<p2p::PeerInfo>, Error> {
let url = format!("http://{}:{}/v1/peers/connected", base_addr, api_server_port);
pub fn get_connected_peers(
base_addr: &String,
api_server_port: u16,
) -> Result<Vec<p2p::PeerInfo>, Error> {
let url = format!(
"http://{}:{}/v1/peers/connected",
base_addr, api_server_port
);
api::client::get::<Vec<p2p::PeerInfo>>(url.as_str()).map_err(|e| Error::API(e))
}

pub fn get_all_peers(base_addr: &String, api_server_port: u16) -> Result<Vec<p2p::PeerData>, Error> {
pub fn get_all_peers(
base_addr: &String,
api_server_port: u16,
) -> Result<Vec<p2p::PeerData>, Error> {
let url = format!("http://{}:{}/v1/peers/all", base_addr, api_server_port);
api::client::get::<Vec<p2p::PeerData>>(url.as_str()).map_err(|e| Error::API(e))
}
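Every helper above builds the same http://{addr}:{port}/v1/... shape by hand; a tiny hypothetical helper showing the URL convention these tests exercise (api_url is not part of the codebase, just an illustration):

fn api_url(base_addr: &str, port: u16, endpoint: &str) -> String {
	// all the node's REST endpoints live under the /v1/ prefix
	format!("http://{}:{}/v1/{}", base_addr, port, endpoint)
}

fn main() {
	assert_eq!(
		api_url("127.0.0.1", 40001, "blocks/10"),
		"http://127.0.0.1:40001/v1/blocks/10"
	);
	println!(
		"{}",
		api_url("127.0.0.1", 40001, "chain/utxos/byheight?start_height=0&end_height=10")
	);
}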
@ -119,7 +119,6 @@ impl Default for LocalServerContainerConfig {
}
}

/// A top-level container to hold everything that might be running
/// on a server, i.e. server, wallet in send or receive mode

@ -151,7 +150,7 @@ pub struct LocalServerContainer {
pub working_dir: String,

// Wallet configuration
pub wallet_config:WalletConfig,
pub wallet_config: WalletConfig,
}

impl LocalServerContainer {

@ -166,19 +165,17 @@ impl LocalServerContainer {
wallet_config.api_listen_port = format!("{}", config.wallet_port);
wallet_config.check_node_api_http_addr = config.wallet_validating_node_url.clone();
wallet_config.data_file_dir = working_dir.clone();
Ok(
LocalServerContainer {
config: config,
p2p_server_stats: None,
api_server: None,
server_is_running: false,
server_is_mining: false,
wallet_is_running: false,
working_dir: working_dir,
peer_list: Vec::new(),
wallet_config:wallet_config,
},
)
Ok(LocalServerContainer {
config: config,
p2p_server_stats: None,
api_server: None,
server_is_running: false,
server_is_mining: false,
wallet_is_running: false,
working_dir: working_dir,
peer_list: Vec::new(),
wallet_config: wallet_config,
})
}

pub fn run_server(&mut self, duration_in_seconds: u64) -> grin::ServerStats {

@ -192,21 +189,19 @@ impl LocalServerContainer {
seeds = vec![self.config.seed_addr.to_string()];
}

let s = grin::Server::new(
grin::ServerConfig {
api_http_addr: api_addr,
db_root: format!("{}/.grin", self.working_dir),
p2p_config: p2p::P2PConfig {
port: self.config.p2p_server_port,
..p2p::P2PConfig::default()
},
seeds: Some(seeds),
seeding_type: seeding_type,
chain_type: core::global::ChainTypes::AutomatedTesting,
skip_sync_wait:Some(true),
..Default::default()
let s = grin::Server::new(grin::ServerConfig {
api_http_addr: api_addr,
db_root: format!("{}/.grin", self.working_dir),
p2p_config: p2p::P2PConfig {
port: self.config.p2p_server_port,
..p2p::P2PConfig::default()
},
).unwrap();
seeds: Some(seeds),
seeding_type: seeding_type,
chain_type: core::global::ChainTypes::AutomatedTesting,
skip_sync_wait: Some(true),
..Default::default()
}).unwrap();

self.p2p_server_stats = Some(s.get_server_stats().unwrap());

@ -262,7 +257,6 @@ impl LocalServerContainer {

let _seed = blake2::blake2b::blake2b(32, &[], seed.as_bytes());

println!(
"Starting the Grin wallet receiving daemon on {} ",
self.config.wallet_port

@ -271,57 +265,59 @@ impl LocalServerContainer {
self.wallet_config = WalletConfig::default();

self.wallet_config.api_listen_port = format!("{}", self.config.wallet_port);
self.wallet_config.check_node_api_http_addr = self.config.wallet_validating_node_url.clone();
self.wallet_config.check_node_api_http_addr =
self.config.wallet_validating_node_url.clone();
self.wallet_config.data_file_dir = self.working_dir.clone();

let _=fs::create_dir_all(self.wallet_config.clone().data_file_dir);
let _ = fs::create_dir_all(self.wallet_config.clone().data_file_dir);
wallet::WalletSeed::init_file(&self.wallet_config);

let wallet_seed =
wallet::WalletSeed::from_file(&self.wallet_config).expect("Failed to read wallet seed file.");
let wallet_seed = wallet::WalletSeed::from_file(&self.wallet_config)
.expect("Failed to read wallet seed file.");

let keychain = wallet_seed.derive_keychain("grin_test").expect(
"Failed to derive keychain from seed file and passphrase.",
);
let keychain = wallet_seed
.derive_keychain("grin_test")
.expect("Failed to derive keychain from seed file and passphrase.");

wallet::server::start_rest_apis(self.wallet_config.clone(), keychain);
self.wallet_is_running = true;
}

pub fn get_wallet_seed(config: &WalletConfig) -> wallet::WalletSeed {
let _=fs::create_dir_all(config.clone().data_file_dir);
let _ = fs::create_dir_all(config.clone().data_file_dir);
wallet::WalletSeed::init_file(config);
let wallet_seed =
wallet::WalletSeed::from_file(config).expect("Failed to read wallet seed file.");
wallet_seed
}

pub fn get_wallet_info(config: &WalletConfig, wallet_seed: &wallet::WalletSeed) -> wallet::WalletInfo {
let keychain = wallet_seed.derive_keychain("grin_test").expect(
"Failed to derive keychain from seed file and passphrase.",
);
pub fn get_wallet_info(
config: &WalletConfig,
wallet_seed: &wallet::WalletSeed,
) -> wallet::WalletInfo {
let keychain = wallet_seed
.derive_keychain("grin_test")
.expect("Failed to derive keychain from seed file and passphrase.");

wallet::retrieve_info(config, &keychain)
}

pub fn send_amount_to(config: &WalletConfig,
amount:&str,
pub fn send_amount_to(
config: &WalletConfig,
amount: &str,
minimum_confirmations: u64,
selection_strategy:&str,
dest: &str){

let amount = core::core::amount_from_hr_string(amount).expect(
"Could not parse amount as a number with optional decimal point.",
);
selection_strategy: &str,
dest: &str,
) {
let amount = core::core::amount_from_hr_string(amount)
.expect("Could not parse amount as a number with optional decimal point.");

let wallet_seed =
wallet::WalletSeed::from_file(config).expect("Failed to read wallet seed file.");

let mut keychain = wallet_seed.derive_keychain("grin_test").expect(
"Failed to derive keychain from seed file and passphrase.",
);
let mut keychain = wallet_seed
.derive_keychain("grin_test")
.expect("Failed to derive keychain from seed file and passphrase.");
let max_outputs = 500;
let result = wallet::issue_send_tx(
config,

@ -331,27 +327,25 @@ impl LocalServerContainer {
dest.to_string(),
max_outputs,
selection_strategy == "all",
);
);
match result {
Ok(_) => {
println!(
"Tx sent: {} grin to {} (strategy '{}')",
core::core::amount_to_hr_string(amount),
dest,
selection_strategy,
)
}
Err(e) => match e.kind() {
wallet::ErrorKind::NotEnoughFunds(available) => {
println!(
"Tx not sent: insufficient funds (max: {})",
core::core::amount_to_hr_string(available),
);
}
_ => {
println!("Tx not sent to {}: {:?}", dest, e);
}
}
Ok(_) => println!(
"Tx sent: {} grin to {} (strategy '{}')",
core::core::amount_to_hr_string(amount),
dest,
selection_strategy,
),
Err(e) => match e.kind() {
wallet::ErrorKind::NotEnoughFunds(available) => {
println!(
"Tx not sent: insufficient funds (max: {})",
core::core::amount_to_hr_string(available),
);
}
_ => {
println!("Tx not sent to {}: {:?}", dest, e);
}
},
};
}

@ -456,20 +450,15 @@ impl LocalServerContainerPool {

server_config.name = String::from(format!(
"{}/{}-{}",
self.config.base_name,
self.config.base_name,
server_config.p2p_server_port
self.config.base_name, self.config.base_name, server_config.p2p_server_port
));

// Use self as coinbase wallet
server_config.coinbase_wallet_address = String::from(format!(
"http://{}:{}",
server_config.base_addr,
server_config.wallet_port
server_config.base_addr, server_config.wallet_port
));

self.next_p2p_port += 1;
self.next_api_port += 1;
self.next_wallet_port += 1;

@ -480,8 +469,7 @@ impl LocalServerContainerPool {

let _server_address = format!(
"{}:{}",
server_config.base_addr,
server_config.p2p_server_port
server_config.base_addr, server_config.p2p_server_port
);

let server_container = LocalServerContainer::new(server_config.clone()).unwrap();
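The pool bookkeeping in this hunk (next_p2p_port += 1 and friends) is what keeps simultaneous test servers from colliding; a sketch of that allocation pattern with stand-in fields:

struct PortPool {
	next_p2p_port: u16,
	next_api_port: u16,
	next_wallet_port: u16,
}

impl PortPool {
	// hand out the current trio of ports, then advance all three counters
	fn allocate(&mut self) -> (u16, u16, u16) {
		let ports = (self.next_p2p_port, self.next_api_port, self.next_wallet_port);
		self.next_p2p_port += 1;
		self.next_api_port += 1;
		self.next_wallet_port += 1;
		ports
	}
}

fn main() {
	let mut pool = PortPool {
		next_p2p_port: 18000,
		next_api_port: 19000,
		next_wallet_port: 20000,
	};
	assert_eq!(pool.allocate(), (18000, 19000, 20000));
	assert_eq!(pool.allocate(), (18001, 19001, 20001));
	println!("ok");
}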
@ -101,8 +101,7 @@ fn simulate_seeding() {
server_config.is_seeding = false;
server_config.seed_addr = String::from(format!(
"{}:{}",
server_config.base_addr,
server_config.p2p_server_port
server_config.base_addr, server_config.p2p_server_port
));

for _ in 0..4 {

@ -153,8 +152,7 @@ fn simulate_parallel_mining() {
server_config.is_seeding = false;
server_config.seed_addr = String::from(format!(
"{}:{}",
server_config.base_addr,
server_config.p2p_server_port
server_config.base_addr, server_config.p2p_server_port
));

// And create 4 more, then let them run for a while

@ -169,8 +167,8 @@ fn simulate_parallel_mining() {
let _ = pool.run_all_servers();

// Check mining difficulty here?, though I'd think it's more valuable
// to simply output it. Can at least see the evolution of the difficulty target
// in the debug log output for now
// to simply output it. Can at least see the evolution of the difficulty target
// in the debug log output for now
}

// TODO: Convert these tests to newer framework format

@ -190,20 +188,18 @@ fn a_simulate_block_propagation() {
// instantiates 5 servers on different ports
let mut servers = vec![];
for n in 0..5 {
let s = grin::Server::new(
grin::ServerConfig {
api_http_addr: format!("127.0.0.1:{}", 19000 + n),
db_root: format!("target/{}/grin-prop-{}", test_name_dir, n),
p2p_config: p2p::P2PConfig {
port: 18000 + n,
..p2p::P2PConfig::default()
},
seeding_type: grin::Seeding::List,
seeds: Some(vec!["127.0.0.1:18000".to_string()]),
chain_type: core::global::ChainTypes::AutomatedTesting,
..Default::default()
let s = grin::Server::new(grin::ServerConfig {
api_http_addr: format!("127.0.0.1:{}", 19000 + n),
db_root: format!("target/{}/grin-prop-{}", test_name_dir, n),
p2p_config: p2p::P2PConfig {
port: 18000 + n,
..p2p::P2PConfig::default()
},
).unwrap();
seeding_type: grin::Seeding::List,
seeds: Some(vec!["127.0.0.1:18000".to_string()]),
chain_type: core::global::ChainTypes::AutomatedTesting,
..Default::default()
}).unwrap();
servers.push(s);
}
@ -19,19 +19,19 @@ extern crate slog;
|
|||
|
||||
extern crate grin_api as api;
|
||||
extern crate grin_chain as chain;
|
||||
extern crate grin_config as config;
|
||||
extern crate grin_core as core;
|
||||
extern crate grin_grin as grin;
|
||||
extern crate grin_p2p as p2p;
|
||||
extern crate grin_pow as pow;
|
||||
extern crate grin_util as util;
|
||||
extern crate grin_wallet as wallet;
|
||||
extern crate grin_config as config;
|
||||
|
||||
mod framework;
|
||||
|
||||
use std::{thread, time};
|
||||
use std::sync::{Arc, Mutex};
|
||||
use framework::{LocalServerContainer,LocalServerContainerConfig};
|
||||
use framework::{LocalServerContainer, LocalServerContainerConfig};
|
||||
|
||||
use util::LOGGER;
|
||||
|
||||
|
@ -51,12 +51,12 @@ fn basic_wallet_transactions() {
|
|||
// Run a separate coinbase wallet for coinbase transactions
|
||||
let mut coinbase_config = LocalServerContainerConfig::default();
|
||||
coinbase_config.name = String::from("coinbase_wallet");
|
||||
coinbase_config.wallet_validating_node_url=String::from("http://127.0.0.1:30001");
|
||||
coinbase_config.wallet_validating_node_url = String::from("http://127.0.0.1:30001");
|
||||
coinbase_config.wallet_port = 10002;
|
||||
let coinbase_wallet = Arc::new(Mutex::new(LocalServerContainer::new(coinbase_config).unwrap()));
|
||||
let coinbase_wallet_config = {
|
||||
coinbase_wallet.lock().unwrap().wallet_config.clone()
|
||||
};
|
||||
let coinbase_wallet = Arc::new(Mutex::new(
|
||||
LocalServerContainer::new(coinbase_config).unwrap(),
|
||||
));
|
||||
let coinbase_wallet_config = { coinbase_wallet.lock().unwrap().wallet_config.clone() };
|
||||
|
||||
let coinbase_seed = LocalServerContainer::get_wallet_seed(&coinbase_wallet_config);

@ -67,13 +67,11 @@ fn basic_wallet_transactions() {
let mut recp_config = LocalServerContainerConfig::default();
recp_config.name = String::from("target_wallet");
recp_config.wallet_validating_node_url=String::from("http://127.0.0.1:30001");
recp_config.wallet_validating_node_url = String::from("http://127.0.0.1:30001");
recp_config.wallet_port = 20002;
let target_wallet = Arc::new(Mutex::new(LocalServerContainer::new(recp_config).unwrap()));
let target_wallet_cloned = target_wallet.clone();
let recp_wallet_config = {
target_wallet.lock().unwrap().wallet_config.clone()
};
let recp_wallet_config = { target_wallet.lock().unwrap().wallet_config.clone() };

let recp_seed = LocalServerContainer::get_wallet_seed(&recp_wallet_config);
//Start up a second wallet, to receive

@ -90,54 +88,83 @@ fn basic_wallet_transactions() {
server_config.api_server_port = 30001;
server_config.start_miner = true;
server_config.start_wallet = false;
server_config.coinbase_wallet_address = String::from(format!(
"http://{}:{}",
server_config.base_addr,
10002
));
server_config.coinbase_wallet_address =
String::from(format!("http://{}:{}", server_config.base_addr, 10002));
let mut server_one = LocalServerContainer::new(server_config).unwrap();
server_one.run_server(120);
});

//Wait until we have some funds to send
let mut coinbase_info = LocalServerContainer::get_wallet_info(&coinbase_wallet_config, &coinbase_seed);
let mut coinbase_info =
LocalServerContainer::get_wallet_info(&coinbase_wallet_config, &coinbase_seed);
let mut slept_time = 0;
while coinbase_info.amount_currently_spendable < 100000000000{
while coinbase_info.amount_currently_spendable < 100000000000 {
thread::sleep(time::Duration::from_millis(500));
slept_time+=500;
slept_time += 500;
if slept_time > 10000 {
panic!("Coinbase not confirming in time");
}
coinbase_info = LocalServerContainer::get_wallet_info(&coinbase_wallet_config, &coinbase_seed);
coinbase_info =
LocalServerContainer::get_wallet_info(&coinbase_wallet_config, &coinbase_seed);
}
warn!(LOGGER, "Sending 50 Grins to recipient wallet");
LocalServerContainer::send_amount_to(&coinbase_wallet_config, "50.00", 1, "not_all", "http://127.0.0.1:20002");
LocalServerContainer::send_amount_to(
&coinbase_wallet_config,
"50.00",
1,
"not_all",
"http://127.0.0.1:20002",
);

//Wait for a confirmation
thread::sleep(time::Duration::from_millis(3000));
let coinbase_info = LocalServerContainer::get_wallet_info(&coinbase_wallet_config, &coinbase_seed);
let coinbase_info =
LocalServerContainer::get_wallet_info(&coinbase_wallet_config, &coinbase_seed);
println!("Coinbase wallet info: {:?}", coinbase_info);

let recipient_info = LocalServerContainer::get_wallet_info(&recp_wallet_config, &recp_seed);
println!("Recipient wallet info: {:?}", recipient_info);

assert!(recipient_info.data_confirmed && recipient_info.amount_currently_spendable==50000000000);
assert!(
recipient_info.data_confirmed && recipient_info.amount_currently_spendable == 50000000000
);

warn!(LOGGER, "Sending many small transactions to recipient wallet");
warn!(
LOGGER,
"Sending many small transactions to recipient wallet"
);
for _ in 0..10 {
LocalServerContainer::send_amount_to(&coinbase_wallet_config, "1.00", 1, "not_all", "http://127.0.0.1:20002");
LocalServerContainer::send_amount_to(
&coinbase_wallet_config,
"1.00",
1,
"not_all",
"http://127.0.0.1:20002",
);
}

thread::sleep(time::Duration::from_millis(10000));
let recipient_info = LocalServerContainer::get_wallet_info(&recp_wallet_config, &recp_seed);
println!("Recipient wallet info post little sends: {:?}", recipient_info);
println!(
"Recipient wallet info post little sends: {:?}",
recipient_info
);

assert!(recipient_info.data_confirmed && recipient_info.amount_currently_spendable==60000000000);
assert!(
recipient_info.data_confirmed && recipient_info.amount_currently_spendable == 60000000000
);
//send some cash right back
LocalServerContainer::send_amount_to(&recp_wallet_config, "25.00", 1, "all", "http://127.0.0.1:10002");
LocalServerContainer::send_amount_to(
&recp_wallet_config,
"25.00",
1,
"all",
"http://127.0.0.1:10002",
);

thread::sleep(time::Duration::from_millis(5000));

let coinbase_info = LocalServerContainer::get_wallet_info(&coinbase_wallet_config, &coinbase_seed);
let coinbase_info =
LocalServerContainer::get_wallet_info(&coinbase_wallet_config, &coinbase_seed);
println!("Coinbase wallet info final: {:?}", coinbase_info);
}
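
Aside: the wait loop above (poll the wallet every 500ms, bail out after a timeout) reduces to the minimal sketch below; `get_spendable` is a hypothetical closure standing in for LocalServerContainer::get_wallet_info, the rest is std.

use std::{thread, time};

fn wait_for_spendable<F>(mut get_spendable: F, target: u64, timeout_ms: u64) -> Result<(), String>
where
    F: FnMut() -> u64, // returns amount_currently_spendable in base units
{
    let mut slept_time = 0;
    while get_spendable() < target {
        thread::sleep(time::Duration::from_millis(500));
        slept_time += 500;
        if slept_time > timeout_ms {
            return Err(String::from("funds not confirming in time"));
        }
    }
    Ok(())
}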
@ -22,7 +22,6 @@ use util;
use util::secp::{self, Secp256k1};
use util::secp::constants::SECRET_KEY_SIZE;

#[derive(Clone, Copy, Debug, PartialEq, Serialize, Deserialize)]
pub struct BlindingFactor([u8; SECRET_KEY_SIZE]);

@ -65,8 +64,7 @@ impl BlindingFactor {
// and secp lib checks this
Ok(secp::key::ZERO_KEY)
} else {
secp::key::SecretKey::from_slice(secp, &self.0)
.map_err(|e| Error::Secp(e))
secp::key::SecretKey::from_slice(secp, &self.0).map_err(|e| Error::Secp(e))
}
}

@ -86,10 +84,7 @@ impl BlindingFactor {
let blind_1 = BlindingFactor::from_secret_key(skey_1);
let blind_2 = BlindingFactor::from_secret_key(skey_2);

Ok(SplitBlindingFactor {
blind_1,
blind_2,
})
Ok(SplitBlindingFactor { blind_1, blind_2 })
}
}
@ -218,8 +218,8 @@ impl ExtendedKey {
let derived = blake2b(64, b"Grin/MW Seed", seed);
let slice = derived.as_bytes();

let key = SecretKey::from_slice(&secp, &slice[0..32])
.expect("Error deriving key (from_slice)");
let key =
SecretKey::from_slice(&secp, &slice[0..32]).expect("Error deriving key (from_slice)");

let mut chain_code: [u8; 32] = Default::default();
(&mut chain_code).copy_from_slice(&slice[32..64]);

@ -295,7 +295,6 @@ impl ExtendedKey {
}
}

#[cfg(test)]
mod test {
use serde_json;

@ -341,10 +340,7 @@ mod test {
let identifier = from_hex("6f7c1a053ca54592e783");
let n_child = 0;
assert_eq!(extk.key, secret_key);
assert_eq!(
extk.key_id,
Identifier::from_bytes(identifier.as_slice())
);
assert_eq!(extk.key_id, Identifier::from_bytes(identifier.as_slice()));
assert_eq!(
extk.root_key_id,
Identifier::from_bytes(identifier.as_slice())
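
Aside: the seed-to-master-key step this hunk reformats boils down to the sketch below, assuming the blake2_rfc crate that the keychain crate root (later in this diff) imports as `blake2`; the real code additionally validates the first 32 bytes via SecretKey::from_slice.

use blake2_rfc::blake2b::blake2b;

// Derive 32 bytes of secret key material plus a 32-byte chain code
// from a wallet seed, keyed with the "Grin/MW Seed" constant above.
fn master_from_seed(seed: &[u8]) -> ([u8; 32], [u8; 32]) {
    let derived = blake2b(64, b"Grin/MW Seed", seed);
    let slice = derived.as_bytes();
    let mut key = [0u8; 32];
    let mut chain_code = [0u8; 32];
    key.copy_from_slice(&slice[0..32]);         // first half: secret key material
    chain_code.copy_from_slice(&slice[32..64]); // second half: chain code
    (key, chain_code)
}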
@ -19,8 +19,8 @@ use std::{error, fmt};

use util::secp;
use util::secp::{Message, Secp256k1, Signature};
use util::secp::key::{SecretKey, PublicKey};
use util::secp::pedersen::{Commitment, ProofMessage, ProofInfo, RangeProof};
use util::secp::key::{PublicKey, SecretKey};
use util::secp::pedersen::{Commitment, ProofInfo, ProofMessage, RangeProof};
use util::secp::aggsig;
use util::logger::LOGGER;
use util::kernel_sig_msg;

@ -82,7 +82,7 @@ pub struct AggSigTxContext {
pub struct Keychain {
secp: Secp256k1,
extkey: extkey::ExtendedKey,
pub aggsig_contexts: Arc<RwLock<Option<HashMap<Uuid,AggSigTxContext>>>>,
pub aggsig_contexts: Arc<RwLock<Option<HashMap<Uuid, AggSigTxContext>>>>,
key_overrides: HashMap<Identifier, SecretKey>,
key_derivation_cache: Arc<RwLock<HashMap<Identifier, u32>>>,
}

@ -133,7 +133,11 @@ impl Keychain {
fn derived_key(&self, key_id: &Identifier) -> Result<SecretKey, Error> {
// first check our overrides and just return the key if we have one in there
if let Some(key) = self.key_overrides.get(key_id) {
trace!(LOGGER, "... Derived Key (using override) key_id: {}", key_id);
trace!(
LOGGER,
"... Derived Key (using override) key_id: {}",
key_id
);
return Ok(*key);
}

@ -149,8 +153,13 @@ impl Keychain {
{
let cache = self.key_derivation_cache.read().unwrap();
if let Some(derivation) = cache.get(key_id) {
trace!(LOGGER, "... Derived Key (cache hit) key_id: {}, derivation: {}", key_id, derivation);
return Ok(self.derived_key_from_index(*derivation)?)
trace!(
LOGGER,
"... Derived Key (cache hit) key_id: {}, derivation: {}",
key_id,
derivation
);
return Ok(self.derived_key_from_index(*derivation)?);
}
}

@ -180,19 +189,17 @@ impl Keychain {
}
}

Err(Error::KeyDerivation(
format!("failed to derive child_key for {:?}", key_id),
))
Err(Error::KeyDerivation(format!(
"failed to derive child_key for {:?}",
key_id
)))
}
// if we know the derivation index we can go straight to deriving the key
fn derived_key_from_index(
&self,
derivation: u32,
) -> Result<extkey::ChildKey, Error> {
fn derived_key_from_index(&self, derivation: u32) -> Result<extkey::ChildKey, Error> {
trace!(LOGGER, "Derived Key (fast) by derivation: {}", derivation);
let child_key = self.extkey.derive(&self.secp, derivation)?;
return Ok(child_key)
return Ok(child_key);
}

pub fn commit(&self, amount: u64, key_id: &Identifier) -> Result<Commitment, Error> {

@ -201,11 +208,7 @@ impl Keychain {
Ok(commit)
}

pub fn commit_with_key_index(
&self,
amount: u64,
derivation: u32,
) -> Result<Commitment, Error> {
pub fn commit_with_key_index(&self, amount: u64, derivation: u32) -> Result<Commitment, Error> {
let child_key = self.derived_key_from_index(derivation)?;
let commit = self.secp.commit(amount, child_key.key)?;
Ok(commit)

@ -217,7 +220,7 @@ impl Keychain {
Ok(commit)
}

pub fn switch_commit_from_index(&self, index:u32) -> Result<Commitment, Error> {
pub fn switch_commit_from_index(&self, index: u32) -> Result<Commitment, Error> {
// just do this directly, because cache seems really slow for wallet reconstruct
let skey = self.extkey.derive(&self.secp, index)?;
let skey = skey.key;

@ -252,7 +255,9 @@ impl Keychain {
} else {
if msg.len() != 64 {
error!(LOGGER, "Bullet proof message must be 64 bytes.");
return Err(Error::RangeProof("Bullet proof message must be 64 bytes".to_string()));
return Err(Error::RangeProof(
"Bullet proof message must be 64 bytes".to_string(),
));
}
}
return Ok(self.secp.bullet_proof(amount, skey, extra_data, Some(msg)));

@ -262,14 +267,14 @@ impl Keychain {
secp: &Secp256k1,
commit: Commitment,
proof: RangeProof,
extra_data: Option<Vec<u8>>)
-> Result<(), secp::Error> {
let result = secp.verify_bullet_proof(commit, proof, extra_data);
match result {
Ok(_) => Ok(()),
Err(e) => Err(e),
}
extra_data: Option<Vec<u8>>,
) -> Result<(), secp::Error> {
let result = secp.verify_bullet_proof(commit, proof, extra_data);
match result {
Ok(_) => Ok(()),
Err(e) => Err(e),
}
}

pub fn rewind_range_proof(
&self,

@ -279,9 +284,10 @@ impl Keychain {
proof: RangeProof,
) -> Result<ProofInfo, Error> {
let nonce = self.derived_key(key_id)?;
let proof_message = self.secp.unwind_bullet_proof(commit, nonce, extra_data, proof);
let proof_message = self.secp
.unwind_bullet_proof(commit, nonce, extra_data, proof);
let proof_info = match proof_message {
Ok(p) => ProofInfo {
Ok(p) => ProofInfo {
success: true,
value: 0,
message: p,

@ -300,7 +306,7 @@ impl Keychain {
max: 0,
exp: 0,
mantissa: 0,
}
},
};
return Ok(proof_info);
}

@ -334,26 +340,34 @@ impl Keychain {
Ok(BlindingFactor::from_secret_key(sum))
}

pub fn aggsig_create_context(&self, transaction_id: &Uuid, sec_key:SecretKey)
-> Result<(), Error>{
pub fn aggsig_create_context(
&self,
transaction_id: &Uuid,
sec_key: SecretKey,
) -> Result<(), Error> {
let mut contexts = self.aggsig_contexts.write().unwrap();
if contexts.is_none() {
*contexts = Some(HashMap::new())
}
if contexts.as_mut().unwrap().contains_key(transaction_id) {
return Err(Error::Transaction(String::from("Duplication transaction id")));
return Err(Error::Transaction(String::from(
"Duplication transaction id",
)));
}
contexts.as_mut().unwrap().insert(transaction_id.clone(), AggSigTxContext{
sec_key: sec_key,
sec_nonce: aggsig::export_secnonce_single(&self.secp).unwrap(),
output_ids: vec![],
});
contexts.as_mut().unwrap().insert(
transaction_id.clone(),
AggSigTxContext {
sec_key: sec_key,
sec_nonce: aggsig::export_secnonce_single(&self.secp).unwrap(),
output_ids: vec![],
},
);
Ok(())
}
/// Tracks an output contributing to my excess value (if it needs to
/// be kept between invocations)
pub fn aggsig_add_output(&self, transaction_id: &Uuid, output_id:&Identifier){
pub fn aggsig_add_output(&self, transaction_id: &Uuid, output_id: &Identifier) {
let mut agg_contexts = self.aggsig_contexts.write().unwrap();
let mut agg_contexts_local = agg_contexts.as_mut().unwrap().clone();
let mut agg_context = agg_contexts_local.get(transaction_id).unwrap().clone();

@ -374,68 +388,87 @@ impl Keychain {
/// Returns private key, private nonce
pub fn aggsig_get_private_keys(&self, transaction_id: &Uuid) -> (SecretKey, SecretKey) {
let contexts = self.aggsig_contexts.clone();
let contexts_read=contexts.read().unwrap();
let contexts_read = contexts.read().unwrap();
let agg_context = contexts_read.as_ref().unwrap();
let agg_context_return = agg_context.get(transaction_id);
(agg_context_return.unwrap().sec_key.clone(),
agg_context_return.unwrap().sec_nonce.clone())
(
agg_context_return.unwrap().sec_key.clone(),
agg_context_return.unwrap().sec_nonce.clone(),
)
}

/// Returns public key, public nonce
pub fn aggsig_get_public_keys(&self, transaction_id: &Uuid) -> (PublicKey, PublicKey) {
let contexts = self.aggsig_contexts.clone();
let contexts_read=contexts.read().unwrap();
let contexts_read = contexts.read().unwrap();
let agg_context = contexts_read.as_ref().unwrap();
let agg_context_return = agg_context.get(transaction_id);
(PublicKey::from_secret_key(&self.secp, &agg_context_return.unwrap().sec_key).unwrap(),
PublicKey::from_secret_key(&self.secp, &agg_context_return.unwrap().sec_nonce).unwrap())
(
PublicKey::from_secret_key(&self.secp, &agg_context_return.unwrap().sec_key).unwrap(),
PublicKey::from_secret_key(&self.secp, &agg_context_return.unwrap().sec_nonce).unwrap(),
)
}

/// Note 'secnonce' here is used to perform the signature, while 'pubnonce' just allows you to
/// provide a custom public nonce to include while calculating e
/// nonce_sum is the sum used to decide whether secnonce should be inverted during sig time
pub fn aggsig_sign_single(&self,
pub fn aggsig_sign_single(
&self,
transaction_id: &Uuid,
msg: &Message,
secnonce:Option<&SecretKey>,
secnonce: Option<&SecretKey>,
pubnonce: Option<&PublicKey>,
nonce_sum: Option<&PublicKey>) -> Result<Signature, Error> {
nonce_sum: Option<&PublicKey>,
) -> Result<Signature, Error> {
let contexts = self.aggsig_contexts.clone();
let contexts_read=contexts.read().unwrap();
let contexts_read = contexts.read().unwrap();
let agg_context = contexts_read.as_ref().unwrap();
let agg_context_return = agg_context.get(transaction_id);
let sig = aggsig::sign_single(&self.secp, msg, &agg_context_return.unwrap().sec_key, secnonce, pubnonce, nonce_sum)?;
let sig = aggsig::sign_single(
&self.secp,
msg,
&agg_context_return.unwrap().sec_key,
secnonce,
pubnonce,
nonce_sum,
)?;
Ok(sig)
}

//Verifies an aggsig signature
pub fn aggsig_verify_single(&self,
pub fn aggsig_verify_single(
&self,
sig: &Signature,
msg: &Message,
pubnonce:Option<&PublicKey>,
pubkey:&PublicKey,
is_partial:bool) -> bool {
pubnonce: Option<&PublicKey>,
pubkey: &PublicKey,
is_partial: bool,
) -> bool {
aggsig::verify_single(&self.secp, sig, msg, pubnonce, pubkey, is_partial)
}

//Verifies other final sig corresponds with what we're expecting
pub fn aggsig_verify_final_sig_build_msg(&self,
pub fn aggsig_verify_final_sig_build_msg(
&self,
sig: &Signature,
pubkey: &PublicKey,
fee: u64,
lock_height:u64) -> bool {
lock_height: u64,
) -> bool {
let msg = secp::Message::from_slice(&kernel_sig_msg(fee, lock_height)).unwrap();
self.aggsig_verify_single(sig, &msg, None, pubkey, true)
}

//Verifies other party's sig corresponds with what we're expecting
pub fn aggsig_verify_partial_sig(&self,
pub fn aggsig_verify_partial_sig(
&self,
transaction_id: &Uuid,
sig: &Signature,
other_pub_nonce:&PublicKey,
pubkey:&PublicKey,
other_pub_nonce: &PublicKey,
pubkey: &PublicKey,
fee: u64,
lock_height:u64) -> bool {
lock_height: u64,
) -> bool {
let (_, sec_nonce) = self.aggsig_get_private_keys(transaction_id);
let mut nonce_sum = other_pub_nonce.clone();
let _ = nonce_sum.add_exp_assign(&self.secp, &sec_nonce);

@ -449,7 +482,8 @@ impl Keychain {
transaction_id: &Uuid,
other_pub_nonce: &PublicKey,
fee: u64,
lock_height: u64) -> Result<Signature, Error>{
lock_height: u64,
) -> Result<Signature, Error> {
// Add public nonces kR*G + kS*G
let (_, sec_nonce) = self.aggsig_get_private_keys(transaction_id);
let mut nonce_sum = other_pub_nonce.clone();

@ -457,7 +491,13 @@ impl Keychain {
let msg = secp::Message::from_slice(&kernel_sig_msg(fee, lock_height))?;

//Now calculate signature using message M=fee, nonce in e=nonce_sum
self.aggsig_sign_single(transaction_id, &msg, Some(&sec_nonce), Some(&nonce_sum), Some(&nonce_sum))
self.aggsig_sign_single(
transaction_id,
&msg,
Some(&sec_nonce),
Some(&nonce_sum),
Some(&nonce_sum),
)
}
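
Aside: unrolled, the partial-signature helper above performs the steps below. Method and helper names are the ones visible in this file; `keychain.secp()` is the accessor the tests further down use, so treat this as a sketch rather than the exact call sites.

fn partial_sig(
    keychain: &Keychain,
    tx_id: &Uuid,
    other_pub_nonce: &PublicKey,
    fee: u64,
    lock_height: u64,
) -> Result<Signature, Error> {
    // nonce_sum = kR*G + kS*G: our secret nonce added onto their public nonce
    let (_, sec_nonce) = keychain.aggsig_get_private_keys(tx_id);
    let mut nonce_sum = other_pub_nonce.clone();
    let _ = nonce_sum.add_exp_assign(keychain.secp(), &sec_nonce);
    // the signed message commits to fee and lock_height
    let msg = secp::Message::from_slice(&kernel_sig_msg(fee, lock_height))?;
    // sign with our nonce against the combined nonce sum
    keychain.aggsig_sign_single(tx_id, &msg, Some(&sec_nonce), Some(&nonce_sum), Some(&nonce_sum))
}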

/// Helper function to calculate final signature
@ -466,7 +506,8 @@ impl Keychain {
transaction_id: &Uuid,
their_sig: &Signature,
our_sig: &Signature,
their_pub_nonce: &PublicKey) -> Result<Signature, Error> {
their_pub_nonce: &PublicKey,
) -> Result<Signature, Error> {
// Add public nonces kR*G + kS*G
let (_, sec_nonce) = self.aggsig_get_private_keys(transaction_id);
let mut nonce_sum = their_pub_nonce.clone();

@ -500,13 +541,13 @@ impl Keychain {

/// Verifies a sig given a commitment
pub fn aggsig_verify_single_from_commit(
secp:&Secp256k1,
secp: &Secp256k1,
sig: &Signature,
msg: &Message,
commit: &Commitment,
) -> bool {
// Extract the pubkey, unfortunately we need this hack for now, (we just hope one is valid)
// TODO: Create better secp256k1 API to do this
// Extract the pubkey, unfortunately we need this hack for now, (we just hope
// one is valid) TODO: Create better secp256k1 API to do this
let pubkeys = commit.to_two_pubkeys(secp);
let mut valid = false;
for i in 0..pubkeys.len() {

@ -562,7 +603,6 @@ mod test {
use util::secp::pedersen::ProofMessage;
use util::secp::key::SecretKey;

#[test]
fn test_key_derivation() {
let keychain = Keychain::from_random_seed().unwrap();

@ -591,8 +631,12 @@ mod test {
let mut msg = ProofMessage::from_bytes(&[0u8; 64]);
let extra_data = [99u8; 64];

let proof = keychain.range_proof(5, &key_id, commit, Some(extra_data.to_vec().clone()), msg).unwrap();
let proof_info = keychain.rewind_range_proof(&key_id, commit, Some(extra_data.to_vec().clone()), proof).unwrap();
let proof = keychain
.range_proof(5, &key_id, commit, Some(extra_data.to_vec().clone()), msg)
.unwrap();
let proof_info = keychain
.rewind_range_proof(&key_id, commit, Some(extra_data.to_vec().clone()), proof)
.unwrap();

assert_eq!(proof_info.success, true);

@ -610,8 +654,8 @@ mod test {
let proof_info = keychain
.rewind_range_proof(&key_id2, commit, Some(extra_data.to_vec().clone()), proof)
.unwrap();
// With bullet proofs, if you provide the wrong nonce you'll get gibberish back as opposed
// to a failure to recover the message
// With bullet proofs, if you provide the wrong nonce you'll get gibberish back
// as opposed to a failure to recover the message
assert_ne!(
proof_info.message,
secp::pedersen::ProofMessage::from_bytes(&[0; secp::constants::BULLET_PROOF_MSG_SIZE])

@ -638,7 +682,12 @@ mod test {
let commit3 = keychain.commit(4, &key_id).unwrap();
let wrong_extra_data = [98u8; 64];
let should_err = keychain
.rewind_range_proof(&key_id, commit3, Some(wrong_extra_data.to_vec().clone()), proof)
.rewind_range_proof(
&key_id,
commit3,
Some(wrong_extra_data.to_vec().clone()),
proof,
)
.unwrap();

assert_eq!(proof_info.success, false);

@ -656,20 +705,16 @@ mod test {
let skey1 = SecretKey::from_slice(
&keychain.secp,
&[
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 1,
],
).unwrap();

let skey2 = SecretKey::from_slice(
&keychain.secp,
&[
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 2,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 2,
],
).unwrap();

@ -684,10 +729,10 @@ mod test {
let commit_3 = keychain.secp.commit(0, skey3).unwrap();

// now sum commitments for keys 1 and 2
let sum = keychain.secp.commit_sum(
vec![commit_1.clone(), commit_2.clone()],
vec![],
).unwrap();
let sum = keychain
.secp
.commit_sum(vec![commit_1.clone(), commit_2.clone()], vec![])
.unwrap();

// confirm the commitment for key 3 matches the sum of the commitments 1 and 2
assert_eq!(sum, commit_3);

@ -695,10 +740,11 @@ mod test {
// now check we can sum keys up using keychain.blind_sum()
// in the same way (convenience function)
assert_eq!(
keychain.blind_sum(&BlindSum::new()
.add_blinding_factor(BlindingFactor::from_secret_key(skey1))
.add_blinding_factor(BlindingFactor::from_secret_key(skey2))
).unwrap(),
keychain
.blind_sum(&BlindSum::new()
.add_blinding_factor(BlindingFactor::from_secret_key(skey1))
.add_blinding_factor(BlindingFactor::from_secret_key(skey2)))
.unwrap(),
BlindingFactor::from_secret_key(skey3),
);
}

@ -714,41 +760,41 @@ mod test {
// Calculate the kernel excess here for convenience.
// Normally this would happen during transaction building.
let kernel_excess = {
let skey1 = sender_keychain.derived_key(
&sender_keychain.derive_key_id(1).unwrap(),
).unwrap();
let skey1 = sender_keychain
.derived_key(&sender_keychain.derive_key_id(1).unwrap())
.unwrap();

let skey2 = receiver_keychain.derived_key(
&receiver_keychain.derive_key_id(1).unwrap(),
).unwrap();
let skey2 = receiver_keychain
.derived_key(&receiver_keychain.derive_key_id(1).unwrap())
.unwrap();

let keychain = Keychain::from_random_seed().unwrap();
let blinding_factor = keychain.blind_sum(
&BlindSum::new()
let blinding_factor = keychain
.blind_sum(&BlindSum::new()
.sub_blinding_factor(BlindingFactor::from_secret_key(skey1))
.add_blinding_factor(BlindingFactor::from_secret_key(skey2))
).unwrap();
.add_blinding_factor(BlindingFactor::from_secret_key(skey2)))
.unwrap();

keychain.secp.commit(
0,
blinding_factor.secret_key(&keychain.secp).unwrap(),
).unwrap()
keychain
.secp
.commit(0, blinding_factor.secret_key(&keychain.secp).unwrap())
.unwrap()
};

// sender starts the tx interaction
let (sender_pub_excess, sender_pub_nonce) = {
let keychain = sender_keychain.clone();

let skey = keychain.derived_key(
&keychain.derive_key_id(1).unwrap(),
).unwrap();
let skey = keychain
.derived_key(&keychain.derive_key_id(1).unwrap())
.unwrap();

// dealing with an input here so we need to negate the blinding_factor
// rather than use it as is
let blinding_factor = keychain.blind_sum(
&BlindSum::new()
.sub_blinding_factor(BlindingFactor::from_secret_key(skey))
).unwrap();
let blinding_factor = keychain
.blind_sum(&BlindSum::new()
.sub_blinding_factor(BlindingFactor::from_secret_key(skey)))
.unwrap();

let blind = blinding_factor.secret_key(&keychain.secp()).unwrap();

@ -768,12 +814,9 @@ mod test {
let (pub_excess, pub_nonce) = keychain.aggsig_get_public_keys(&tx_id);
keychain.aggsig_add_output(&tx_id, &key_id);

let sig_part = keychain.aggsig_calculate_partial_sig(
&tx_id,
&sender_pub_nonce,
0,
0,
).unwrap();
let sig_part = keychain
.aggsig_calculate_partial_sig(&tx_id, &sender_pub_nonce, 0, 0)
.unwrap();
(pub_excess, pub_nonce, sig_part)
};

@ -795,12 +838,9 @@ mod test {
// now sender signs with their key
let sender_sig_part = {
let keychain = sender_keychain.clone();
keychain.aggsig_calculate_partial_sig(
&tx_id,
&receiver_pub_nonce,
0,
0,
).unwrap()
keychain
.aggsig_calculate_partial_sig(&tx_id, &receiver_pub_nonce, 0, 0)
.unwrap()
};

// check the receiver can verify the partial signature

@ -823,23 +863,24 @@ mod test {
let keychain = receiver_keychain.clone();

// Receiver recreates their partial sig (we do not maintain state from earlier)
let our_sig_part = keychain.aggsig_calculate_partial_sig(
&tx_id,
&sender_pub_nonce,
0,
0,
).unwrap();
let our_sig_part = keychain
.aggsig_calculate_partial_sig(&tx_id, &sender_pub_nonce, 0, 0)
.unwrap();

// Receiver now generates final signature from the two parts
let final_sig = keychain.aggsig_calculate_final_sig(
&tx_id,
&sender_sig_part,
&our_sig_part,
&sender_pub_nonce,
).unwrap();
let final_sig = keychain
.aggsig_calculate_final_sig(
&tx_id,
&sender_sig_part,
&our_sig_part,
&sender_pub_nonce,
)
.unwrap();

// Receiver calculates the final public key (to verify sig later)
let final_pubkey = keychain.aggsig_calculate_final_pubkey(&tx_id, &sender_pub_excess).unwrap();
let final_pubkey = keychain
.aggsig_calculate_final_pubkey(&tx_id, &sender_pub_excess)
.unwrap();

(final_sig, final_pubkey)
};

@ -849,12 +890,8 @@ mod test {
let keychain = receiver_keychain.clone();
// Receiver checks the final signature verifies
let sig_verifies = keychain.aggsig_verify_final_sig_build_msg(
&final_sig,
&final_pubkey,
0,
0,
);
let sig_verifies =
keychain.aggsig_verify_final_sig_build_msg(&final_sig, &final_pubkey, 0, 0);
assert!(sig_verifies);
}

@ -862,12 +899,7 @@ mod test {
{
let keychain = Keychain::from_random_seed().unwrap();

let msg = secp::Message::from_slice(
&kernel_sig_msg(
0,
0,
),
).unwrap();
let msg = secp::Message::from_slice(&kernel_sig_msg(0, 0)).unwrap();

let sig_verifies = Keychain::aggsig_verify_single_from_commit(
&keychain.secp,

@ -896,47 +928,47 @@ mod test {
// Calculate the kernel excess here for convenience.
// Normally this would happen during transaction building.
let kernel_excess = {
let skey1 = sender_keychain.derived_key(
&sender_keychain.derive_key_id(1).unwrap(),
).unwrap();
let skey1 = sender_keychain
.derived_key(&sender_keychain.derive_key_id(1).unwrap())
.unwrap();

let skey2 = receiver_keychain.derived_key(
&receiver_keychain.derive_key_id(1).unwrap(),
).unwrap();
let skey2 = receiver_keychain
.derived_key(&receiver_keychain.derive_key_id(1).unwrap())
.unwrap();

let keychain = Keychain::from_random_seed().unwrap();
let blinding_factor = keychain.blind_sum(
&BlindSum::new()
let blinding_factor = keychain
.blind_sum(&BlindSum::new()
.sub_blinding_factor(BlindingFactor::from_secret_key(skey1))
.add_blinding_factor(BlindingFactor::from_secret_key(skey2))
// subtract the kernel offset here as we would when
// verifying a kernel signature
.sub_blinding_factor(BlindingFactor::from_secret_key(kernel_offset))
).unwrap();
.sub_blinding_factor(BlindingFactor::from_secret_key(kernel_offset)))
.unwrap();

keychain.secp.commit(
0,
blinding_factor.secret_key(&keychain.secp).unwrap(),
).unwrap()
keychain
.secp
.commit(0, blinding_factor.secret_key(&keychain.secp).unwrap())
.unwrap()
};

// sender starts the tx interaction
let (sender_pub_excess, sender_pub_nonce) = {
let keychain = sender_keychain.clone();

let skey = keychain.derived_key(
&keychain.derive_key_id(1).unwrap(),
).unwrap();
let skey = keychain
.derived_key(&keychain.derive_key_id(1).unwrap())
.unwrap();

// dealing with an input here so we need to negate the blinding_factor
// rather than use it as is
let blinding_factor = keychain.blind_sum(
&BlindSum::new()
let blinding_factor = keychain
.blind_sum(&BlindSum::new()
.sub_blinding_factor(BlindingFactor::from_secret_key(skey))
// subtract the kernel offset to create an aggsig context
// with our "split" key
.sub_blinding_factor(BlindingFactor::from_secret_key(kernel_offset))
).unwrap();
.sub_blinding_factor(BlindingFactor::from_secret_key(kernel_offset)))
.unwrap();

let blind = blinding_factor.secret_key(&keychain.secp()).unwrap();

@ -955,12 +987,9 @@ mod test {
let (pub_excess, pub_nonce) = keychain.aggsig_get_public_keys(&tx_id);
keychain.aggsig_add_output(&tx_id, &key_id);

let sig_part = keychain.aggsig_calculate_partial_sig(
&tx_id,
&sender_pub_nonce,
0,
0,
).unwrap();
let sig_part = keychain
.aggsig_calculate_partial_sig(&tx_id, &sender_pub_nonce, 0, 0)
.unwrap();
(pub_excess, pub_nonce, sig_part)
};

@ -982,12 +1011,9 @@ mod test {
// now sender signs with their key
let sender_sig_part = {
let keychain = sender_keychain.clone();
keychain.aggsig_calculate_partial_sig(
&tx_id,
&receiver_pub_nonce,
0,
0,
).unwrap()
keychain
.aggsig_calculate_partial_sig(&tx_id, &receiver_pub_nonce, 0, 0)
.unwrap()
};

// check the receiver can verify the partial signature

@ -1010,23 +1036,24 @@ mod test {
let keychain = receiver_keychain.clone();

// Receiver recreates their partial sig (we do not maintain state from earlier)
let our_sig_part = keychain.aggsig_calculate_partial_sig(
&tx_id,
&sender_pub_nonce,
0,
0,
).unwrap();
let our_sig_part = keychain
.aggsig_calculate_partial_sig(&tx_id, &sender_pub_nonce, 0, 0)
.unwrap();

// Receiver now generates final signature from the two parts
let final_sig = keychain.aggsig_calculate_final_sig(
&tx_id,
&sender_sig_part,
&our_sig_part,
&sender_pub_nonce,
).unwrap();
let final_sig = keychain
.aggsig_calculate_final_sig(
&tx_id,
&sender_sig_part,
&our_sig_part,
&sender_pub_nonce,
)
.unwrap();

// Receiver calculates the final public key (to verify sig later)
let final_pubkey = keychain.aggsig_calculate_final_pubkey(&tx_id, &sender_pub_excess).unwrap();
let final_pubkey = keychain
.aggsig_calculate_final_pubkey(&tx_id, &sender_pub_excess)
.unwrap();

(final_sig, final_pubkey)
};

@ -1036,12 +1063,8 @@ mod test {
let keychain = receiver_keychain.clone();
// Receiver checks the final signature verifies
let sig_verifies = keychain.aggsig_verify_final_sig_build_msg(
&final_sig,
&final_pubkey,
0,
0,
);
let sig_verifies =
keychain.aggsig_verify_final_sig_build_msg(&final_sig, &final_pubkey, 0, 0);
assert!(sig_verifies);
}

@ -1049,12 +1072,7 @@ mod test {
{
let keychain = Keychain::from_random_seed().unwrap();

let msg = secp::Message::from_slice(
&kernel_sig_msg(
0,
0,
),
).unwrap();
let msg = secp::Message::from_slice(&kernel_sig_msg(0, 0)).unwrap();

let sig_verifies = Keychain::aggsig_verify_single_from_commit(
&keychain.secp,
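
Aside: condensing the two tests above, the sender/receiver aggsig exchange runs roughly as follows. Fee and lock_height are fixed at 0 as in the tests; `sender_blind` and `receiver_blind` stand for each party's summed blinding factor and are assumptions of this sketch, while every method is one of the Keychain methods shown in this diff.

fn two_party_sig(
    sender: &Keychain,
    receiver: &Keychain,
    tx_id: &Uuid,
    sender_blind: SecretKey,
    receiver_blind: SecretKey,
) -> bool {
    // each side opens a signing context keyed by the shared transaction id
    sender.aggsig_create_context(tx_id, sender_blind).unwrap();
    receiver.aggsig_create_context(tx_id, receiver_blind).unwrap();
    let (sender_excess, sender_nonce) = sender.aggsig_get_public_keys(tx_id);
    let (_, receiver_nonce) = receiver.aggsig_get_public_keys(tx_id);

    // exchange public nonces, then each side produces a partial signature
    let receiver_part = receiver
        .aggsig_calculate_partial_sig(tx_id, &sender_nonce, 0, 0)
        .unwrap();
    let sender_part = sender
        .aggsig_calculate_partial_sig(tx_id, &receiver_nonce, 0, 0)
        .unwrap();

    // receiver assembles the final signature and checks it
    let final_sig = receiver
        .aggsig_calculate_final_sig(tx_id, &sender_part, &receiver_part, &sender_nonce)
        .unwrap();
    let final_pubkey = receiver
        .aggsig_calculate_final_pubkey(tx_id, &sender_excess)
        .unwrap();
    receiver.aggsig_verify_final_sig_build_msg(&final_sig, &final_pubkey, 0, 0)
}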
@ -18,13 +18,13 @@ extern crate blake2_rfc as blake2;
extern crate byteorder;
extern crate grin_util as util;
extern crate rand;
extern crate uuid;
extern crate serde;
#[macro_use]
extern crate serde_derive;
extern crate serde_json;
#[macro_use]
extern crate slog;
extern crate uuid;

mod blind;
mod extkey;

@ -32,4 +32,4 @@ mod extkey;
pub use blind::{BlindSum, BlindingFactor};
pub use extkey::{ExtendedKey, Identifier, IDENTIFIER_SIZE};
pub mod keychain;
pub use keychain::{Error, Keychain, AggSigTxContext};
pub use keychain::{AggSigTxContext, Error, Keychain};
p2p/src/conn.rs
@ -23,7 +23,7 @@
use std::cmp;
use std::fs::File;
use std::io::{self, Read, Write};
use std::sync::{Arc, Mutex, mpsc};
use std::sync::{mpsc, Arc, Mutex};
use std::net::TcpStream;
use std::thread;
use std::time;

@ -64,13 +64,15 @@ pub struct Message<'a> {
}

impl<'a> Message<'a> {

fn from_header(header: MsgHeader, conn: &'a mut TcpStream) -> Message<'a> {
Message{header, conn}
Message { header, conn }
}

/// Read the message body from the underlying connection
pub fn body<T>(&mut self) -> Result<T, Error> where T: ser::Readable {
pub fn body<T>(&mut self) -> Result<T, Error>
where
T: ser::Readable,
{
read_body(&self.header, self.conn)
}

@ -89,10 +91,10 @@ impl<'a> Message<'a> {
/// Respond to the message with the provided message type and body
pub fn respond<T>(self, resp_type: Type, body: T) -> Response<'a>
where
T: ser::Writeable
T: ser::Writeable,
{
let body = ser::ser_vec(&body).unwrap();
Response{
Response {
resp_type: resp_type,
body: body,
conn: self.conn,

@ -111,7 +113,8 @@ pub struct Response<'a> {

impl<'a> Response<'a> {
fn write(mut self) -> Result<(), Error> {
let mut msg = ser::ser_vec(&MsgHeader::new(self.resp_type, self.body.len() as u64)).unwrap();
let mut msg =
ser::ser_vec(&MsgHeader::new(self.resp_type, self.body.len() as u64)).unwrap();
msg.append(&mut self.body);
write_all(&mut self.conn, &msg[..], 10000)?;
if let Some(mut file) = self.attachment {

@ -149,7 +152,7 @@ pub struct Tracker {
impl Tracker {
pub fn send<T>(&self, body: T, msg_type: Type) -> Result<(), Error>
where
T: ser::Writeable
T: ser::Writeable,
{
let buf = write_to_buf(body, msg_type);
self.send_channel.send(buf)?;

@ -168,7 +171,9 @@ where
let (close_tx, close_rx) = mpsc::channel();
let (error_tx, error_rx) = mpsc::channel();

stream.set_nonblocking(true).expect("Non-blocking IO not available.");
stream
.set_nonblocking(true)
.expect("Non-blocking IO not available.");
poll(stream, handler, send_rx, error_tx, close_rx);

Tracker {

@ -185,54 +190,67 @@ fn poll<H>(
handler: H,
send_rx: mpsc::Receiver<Vec<u8>>,
error_tx: mpsc::Sender<Error>,
close_rx: mpsc::Receiver<()>
)
where
close_rx: mpsc::Receiver<()>,
) where
H: MessageHandler,
{

let mut conn = conn;
let _ = thread::Builder::new().name("peer".to_string()).spawn(move || {
let sleep_time = time::Duration::from_millis(1);
let _ = thread::Builder::new()
.name("peer".to_string())
.spawn(move || {
let sleep_time = time::Duration::from_millis(1);

let conn = &mut conn;
let mut retry_send = Err(());
loop {
// check the read end
if let Some(h) = try_break!(error_tx, read_header(conn)) {
let msg = Message::from_header(h, conn);
debug!(LOGGER, "Received message header, type {:?}, len {}.", msg.header.msg_type, msg.header.msg_len);
if let Some(Some(resp)) = try_break!(error_tx, handler.consume(msg)) {
try_break!(error_tx, resp.write());
let conn = &mut conn;
let mut retry_send = Err(());
loop {
// check the read end
if let Some(h) = try_break!(error_tx, read_header(conn)) {
let msg = Message::from_header(h, conn);
debug!(
LOGGER,
"Received message header, type {:?}, len {}.",
msg.header.msg_type,
msg.header.msg_len
);
if let Some(Some(resp)) = try_break!(error_tx, handler.consume(msg)) {
try_break!(error_tx, resp.write());
}
}
}

// check the write end
if let Ok::<Vec<u8>, ()>(data) = retry_send {
if let None = try_break!(error_tx, conn.write_all(&data[..]).map_err(&From::from)) {
retry_send = Ok(data);
// check the write end
if let Ok::<Vec<u8>, ()>(data) = retry_send {
if let None =
try_break!(error_tx, conn.write_all(&data[..]).map_err(&From::from))
{
retry_send = Ok(data);
} else {
retry_send = Err(());
}
} else if let Ok(data) = send_rx.try_recv() {
if let None =
try_break!(error_tx, conn.write_all(&data[..]).map_err(&From::from))
{
retry_send = Ok(data);
} else {
retry_send = Err(());
}
} else {
retry_send = Err(());
}
} else if let Ok(data) = send_rx.try_recv() {
if let None = try_break!(error_tx, conn.write_all(&data[..]).map_err(&From::from)) {
retry_send = Ok(data);
} else {
retry_send = Err(());

// check the close channel
if let Ok(_) = close_rx.try_recv() {
debug!(
LOGGER,
"Connection close with {} initiated by us",
conn.peer_addr()
.map(|a| a.to_string())
.unwrap_or("?".to_owned())
);
break;
}
} else {
retry_send = Err(());
}

// check the close channel
if let Ok(_) = close_rx.try_recv() {
debug!(LOGGER,
"Connection close with {} initiated by us",
conn.peer_addr().map(|a| a.to_string()).unwrap_or("?".to_owned()));
break;
thread::sleep(sleep_time);
}

thread::sleep(sleep_time);
}
});
});
}
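
Aside: the poll loop above drives a MessageHandler whose `consume` may hand back a Response to write. The trait signature below is inferred from the `Some(Some(resp))` handling and from the Protocol implementation later in this diff, so treat it as a sketch rather than the crate's API verbatim.

struct PongHandler;

impl MessageHandler for PongHandler {
    fn consume<'a>(&self, mut msg: Message<'a>) -> Result<Option<Response<'a>>, Error> {
        match msg.header.msg_type {
            // answer pings with a matching pong, ignore everything else
            Type::Ping => {
                let ping: Ping = msg.body()?;
                Ok(Some(msg.respond(
                    Type::Pong,
                    Pong {
                        total_difficulty: ping.total_difficulty,
                        height: ping.height,
                    },
                )))
            }
            _ => Ok(None),
        }
    }
}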
@ -13,7 +13,7 @@
// limitations under the License.

use std::collections::VecDeque;
use std::net::{TcpStream, SocketAddr};
use std::net::{SocketAddr, TcpStream};
use std::sync::{Arc, RwLock};

use rand::Rng;

@ -60,7 +60,6 @@ impl Handshake {
self_addr: SocketAddr,
conn: &mut TcpStream,
) -> Result<PeerInfo, Error> {

// prepare the first part of the handshake
let nonce = self.next_nonce();
let peer_addr = match conn.peer_addr() {

@ -115,7 +114,7 @@ impl Handshake {
peer_info.addr,
peer_info.user_agent,
peer_info.capabilities
);
);
// when more than one protocol version is supported, choosing should go here
Ok(peer_info)
}

@ -126,7 +125,6 @@ impl Handshake {
total_difficulty: Difficulty,
conn: &mut TcpStream,
) -> Result<PeerInfo, Error> {

let hand: Hand = read_message(conn, Type::Hand)?;

// all the reasons we could refuse this connection for

@ -201,23 +199,23 @@ impl Handshake {
// port reported by the connection is always incorrect for receiving
// connections as it's dynamically allocated by the server.
fn extract_ip(advertised: &SocketAddr, conn: &TcpStream) -> SocketAddr {
match advertised {
&SocketAddr::V4(v4sock) => {
let ip = v4sock.ip();
if ip.is_loopback() || ip.is_unspecified() {
if let Ok(addr) = conn.peer_addr() {
return SocketAddr::new(addr.ip(), advertised.port());
}
}
}
&SocketAddr::V6(v6sock) => {
let ip = v6sock.ip();
if ip.is_loopback() || ip.is_unspecified() {
if let Ok(addr) = conn.peer_addr() {
return SocketAddr::new(addr.ip(), advertised.port());
}
}
}
}
advertised.clone()
}
@ -49,9 +49,9 @@ mod serv;
mod store;
mod types;

pub use serv::{Server, DummyAdapter};
pub use serv::{DummyAdapter, Server};
pub use peers::Peers;
pub use peer::Peer;
pub use types::{Capabilities, Error, ChainAdapter, SumtreesRead, P2PConfig,
PeerInfo, MAX_BLOCK_HEADERS, MAX_PEER_ADDRS};
pub use types::{Capabilities, ChainAdapter, Error, P2PConfig, PeerInfo, SumtreesRead,
MAX_BLOCK_HEADERS, MAX_PEER_ADDRS};
pub use store::{PeerData, State};
@ -15,7 +15,7 @@
//! Message types that transit over the network and related serialization code.

use std::io::{self, Read, Write};
use std::net::{TcpStream, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6};
use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6, TcpStream};
use std::thread;
use std::time;
use num::FromPrimitive;

@ -81,14 +81,13 @@ enum_from_primitive! {
/// time is not guaranteed to be exact. To support cases where we want to poll
/// instead of blocking, a `block_on_empty` boolean, when false, ensures
/// `read_exact` returns early with a `io::ErrorKind::WouldBlock` if nothing
/// has been read from the socket.
/// has been read from the socket.
pub fn read_exact(
conn: &mut TcpStream,
mut buf: &mut [u8],
timeout: u32,
block_on_empty: bool,
) -> io::Result<()> {

let sleep_time = time::Duration::from_millis(1);
let mut count = 0;
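
Aside: a usage sketch of the non-blocking mode. read_header further down calls read_exact with block_on_empty = false, which surfaces as WouldBlock when the socket has nothing yet; `poll_bytes` is a hypothetical helper built on that behaviour.

fn poll_bytes(conn: &mut TcpStream, len: usize) -> io::Result<Option<Vec<u8>>> {
    let mut buf = vec![0u8; len];
    match read_exact(conn, &mut buf, 10000, false) {
        Ok(()) => Ok(Some(buf)),
        // nothing read yet; the caller is free to retry on the next poll tick
        Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => Ok(None),
        Err(e) => Err(e),
    }
}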
@ -116,7 +115,10 @@ pub fn read_exact(
break;
}
if count > timeout {
return Err(io::Error::new(io::ErrorKind::TimedOut, "reading from tcp stream"));
return Err(io::Error::new(
io::ErrorKind::TimedOut,
"reading from tcp stream",
));
}
}
Ok(())

@ -124,14 +126,17 @@ pub fn read_exact(

/// Same as `read_exact` but for writing.
pub fn write_all(conn: &mut Write, mut buf: &[u8], timeout: u32) -> io::Result<()> {

let sleep_time = time::Duration::from_millis(1);
let mut count = 0;

while !buf.is_empty() {
match conn.write(buf) {
Ok(0) => return Err(io::Error::new(io::ErrorKind::WriteZero,
"failed to write whole buffer")),
Ok(0) => {
return Err(io::Error::new(
io::ErrorKind::WriteZero,
"failed to write whole buffer",
))
}
Ok(n) => buf = &buf[n..],
Err(ref e) if e.kind() == io::ErrorKind::Interrupted => {}
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {}

@ -144,7 +149,10 @@ pub fn write_all(conn: &mut Write, mut buf: &[u8], timeout: u32) -> io::Result<(
break;
}
if count > timeout {
return Err(io::Error::new(io::ErrorKind::TimedOut, "reading from tcp stream"));
return Err(io::Error::new(
io::ErrorKind::TimedOut,
"reading from tcp stream",
));
}
}
Ok(())

@ -154,7 +162,6 @@ pub fn write_all(conn: &mut Write, mut buf: &[u8], timeout: u32) -> io::Result<(
/// underlying stream is async. Typically headers will be polled for, so
/// we do not want to block.
pub fn read_header(conn: &mut TcpStream) -> Result<MsgHeader, Error> {

let mut head = vec![0u8; HEADER_LEN as usize];
read_exact(conn, &mut head, 10000, false)?;
let header = ser::deserialize::<MsgHeader>(&mut &head[..])?;

@ -188,10 +195,7 @@ where
read_body(&header, conn)
}

pub fn write_to_buf<T>(
msg: T,
msg_type: Type,
) -> Vec<u8>
pub fn write_to_buf<T>(msg: T, msg_type: Type) -> Vec<u8>
where
T: Writeable,
{

@ -208,11 +212,7 @@ where
msg_buf
}

pub fn write_message<T>(
conn: &mut TcpStream,
msg: T,
msg_type: Type,
) -> Result<(), Error>
pub fn write_message<T>(conn: &mut TcpStream, msg: T, msg_type: Type) -> Result<(), Error>
where
T: Writeable + 'static,
{

@ -597,11 +597,14 @@ impl Readable for Ping {
Ok(diff) => diff,
Err(_) => Difficulty::zero(),
};
let height = match reader.read_u64(){
let height = match reader.read_u64() {
Ok(h) => h,
Err(_) => 0,
};
Ok(Ping { total_difficulty, height })
Ok(Ping {
total_difficulty,
height,
})
}
}

@ -610,7 +613,7 @@ pub struct Pong {
/// may be needed
pub total_difficulty: Difficulty,
/// height accumulated by sender
pub height: u64
pub height: u64,
}

impl Writeable for Pong {

@ -632,7 +635,10 @@ impl Readable for Pong {
Ok(h) => h,
Err(_) => 0,
};
Ok(Pong { total_difficulty, height })
Ok(Pong {
total_difficulty,
height,
})
}
}

@ -641,8 +647,8 @@ impl Readable for Pong {
pub struct SumtreesRequest {
/// Hash of the block for which the sumtrees should be provided
pub hash: Hash,
/// Height of the corresponding block
pub height: u64
/// Height of the corresponding block
pub height: u64,
}

impl Writeable for SumtreesRequest {

@ -667,7 +673,7 @@ impl Readable for SumtreesRequest {
pub struct SumtreesArchive {
/// Hash of the block for which the sumtrees are provided
pub hash: Hash,
/// Height of the corresponding block
/// Height of the corresponding block
pub height: u64,
/// Output tree index the receiver should rewind to
pub rewind_to_output: u64,

@ -680,10 +686,13 @@ pub struct SumtreesArchive {
impl Writeable for SumtreesArchive {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
self.hash.write(writer)?;
ser_multiwrite!(writer, [write_u64, self.height],
[write_u64, self.rewind_to_output],
[write_u64, self.rewind_to_kernel],
[write_u64, self.bytes]);
ser_multiwrite!(
writer,
[write_u64, self.height],
[write_u64, self.rewind_to_output],
[write_u64, self.rewind_to_kernel],
[write_u64, self.bytes]
);
Ok(())
}
}

@ -694,6 +703,12 @@ impl Readable for SumtreesArchive {
let (height, rewind_to_output, rewind_to_kernel, bytes) =
ser_multiread!(reader, read_u64, read_u64, read_u64, read_u64);

Ok(SumtreesArchive {hash, height, rewind_to_output, rewind_to_kernel, bytes})
Ok(SumtreesArchive {
hash,
height,
rewind_to_output,
rewind_to_kernel,
bytes,
})
}
}
p2p/src/peer.rs
@ -41,7 +41,7 @@ pub struct Peer {
state: Arc<RwLock<State>>,
// set of all hashes known to this peer (so no need to send)
tracking_adapter: TrackingAdapter,
connection: Option<conn::Tracker>
connection: Option<conn::Tracker>,
}

unsafe impl Sync for Peer {}

@ -65,7 +65,6 @@ impl Peer {
hs: &Handshake,
na: Arc<NetAdapter>,
) -> Result<Peer, Error> {

let info = hs.accept(capab, total_difficulty, conn)?;
Ok(Peer::new(info, na))
}

@ -78,7 +77,6 @@ impl Peer {
hs: &Handshake,
na: Arc<NetAdapter>,
) -> Result<Peer, Error> {

let info = hs.initiate(capab, total_difficulty, self_addr, conn)?;
Ok(Peer::new(info, na))
}

@ -96,31 +94,41 @@ impl Peer {
let peer = format!("{}:{}", peer_addr.ip(), peer_addr.port());
if let Some(ref denied) = config.peers_deny {
if denied.contains(&peer) {
debug!(LOGGER, "checking peer allowed/denied: {:?} explicitly denied", peer_addr);
debug!(
LOGGER,
"checking peer allowed/denied: {:?} explicitly denied", peer_addr
);
return true;
}
}
if let Some(ref allowed) = config.peers_allow {
if allowed.contains(&peer) {
debug!(LOGGER, "checking peer allowed/denied: {:?} explicitly allowed", peer_addr);
debug!(
LOGGER,
"checking peer allowed/denied: {:?} explicitly allowed", peer_addr
);
return false;
} else {
debug!(LOGGER, "checking peer allowed/denied: {:?} not explicitly allowed, denying", peer_addr);
debug!(
LOGGER,
"checking peer allowed/denied: {:?} not explicitly allowed, denying", peer_addr
);
return true;
}
}

// default to allowing peer connection if we do not explicitly allow or deny the peer
// default to allowing peer connection if we do not explicitly allow or deny
// the peer
false
}

/// Whether this peer is still connected.
pub fn is_connected(&self) -> bool {
if !self.check_connection() {
return false
return false;
}
let state = self.state.read().unwrap();
*state == State::Connected
*state == State::Connected
}

/// Whether this peer has been banned.
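
Aside: the allow/deny logic above reduces to the predicate below; the Option<Vec<String>> shape of peers_deny/peers_allow is implied by the contains calls and is an assumption here.

fn is_denied(deny: &Option<Vec<String>>, allow: &Option<Vec<String>>, peer: &str) -> bool {
    if let Some(ref denied) = *deny {
        if denied.iter().any(|p| p == peer) {
            return true; // explicitly denied
        }
    }
    if let Some(ref allowed) = *allow {
        // once an allowlist exists, anything not on it is denied
        return !allowed.iter().any(|p| p == peer);
    }
    // no explicit allow or deny: default to allowing the connection
    false
}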
@ -136,10 +144,17 @@ impl Peer {
*state = State::Banned;
}

/// Send a ping to the remote peer, providing our local difficulty and height
/// Send a ping to the remote peer, providing our local difficulty and
/// height
pub fn send_ping(&self, total_difficulty: Difficulty, height: u64) -> Result<(), Error> {
let ping_msg = Ping{total_difficulty, height};
self.connection.as_ref().unwrap().send(ping_msg, msg::Type::Ping)
let ping_msg = Ping {
total_difficulty,
height,
};
self.connection
.as_ref()
.unwrap()
.send(ping_msg, msg::Type::Ping)
}

/// Sends the provided block to the remote peer. The request may be dropped

@ -161,8 +176,16 @@ impl Peer {

pub fn send_compact_block(&self, b: &core::CompactBlock) -> Result<(), Error> {
if !self.tracking_adapter.has(b.hash()) {
debug!(LOGGER, "Send compact block {} to {}", b.hash(), self.info.addr);
self.connection.as_ref().unwrap().send(b, msg::Type::CompactBlock)
debug!(
LOGGER,
"Send compact block {} to {}",
b.hash(),
self.info.addr
);
self.connection
.as_ref()
.unwrap()
.send(b, msg::Type::CompactBlock)
} else {
debug!(
LOGGER,

@ -177,7 +200,10 @@ impl Peer {
pub fn send_header(&self, bh: &core::BlockHeader) -> Result<(), Error> {
if !self.tracking_adapter.has(bh.hash()) {
debug!(LOGGER, "Send header {} to {}", bh.hash(), self.info.addr);
self.connection.as_ref().unwrap().send(bh, msg::Type::Header)
self.connection
.as_ref()
.unwrap()
.send(bh, msg::Type::Header)
} else {
debug!(
LOGGER,

@ -194,32 +220,51 @@ impl Peer {
pub fn send_transaction(&self, tx: &core::Transaction) -> Result<(), Error> {
if !self.tracking_adapter.has(tx.hash()) {
debug!(LOGGER, "Send tx {} to {}", tx.hash(), self.info.addr);
self.connection.as_ref().unwrap().send(tx, msg::Type::Transaction)
self.connection
.as_ref()
.unwrap()
.send(tx, msg::Type::Transaction)
} else {
debug!(LOGGER, "Not sending tx {} to {} (already seen)", tx.hash(), self.info.addr);
debug!(
LOGGER,
"Not sending tx {} to {} (already seen)",
tx.hash(),
self.info.addr
);
Ok(())
}
}

/// Sends a request for block headers from the provided block locator
pub fn send_header_request(&self, locator: Vec<Hash>) -> Result<(), Error> {
self.connection.as_ref().unwrap().send(
&Locator {
hashes: locator,
},
msg::Type::GetHeaders)
self.connection
.as_ref()
.unwrap()
.send(&Locator { hashes: locator }, msg::Type::GetHeaders)
}

/// Sends a request for a specific block by hash
pub fn send_block_request(&self, h: Hash) -> Result<(), Error> {
debug!(LOGGER, "Requesting block {} from peer {}.", h, self.info.addr);
self.connection.as_ref().unwrap().send(&h, msg::Type::GetBlock)
debug!(
LOGGER,
"Requesting block {} from peer {}.", h, self.info.addr
);
self.connection
.as_ref()
.unwrap()
.send(&h, msg::Type::GetBlock)
}

/// Sends a request for a specific compact block by hash
pub fn send_compact_block_request(&self, h: Hash) -> Result<(), Error> {
debug!(LOGGER, "Requesting compact block {} from {}", h, self.info.addr);
self.connection.as_ref().unwrap().send(&h, msg::Type::GetCompactBlock)
debug!(
LOGGER,
"Requesting compact block {} from {}", h, self.info.addr
);
self.connection
.as_ref()
.unwrap()
.send(&h, msg::Type::GetCompactBlock)
}

pub fn send_peer_request(&self, capab: Capabilities) -> Result<(), Error> {

@ -228,14 +273,19 @@ impl Peer {
&GetPeerAddrs {
capabilities: capab,
},
msg::Type::GetPeerAddrs)
msg::Type::GetPeerAddrs,
)
}

pub fn send_sumtrees_request(&self, height: u64, hash: Hash) -> Result<(), Error> {
debug!(LOGGER, "Asking {} for sumtree archive at {} {}.",
self.info.addr, height, hash);
debug!(
LOGGER,
"Asking {} for sumtree archive at {} {}.", self.info.addr, height, hash
);
self.connection.as_ref().unwrap().send(
&SumtreesRequest {hash, height }, msg::Type::SumtreesRequest)
&SumtreesRequest { hash, height },
msg::Type::SumtreesRequest,
)
}

/// Stops the peer, closing its connection

@ -248,7 +298,10 @@ impl Peer {
Ok(Error::Serialization(e)) => {
let mut state = self.state.write().unwrap();
*state = State::Banned;
info!(LOGGER, "Client {} corrupted, ban ({:?}).", self.info.addr, e);
info!(
LOGGER,
"Client {} corrupted, ban ({:?}).", self.info.addr, e
);
false
}
Ok(e) => {

@ -339,11 +392,21 @@ impl ChainAdapter for TrackingAdapter {
self.adapter.sumtrees_read(h)
}

fn sumtrees_write(&self, h: Hash,
rewind_to_output: u64, rewind_to_kernel: u64,
sumtree_data: File, peer_addr: SocketAddr) -> bool {
self.adapter.sumtrees_write(h, rewind_to_output, rewind_to_kernel,
sumtree_data, peer_addr)
fn sumtrees_write(
&self,
h: Hash,
rewind_to_output: u64,
rewind_to_kernel: u64,
sumtree_data: File,
peer_addr: SocketAddr,
) -> bool {
self.adapter.sumtrees_write(
h,
rewind_to_output,
rewind_to_kernel,
sumtree_data,
peer_addr,
)
}
}

@ -356,7 +419,7 @@ impl NetAdapter for TrackingAdapter {
self.adapter.peer_addrs_received(addrs)
}

fn peer_difficulty(&self, addr: SocketAddr, diff: Difficulty, height:u64) {
fn peer_difficulty(&self, addr: SocketAddr, diff: Difficulty, height: u64) {
self.adapter.peer_difficulty(addr, diff, height)
}
}
@@ -26,7 +26,7 @@ use util::LOGGER;
use time;

use peer::Peer;
use store::{PeerStore, PeerData, State};
use store::{PeerData, PeerStore, State};
use types::*;

pub struct Peers {

@@ -446,7 +446,10 @@ impl ChainAdapter for Peers {
if !self.adapter.block_received(b, peer_addr) {
// if the peer sent us a block that's intrinsically bad
// they are either mistaken or malevolent, both of which require a ban
debug!(LOGGER, "Received a bad block {} from {}, the peer will be banned", hash, peer_addr);
debug!(
LOGGER,
"Received a bad block {} from {}, the peer will be banned", hash, peer_addr
);
self.ban_peer(&peer_addr);
false
} else {

@@ -458,7 +461,12 @@ impl ChainAdapter for Peers {
if !self.adapter.compact_block_received(cb, peer_addr) {
// if the peer sent us a block that's intrinsically bad
// they are either mistaken or malevolent, both of which require a ban
debug!(LOGGER, "Received a bad compact block {} from {}, the peer will be banned", hash, &peer_addr);
debug!(
LOGGER,
"Received a bad compact block {} from {}, the peer will be banned",
hash,
&peer_addr
);
self.ban_peer(&peer_addr);
false
} else {

@@ -495,9 +503,17 @@ impl ChainAdapter for Peers {
sumtree_data: File,
peer_addr: SocketAddr,
) -> bool {
if !self.adapter.sumtrees_write(h, rewind_to_output, rewind_to_kernel,
sumtree_data, peer_addr) {
debug!(LOGGER, "Received a bad sumtree data from {}, the peer will be banned", &peer_addr);
if !self.adapter.sumtrees_write(
h,
rewind_to_output,
rewind_to_kernel,
sumtree_data,
peer_addr,
) {
debug!(
LOGGER,
"Received a bad sumtree data from {}, the peer will be banned", &peer_addr
);
self.ban_peer(&peer_addr);
false
} else {

@@ -33,7 +33,7 @@ pub struct Protocol {

impl Protocol {
pub fn new(adapter: Arc<NetAdapter>, addr: SocketAddr) -> Protocol {
Protocol{adapter, addr}
Protocol { adapter, addr }
}
}

@@ -42,26 +42,24 @@ impl MessageHandler for Protocol {
let adapter = &self.adapter;

match msg.header.msg_type {

Type::Ping => {
let ping: Ping = msg.body()?;
adapter.peer_difficulty(self.addr, ping.total_difficulty, ping.height);

Ok(Some(
msg.respond(
Type::Pong,
Pong {
total_difficulty: adapter.total_difficulty(),
height: adapter.total_height(),
})
))
Ok(Some(msg.respond(
Type::Pong,
Pong {
total_difficulty: adapter.total_difficulty(),
height: adapter.total_height(),
},
)))
}

Type::Pong => {
let pong: Pong = msg.body()?;
adapter.peer_difficulty(self.addr, pong.total_difficulty, pong.height);
Ok(None)
},
}

Type::Transaction => {
let tx: core::Transaction = msg.body()?;

@@ -90,7 +88,6 @@ impl MessageHandler for Protocol {
Ok(None)
}


Type::GetCompactBlock => {
let h: Hash = msg.body()?;
debug!(LOGGER, "handle_payload: GetCompactBlock: {}", h);

@@ -110,7 +107,7 @@ impl MessageHandler for Protocol {
debug!(
LOGGER,
"handle_payload: GetCompactBlock: empty block, sending full block",
);
);

Ok(Some(msg.respond(Type::Block, b)))
} else {

@@ -136,7 +133,10 @@ impl MessageHandler for Protocol {
let headers = adapter.locate_headers(loc.hashes);

// serialize and send all the headers over
Ok(Some(msg.respond(Type::Headers, Headers { headers: headers })))
Ok(Some(msg.respond(
Type::Headers,
Headers { headers: headers },
)))
}

// "header first" block propagation - if we have not yet seen this block

@@ -160,13 +160,12 @@ impl MessageHandler for Protocol {
Type::GetPeerAddrs => {
let get_peers: GetPeerAddrs = msg.body()?;
let peer_addrs = adapter.find_peer_addrs(get_peers.capabilities);
Ok(Some(
msg.respond(
Type::PeerAddrs,
PeerAddrs {
peers: peer_addrs.iter().map(|sa| SockAddr(*sa)).collect(),
})
))
Ok(Some(msg.respond(
Type::PeerAddrs,
PeerAddrs {
peers: peer_addrs.iter().map(|sa| SockAddr(*sa)).collect(),
},
)))
}

Type::PeerAddrs => {

@@ -177,8 +176,10 @@ impl MessageHandler for Protocol {

Type::SumtreesRequest => {
let sm_req: SumtreesRequest = msg.body()?;
debug!(LOGGER, "handle_payload: sumtree req for {} at {}",
sm_req.hash, sm_req.height);
debug!(
LOGGER,
"handle_payload: sumtree req for {} at {}", sm_req.hash, sm_req.height
);

let sumtrees = self.adapter.sumtrees_read(sm_req.hash);

@@ -192,7 +193,8 @@ impl MessageHandler for Protocol {
rewind_to_output: sumtrees.output_index,
rewind_to_kernel: sumtrees.kernel_index,
bytes: file_sz,
});
},
);
resp.add_attachment(sumtrees.reader);
Ok(Some(resp))
} else {

@@ -202,22 +204,31 @@ impl MessageHandler for Protocol {

Type::SumtreesArchive => {
let sm_arch: SumtreesArchive = msg.body()?;
debug!(LOGGER, "handle_payload: sumtree archive for {} at {} rewind to {}/{}",
sm_arch.hash, sm_arch.height,
sm_arch.rewind_to_output, sm_arch.rewind_to_kernel);
debug!(
LOGGER,
"handle_payload: sumtree archive for {} at {} rewind to {}/{}",
sm_arch.hash,
sm_arch.height,
sm_arch.rewind_to_output,
sm_arch.rewind_to_kernel
);

let mut tmp = env::temp_dir();
tmp.push("sumtree.zip");
{
let mut tmp_zip = File::create(tmp.clone())?;
msg.copy_attachment(sm_arch.bytes as usize, &mut tmp_zip)?;
msg.copy_attachment(sm_arch.bytes as usize, &mut tmp_zip)?;
tmp_zip.sync_all()?;
}

let tmp_zip = File::open(tmp)?;
self.adapter.sumtrees_write(
sm_arch.hash, sm_arch.rewind_to_output,
sm_arch.rewind_to_kernel, tmp_zip, self.addr);
sm_arch.hash,
sm_arch.rewind_to_output,
sm_arch.rewind_to_kernel,
tmp_zip,
self.addr,
);
Ok(None)
}

@@ -14,7 +14,7 @@

use std::fs::File;
use std::io;
use std::net::{TcpListener, TcpStream, SocketAddr, Shutdown};
use std::net::{Shutdown, SocketAddr, TcpListener, TcpStream};
use std::sync::{Arc, RwLock};
use std::sync::atomic::{AtomicBool, Ordering};
use std::thread;

@@ -45,7 +45,6 @@ unsafe impl Send for Server {}

// TODO TLS
impl Server {

/// Creates a new idle p2p server with no peers
pub fn new(
db_root: String,

@@ -55,7 +54,6 @@ impl Server {
genesis: Hash,
stop: Arc<AtomicBool>,
) -> Result<Server, Error> {

Ok(Server {
config: config.clone(),
capabilities: capab,

@@ -71,8 +69,9 @@ impl Server {
// start peer monitoring thread
let peers_inner = self.peers.clone();
let stop = self.stop.clone();
let _ = thread::Builder::new().name("p2p-monitor".to_string()).spawn(move || {
loop {
let _ = thread::Builder::new()
.name("p2p-monitor".to_string())
.spawn(move || loop {
let total_diff = peers_inner.total_difficulty();
let total_height = peers_inner.total_height();
peers_inner.check_all(total_diff, total_height);

@@ -80,8 +79,7 @@ impl Server {
if stop.load(Ordering::Relaxed) {
break;
}
}
});
});

// start TCP listener and handle incoming connections
let addr = SocketAddr::new(self.config.host, self.config.port);

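The monitoring thread above follows a common stop-flag pattern. A minimal, self-contained sketch of that pattern (the peer checks are replaced by a placeholder sleep; the real loop calls check_all and friends):

    use std::sync::Arc;
    use std::sync::atomic::{AtomicBool, Ordering};
    use std::thread;
    use std::time::Duration;

    // spawn a named worker that loops until the shared stop flag flips
    fn spawn_monitor(stop: Arc<AtomicBool>) -> thread::JoinHandle<()> {
        thread::Builder::new()
            .name("p2p-monitor".to_string())
            .spawn(move || loop {
                // placeholder for the peer maintenance done by check_all()
                thread::sleep(Duration::from_secs(10));
                if stop.load(Ordering::Relaxed) {
                    break;
                }
            })
            .expect("failed to spawn monitor thread")
    }
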
@@ -98,7 +96,8 @@ impl Server {
LOGGER,
"Error accepting peer {}: {:?}",
peer_addr.to_string(),
e);
e
);
}
}
}

@@ -207,10 +206,16 @@ impl ChainAdapter for DummyAdapter {
0
}
fn transaction_received(&self, _: core::Transaction) {}
fn compact_block_received(&self, _cb: core::CompactBlock, _addr: SocketAddr) -> bool { true }
fn header_received(&self, _bh: core::BlockHeader, _addr: SocketAddr) -> bool { true }
fn block_received(&self, _: core::Block, _: SocketAddr) -> bool { true }
fn headers_received(&self, _: Vec<core::BlockHeader>, _:SocketAddr) {}
fn compact_block_received(&self, _cb: core::CompactBlock, _addr: SocketAddr) -> bool {
true
}
fn header_received(&self, _bh: core::BlockHeader, _addr: SocketAddr) -> bool {
true
}
fn block_received(&self, _: core::Block, _: SocketAddr) -> bool {
true
}
fn headers_received(&self, _: Vec<core::BlockHeader>, _: SocketAddr) {}
fn locate_headers(&self, _: Vec<Hash>) -> Vec<core::BlockHeader> {
vec![]
}

@@ -221,9 +226,14 @@ impl ChainAdapter for DummyAdapter {
unimplemented!()
}

fn sumtrees_write(&self, _h: Hash,
_rewind_to_output: u64, _rewind_to_kernel: u64,
_sumtree_data: File, _peer_addr: SocketAddr) -> bool {
fn sumtrees_write(
&self,
_h: Hash,
_rewind_to_output: u64,
_rewind_to_kernel: u64,
_sumtree_data: File,
_peer_addr: SocketAddr,
) -> bool {
false
}
}

@@ -233,5 +243,5 @@ impl NetAdapter for DummyAdapter {
vec![]
}
fn peer_addrs_received(&self, _: Vec<SocketAddr>) {}
fn peer_difficulty(&self, _: SocketAddr, _: Difficulty, _:u64) {}
fn peer_difficulty(&self, _: SocketAddr, _: Difficulty, _: u64) {}
}

@@ -155,5 +155,8 @@ impl PeerStore {
}

fn peer_key(peer_addr: SocketAddr) -> Vec<u8> {
to_key(PEER_PREFIX, &mut format!("{}:{}", peer_addr.ip(), peer_addr.port()).into_bytes())
to_key(
PEER_PREFIX,
&mut format!("{}:{}", peer_addr.ip(), peer_addr.port()).into_bytes(),
)
}

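For reference, the key built by peer_key above is a store prefix followed by the "ip:port" string. A standalone sketch of the same layout (the PEER_PREFIX value is assumed here for illustration; the real to_key helper lives in the store crate):

    use std::net::SocketAddr;

    const PEER_PREFIX: u8 = b'p'; // assumed prefix value, for illustration only

    fn peer_key(peer_addr: SocketAddr) -> Vec<u8> {
        // prefix byte + "ip:port" bytes, mirroring the to_key concatenation
        let mut key = vec![PEER_PREFIX];
        key.extend(format!("{}:{}", peer_addr.ip(), peer_addr.port()).into_bytes());
        key
    }

    // e.g. peer_key("127.0.0.1:13414".parse().unwrap())
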
@@ -119,7 +119,9 @@ bitflags! {
/// Can provide a list of healthy peers
const PEER_LIST = 0b00000100;

const FULL_NODE = Capabilities::FULL_HIST.bits | Capabilities::UTXO_HIST.bits | Capabilities::PEER_LIST.bits;
const FULL_NODE = Capabilities::FULL_HIST.bits
| Capabilities::UTXO_HIST.bits
| Capabilities::PEER_LIST.bits;
}
}

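FULL_NODE above is simply the union of the individual capability bits. A quick sketch of how such a combined flag behaves, using plain integers instead of the bitflags! macro (the bit values for FULL_HIST and UTXO_HIST are assumed to be the first two bits, by analogy with the PEER_LIST bit shown above):

    const FULL_HIST: u8 = 0b0000_0001; // assumed bit value
    const UTXO_HIST: u8 = 0b0000_0010; // assumed bit value
    const PEER_LIST: u8 = 0b0000_0100; // as defined above
    const FULL_NODE: u8 = FULL_HIST | UTXO_HIST | PEER_LIST;

    fn main() {
        // the union of the three bits
        assert_eq!(FULL_NODE, 0b0000_0111);
        // membership test: does this peer advertise the peer list capability?
        assert!(FULL_NODE & PEER_LIST != 0);
    }
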
@@ -199,9 +201,14 @@ pub trait ChainAdapter: Sync + Send {
/// If we're willing to accept that new state, the data stream will be
/// read as a zip file, unzipped and the resulting state files should be
/// rewound to the provided indexes.
fn sumtrees_write(&self, h: Hash,
rewind_to_output: u64, rewind_to_kernel: u64,
sumtree_data: File, peer_addr: SocketAddr) -> bool;
fn sumtrees_write(
&self,
h: Hash,
rewind_to_output: u64,
rewind_to_kernel: u64,
sumtree_data: File,
peer_addr: SocketAddr,
) -> bool;
}

/// Additional methods required by the protocol that don't need to be

@@ -47,19 +47,19 @@ fn peer_handshake() {
peers_deny: None,
};
let net_adapter = Arc::new(p2p::DummyAdapter {});
let server = Arc::new(p2p::Server::new(
".grin".to_owned(),
p2p::Capabilities::UNKNOWN,
p2p_conf.clone(),
net_adapter.clone(),
Hash::from_vec(vec![]),
Arc::new(AtomicBool::new(false)),
).unwrap());
let server = Arc::new(
p2p::Server::new(
".grin".to_owned(),
p2p::Capabilities::UNKNOWN,
p2p_conf.clone(),
net_adapter.clone(),
Hash::from_vec(vec![]),
Arc::new(AtomicBool::new(false)),
).unwrap(),
);

let p2p_inner = server.clone();
let _ = thread::spawn(move || {
p2p_inner.listen()
});
let _ = thread::spawn(move || p2p_inner.listen());

thread::sleep(time::Duration::from_secs(1));

@@ -81,7 +81,7 @@ fn peer_handshake() {

peer.send_ping(Difficulty::one(), 0).unwrap();
thread::sleep(time::Duration::from_secs(1));

let server_peer = server.peers.get_connected_peer(&my_addr).unwrap();
let server_peer = server_peer.read().unwrap();
assert_eq!(server_peer.info.total_difficulty, Difficulty::one());

@@ -12,13 +12,12 @@ use std::clone::Clone;
use std::sync::RwLock;

use core::core::{block, hash, transaction};
use core::core::{OutputFeatures, Input, OutputIdentifier};
use core::core::{Input, OutputFeatures, OutputIdentifier};
use core::global;
use core::core::hash::Hashed;
use types::{BlockChain, PoolError};
use util::secp::pedersen::Commitment;

/// A DummyUtxoSet for mocking up the chain
pub struct DummyUtxoSet {
outputs: HashMap<Commitment, transaction::Output>,

@@ -119,10 +118,7 @@ impl BlockChain for DummyChainImpl {
}
let block_hash = input.block_hash.expect("requires a block hash");
let headers = self.block_headers.read().unwrap();
if let Some(h) = headers
.iter()
.find(|x| x.hash() == block_hash)
{
if let Some(h) = headers.iter().find(|x| x.hash() == block_hash) {
if h.height + global::coinbase_maturity() < height {
return Ok(());
}

@@ -130,9 +130,7 @@ impl fmt::Debug for Edge {
write!(
f,
"Edge {{source: {:?}, destination: {:?}, commitment: {:?}}}",
self.source,
self.destination,
self.output
self.source, self.destination, self.output
)
}
}

@@ -193,14 +191,14 @@ impl DirectedGraph {
let mut new_vertices: Vec<PoolEntry> = vec![];

// first find the set of all destinations from the edges in the graph
// a root is a vertex that is not a destination of any edge
// a root is a vertex that is not a destination of any edge
let destinations = self.edges
.values()
.filter_map(|edge| edge.destination)
.collect::<HashSet<_>>();

// now iterate over the current non-root vertices
// and check if it is now a root based on the set of edge destinations
// and check if it is now a root based on the set of edge destinations
for x in &self.vertices {
if destinations.contains(&x.transaction_hash) {
new_vertices.push(x.clone());

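The root recomputation described in the comments above amounts to a set difference: collect every edge destination, then keep the vertices that never appear as a destination. A minimal sketch with plain integers standing in for transaction hashes:

    use std::collections::HashSet;

    // a vertex is a root if no edge points at it
    fn find_roots(vertices: &[u64], edge_destinations: &[u64]) -> Vec<u64> {
        let destinations: HashSet<u64> = edge_destinations.iter().cloned().collect();
        vertices
            .iter()
            .cloned()
            .filter(|v| !destinations.contains(v))
            .collect()
    }

    // with edges pointing at 2 and 3, vertices 1 and 4 are the roots:
    // assert_eq!(find_roots(&[1, 2, 3, 4], &[2, 3]), vec![1, 4]);
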
@@ -309,11 +307,8 @@ mod tests {

let output_commit = keychain.commit(70, &key_id1).unwrap();
let switch_commit = keychain.switch_commit(&key_id1).unwrap();
let switch_commit_hash = SwitchCommitHash::from_switch_commit(
switch_commit,
&keychain,
&key_id1,
);
let switch_commit_hash =
SwitchCommitHash::from_switch_commit(switch_commit, &keychain, &key_id1);

let inputs = vec![
core::transaction::Input::new(

@@ -336,7 +331,13 @@ mod tests {
commit: output_commit,
switch_commit_hash: switch_commit_hash,
proof: keychain
.range_proof(100, &key_id1, output_commit, Some(switch_commit_hash.as_ref().to_vec()), msg)
.range_proof(
100,
&key_id1,
output_commit,
Some(switch_commit_hash.as_ref().to_vec()),
msg,
)
.unwrap(),
};

@@ -344,11 +345,8 @@ mod tests {
.with_fee(5)
.with_lock_height(0);

let test_transaction = core::transaction::Transaction::new(
inputs,
vec![output],
vec![kernel],
);
let test_transaction =
core::transaction::Transaction::new(inputs, vec![output], vec![kernel]);

let test_pool_entry = PoolEntry::new(&test_transaction);

pool/src/pool.rs (280 lines changed)

@@ -51,11 +51,7 @@ where
T: BlockChain,
{
/// Create a new transaction pool
pub fn new(
config: PoolConfig,
chain: Arc<T>,
adapter: Arc<PoolAdapter>,
) -> TransactionPool<T> {
pub fn new(config: PoolConfig, chain: Arc<T>, adapter: Arc<PoolAdapter>) -> TransactionPool<T> {
TransactionPool {
config: config,
transactions: HashMap::new(),

@@ -129,29 +125,26 @@ where
// unspent set, represented by blockchain unspents - pool spents, for an
// output designated by output_commitment.
fn search_blockchain_unspents(&self, output_ref: &OutputIdentifier) -> Option<Parent> {
self.blockchain
.is_unspent(output_ref)
.ok()
.map(|_| {
match self.pool.get_blockchain_spent(&output_ref.commit) {
Some(x) => {
let other_tx = x.destination_hash().unwrap();
Parent::AlreadySpent { other_tx }
}
None => Parent::BlockTransaction,
self.blockchain.is_unspent(output_ref).ok().map(|_| {
match self.pool.get_blockchain_spent(&output_ref.commit) {
Some(x) => {
let other_tx = x.destination_hash().unwrap();
Parent::AlreadySpent { other_tx }
}
})
None => Parent::BlockTransaction,
}
})
}

// search_pool_spents is the second half of pool input detection, after the
// available_outputs have been checked. This returns either a
// Parent::AlreadySpent or None.
// available_outputs have been checked. This returns either a
// Parent::AlreadySpent or None.
fn search_pool_spents(&self, output_commitment: &Commitment) -> Option<Parent> {
self.pool.get_internal_spent(output_commitment).map(|x| {
Parent::AlreadySpent {
self.pool
.get_internal_spent(output_commitment)
.map(|x| Parent::AlreadySpent {
other_tx: x.destination_hash().unwrap(),
}
})
})
}

/// Get the number of transactions in the pool

@@ -189,14 +182,14 @@ where
tx.validate().map_err(|e| PoolError::InvalidTx(e))?;

// The first check involves ensuring that an identical transaction is
// not already in the pool's transaction set.
// A non-authoritative similar check should be performed under the
// pool's read lock before we get to this point, which would catch the
// majority of duplicate cases. The race condition is caught here.
// TODO: When the transaction identifier is finalized, the assumptions
// here may change depending on the exact coverage of the identifier.
// The current tx.hash() method, for example, does not cover changes
// to fees or other elements of the signature preimage.
// not already in the pool's transaction set.
// A non-authoritative similar check should be performed under the
// pool's read lock before we get to this point, which would catch the
// majority of duplicate cases. The race condition is caught here.
// TODO: When the transaction identifier is finalized, the assumptions
// here may change depending on the exact coverage of the identifier.
// The current tx.hash() method, for example, does not cover changes
// to fees or other elements of the signature preimage.
let tx_hash = graph::transaction_identifier(&tx);
if self.transactions.contains_key(&tx_hash) {
return Err(PoolError::AlreadyInPool);

@@ -243,11 +236,11 @@ where
let is_orphan = orphan_refs.len() > 0;

// Next we examine the outputs this transaction creates and ensure
// that they do not already exist.
// I believe it's worth preventing duplicate outputs from being
// accepted, even though it is possible for them to be mined
// with strict ordering. In the future, if desirable, this could
// be node policy config or more intelligent.
// that they do not already exist.
// I believe it's worth preventing duplicate outputs from being
// accepted, even though it is possible for them to be mined
// with strict ordering. In the future, if desirable, this could
// be node policy config or more intelligent.
for output in &tx.outputs {
self.check_duplicate_outputs(output, is_orphan)?
}

@@ -283,13 +276,13 @@ where
Ok(())
} else {
// At this point, we're pretty sure the transaction is an orphan,
// but we have to explicitly check for double spends against the
// orphans set; we do not check this as part of the connectivity
// checking above.
// First, any references resolved to the pool need to be compared
// against active orphan pool_connections.
// Note that pool_connections here also does double duty to
// account for blockchain connections.
// but we have to explicitly check for double spends against the
// orphans set; we do not check this as part of the connectivity
// checking above.
// First, any references resolved to the pool need to be compared
// against active orphan pool_connections.
// Note that pool_connections here also does double duty to
// account for blockchain connections.
for pool_ref in pool_refs.iter().chain(blockchain_refs.iter()) {
match self.orphans
.get_external_spent_output(&pool_ref.output_commitment())

@@ -306,9 +299,9 @@ where
}

// Next, we have to consider the possibility of double spends
// within the orphans set.
// We also have to distinguish now between missing and internal
// references.
// within the orphans set.
// We also have to distinguish now between missing and internal
// references.
let missing_refs = self.resolve_orphan_refs(tx_hash, &mut orphan_refs)?;

// We have passed all failure modes.

@@ -347,7 +340,6 @@ where
});
}


// Check for existence of this output in the pool
match self.pool.find_output(&output.commitment()) {
Some(x) => {

@@ -360,9 +352,8 @@ where
None => {}
};


// If the transaction might go into orphans, perform the same
// checks as above but against the orphan set instead.
// checks as above but against the orphan set instead.
if is_orphan {
// Checking against orphan outputs
match self.orphans.find_output(&output.commitment()) {

@@ -376,7 +367,7 @@ where
None => {}
};
// No need to check pool connections since those are covered
// by pool unspents and blockchain connections.
// by pool unspents and blockchain connections.
}
Ok(())
}

@@ -414,9 +405,9 @@ where
}
None => {
// The reference does not resolve to anything.
// Make sure this missing_output has not already
// been claimed, then add this entry to
// missing_refs
// Make sure this missing_output has not already
// been claimed, then add this entry to
// missing_refs
match self.orphans.get_unknown_output(&orphan_commitment) {
Some(x) => {
return Err(PoolError::DoubleSpend {

@@ -464,34 +455,34 @@ where
block: &block::Block,
) -> Result<Vec<Box<transaction::Transaction>>, PoolError> {
// If this pool has been kept in sync correctly, serializing all
// updates, then the inputs must consume only members of the blockchain
// utxo set.
// If the block has been resolved properly and reduced fully to its
// canonical form, no inputs may consume outputs generated by previous
// transactions in the block; they would be cut-through. TODO: If this
// is not consensus enforced, then logic must be added here to account
// for that.
// Based on this, we operate under the following algorithm:
// For each block input, we examine the pool transaction, if any, that
// consumes the same blockchain output.
// If one exists, we mark the transaction and then examine its
// children. Recursively, we mark each child until a child is
// fully satisfied by outputs in the updated utxo view (after
// reconciliation of the block), or there are no more children.
//
// Additionally, to protect our invariant dictating no duplicate
// outputs, each output generated by the new utxo set is checked
// against outputs generated by the pool and the corresponding
// transactions are also marked.
//
// After marking concludes, sweeping begins. In order, the marked
// transactions are removed, the vertexes corresponding to the
// transactions are removed, all the marked transactions' outputs are
// removed, and all remaining non-blockchain inputs are returned to the
// unspent_outputs set.
//
// After the pool has been successfully processed, an orphans
// reconciliation job is triggered.
// updates, then the inputs must consume only members of the blockchain
// utxo set.
// If the block has been resolved properly and reduced fully to its
// canonical form, no inputs may consume outputs generated by previous
// transactions in the block; they would be cut-through. TODO: If this
// is not consensus enforced, then logic must be added here to account
// for that.
// Based on this, we operate under the following algorithm:
// For each block input, we examine the pool transaction, if any, that
// consumes the same blockchain output.
// If one exists, we mark the transaction and then examine its
// children. Recursively, we mark each child until a child is
// fully satisfied by outputs in the updated utxo view (after
// reconciliation of the block), or there are no more children.
//
// Additionally, to protect our invariant dictating no duplicate
// outputs, each output generated by the new utxo set is checked
// against outputs generated by the pool and the corresponding
// transactions are also marked.
//
// After marking concludes, sweeping begins. In order, the marked
// transactions are removed, the vertexes corresponding to the
// transactions are removed, all the marked transactions' outputs are
// removed, and all remaining non-blockchain inputs are returned to the
// unspent_outputs set.
//
// After the pool has been successfully processed, an orphans
// reconciliation job is triggered.
let mut marked_transactions: HashSet<hash::Hash> = HashSet::new();

{

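The mark phase of the algorithm described above is essentially a recursive graph traversal. A simplified sketch, with a hypothetical children lookup standing in for the pool's edge queries and the "satisfied by the new utxo view" early exit omitted:

    use std::collections::{HashMap, HashSet};

    type TxHash = u64; // stand-in for hash::Hash

    // recursively mark a transaction and its descendants for sweeping
    fn mark_transaction(
        txh: TxHash,
        children: &HashMap<TxHash, Vec<TxHash>>,
        marked: &mut HashSet<TxHash>,
    ) {
        if !marked.insert(txh) {
            return; // already marked, avoid re-walking a subgraph
        }
        if let Some(kids) = children.get(&txh) {
            for &child in kids {
                mark_transaction(child, children, marked);
            }
        }
    }
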
@@ -504,7 +495,7 @@ where
.collect();

// find all outputs that conflict - potential for duplicates so use a HashSet
// here
// here
let conflicting_outputs: HashSet<hash::Hash> = block
.outputs
.iter()

@@ -517,7 +508,7 @@ where
.collect();

// now iterate over all conflicting hashes from both txs and outputs
// we can just use the union of the two sets here to remove duplicates
// we can just use the union of the two sets here to remove duplicates
for &txh in conflicting_txs.union(&conflicting_outputs) {
self.mark_transaction(txh, &mut marked_transactions);
}

@@ -585,7 +576,7 @@ where
}

// final step is to update the pool to reflect the new set of roots
// a tx that was non-root may now be root based on the txs removed
// a tx that was non-root may now be root based on the txs removed
self.pool.update_roots();

removed_txs

@@ -617,9 +608,9 @@ where
return Err(PoolError::OverCapacity);
}

// for a basic transaction (1 input, 2 outputs) -
// (-1 * 1) + (4 * 2) + 1 = 8
// 8 * 10 = 80
// for a basic transaction (1 input, 2 outputs) -
// (-1 * 1) + (4 * 2) + 1 = 8
// 8 * 10 = 80
if self.config.accept_fee_base > 0 {
let mut tx_weight = -1 * (tx.inputs.len() as i32) + (4 * tx.outputs.len() as i32) + 1;
if tx_weight < 1 {

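To make the arithmetic in the comment above concrete, the minimum-fee floor can be sketched as a small function (accept_fee_base stands in for the configured base; 10 in the worked example):

    fn minimum_fee(inputs: usize, outputs: usize, accept_fee_base: u64) -> u64 {
        // weight = -1 * inputs + 4 * outputs + 1, clamped to at least 1
        let mut tx_weight = -(inputs as i64) + 4 * (outputs as i64) + 1;
        if tx_weight < 1 {
            tx_weight = 1;
        }
        tx_weight as u64 * accept_fee_base
    }

    // 1 input, 2 outputs: weight = (-1) + 8 + 1 = 8, so a base of 10 gives a floor of 80
    // assert_eq!(minimum_fee(1, 2, 10), 80);
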
@@ -773,7 +764,7 @@ mod tests {
};

// To test DoubleSpend and AlreadyInPool conditions, we need to add
// a valid transaction.
// a valid transaction.
let valid_transaction = test_transaction(vec![5, 6], vec![9]);

match write_pool.add_to_memory_pool(test_source(), valid_transaction.clone()) {

@@ -782,7 +773,7 @@ mod tests {
};

// Now, test a DoubleSpend by consuming the same blockchain unspent
// as valid_transaction:
// as valid_transaction:
let double_spend_transaction = test_transaction(vec![6], vec![2]);

match write_pool.add_to_memory_pool(test_source(), double_spend_transaction) {

@@ -824,7 +815,7 @@ mod tests {
assert_eq!(write_pool.total_size(), 1);

// now attempt to add a timelocked tx to the pool
// should fail as invalid based on current height
// should fail as invalid based on current height
let timelocked_tx_1 = timelocked_transaction(vec![9], vec![5], 10);
match write_pool.add_to_memory_pool(test_source(), timelocked_tx_1) {
Err(PoolError::ImmatureTransaction {

@@ -867,14 +858,10 @@ mod tests {
};
chain_ref.store_head_header(&head_header);

let txn = test_transaction_with_coinbase_input(
15,
coinbase_header.hash(),
vec![10, 3],
);
let txn = test_transaction_with_coinbase_input(15, coinbase_header.hash(), vec![10, 3]);
let result = write_pool.add_to_memory_pool(test_source(), txn);
match result {
Err(InvalidTx(transaction::Error::ImmatureCoinbase)) => {},
Err(InvalidTx(transaction::Error::ImmatureCoinbase)) => {}
_ => panic!("expected ImmatureCoinbase error here"),
};

@@ -884,11 +871,7 @@ mod tests {
};
chain_ref.store_head_header(&head_header);

let txn = test_transaction_with_coinbase_input(
15,
coinbase_header.hash(),
vec![10, 3],
);
let txn = test_transaction_with_coinbase_input(15, coinbase_header.hash(), vec![10, 3]);
let result = write_pool.add_to_memory_pool(test_source(), txn);
match result {
Ok(_) => {}

@@ -920,8 +903,8 @@ mod tests {
let pool = RwLock::new(test_setup(&chain_ref));

// now create two txs
// tx1 spends the UTXO
// tx2 spends output from tx1
// tx1 spends the UTXO
// tx2 spends output from tx1
let tx1 = test_transaction(vec![100], vec![90]);
let tx2 = test_transaction(vec![90], vec![80]);

@@ -930,7 +913,7 @@ mod tests {
assert_eq!(write_pool.total_size(), 0);

// now add both txs to the pool (tx2 spends tx1 with zero confirmations)
// both should be accepted if tx1 added before tx2
// both should be accepted if tx1 added before tx2
write_pool.add_to_memory_pool(test_source(), tx1).unwrap();
write_pool.add_to_memory_pool(test_source(), tx2).unwrap();

@@ -944,7 +927,7 @@ mod tests {
txs = mineable_txs.drain(..).map(|x| *x).collect();

// confirm we can prepare both txs for mining here
// one root tx in the pool, and one non-root vertex in the pool
// one root tx in the pool, and one non-root vertex in the pool
assert_eq!(txs.len(), 2);
}

@@ -964,7 +947,7 @@ mod tests {
chain_ref.apply_block(&block);

// now reconcile the block
// we should evict both txs here
// we should evict both txs here
{
let mut write_pool = pool.write().unwrap();
let evicted_transactions = write_pool.reconcile_block(&block).unwrap();

@@ -972,7 +955,7 @@ mod tests {
}

// check the pool is consistent after reconciling the block
// we should have zero txs in the pool (neither roots nor non-roots)
// we should have zero txs in the pool (neither roots nor non-roots)
{
let read_pool = pool.write().unwrap();
assert_eq!(read_pool.pool.len_vertices(), 0);

@@ -1003,26 +986,26 @@ mod tests {
let pool = RwLock::new(test_setup(&chain_ref));

// Preparation: We will introduce three root pool transactions.
// 1. A transaction that should be invalidated because it is exactly
// contained in the block.
// 2. A transaction that should be invalidated because the input is
// consumed in the block, although it is not exactly consumed.
// 3. A transaction that should remain after block reconciliation.
// 1. A transaction that should be invalidated because it is exactly
// contained in the block.
// 2. A transaction that should be invalidated because the input is
// consumed in the block, although it is not exactly consumed.
// 3. A transaction that should remain after block reconciliation.
let block_transaction = test_transaction(vec![10], vec![8]);
let conflict_transaction = test_transaction(vec![20], vec![12, 6]);
let valid_transaction = test_transaction(vec![30], vec![13, 15]);

// We will also introduce a few children:
// 4. A transaction that descends from transaction 1, that is in
// turn exactly contained in the block.
// 4. A transaction that descends from transaction 1, that is in
// turn exactly contained in the block.
let block_child = test_transaction(vec![8], vec![5, 1]);
// 5. A transaction that descends from transaction 4, that is not
// contained in the block at all and should be valid after
// reconciliation.
// contained in the block at all and should be valid after
// reconciliation.
let pool_child = test_transaction(vec![5], vec![3]);
// 6. A transaction that descends from transaction 2 that does not
// conflict with anything in the block in any way, but should be
// invalidated (orphaned).
// conflict with anything in the block in any way, but should be
// invalidated (orphaned).
let conflict_child = test_transaction(vec![12], vec![2]);
// 7. A transaction that descends from transaction 2 that should be
// valid due to its inputs being satisfied by the block.

@@ -1107,7 +1090,6 @@ mod tests {
// check the specific transactions that were evicted.
}


// Using the pool's methods to validate a few end conditions.
{
let read_pool = pool.read().unwrap();

@@ -1263,12 +1245,8 @@ mod tests {
) -> transaction::Transaction {
let keychain = keychain_for_tests();

let input_sum = input_values
.iter()
.sum::<u64>() as i64;
let output_sum = output_values
.iter()
.sum::<u64>() as i64;
let input_sum = input_values.iter().sum::<u64>() as i64;
let output_sum = output_values.iter().sum::<u64>() as i64;

let fees: i64 = input_sum - output_sum;
assert!(fees >= 0);

@@ -1296,9 +1274,7 @@ mod tests {
) -> transaction::Transaction {
let keychain = keychain_for_tests();

let output_sum = output_values
.iter()
.sum::<u64>() as i64;
let output_sum = output_values.iter().sum::<u64>() as i64;

let fees: i64 = input_value as i64 - output_sum;
assert!(fees >= 0);

@@ -1309,18 +1285,16 @@ mod tests {
node: Hash::zero(),
root: Hash::zero(),
peaks: vec![Hash::zero()],
.. MerkleProof::default()
..MerkleProof::default()
};

let key_id = keychain.derive_key_id(input_value as u32).unwrap();
tx_elements.push(
build::coinbase_input(
input_value,
input_block_hash,
merkle_proof,
key_id,
),
);
tx_elements.push(build::coinbase_input(
input_value,
input_block_hash,
merkle_proof,
key_id,
));

for output_value in output_values {
let key_id = keychain.derive_key_id(output_value as u32).unwrap();

@@ -1367,13 +1341,18 @@ mod tests {
let key_id = keychain.derive_key_id(value as u32).unwrap();
let commit = keychain.commit(value, &key_id).unwrap();
let switch_commit = keychain.switch_commit(&key_id).unwrap();
let switch_commit_hash = SwitchCommitHash::from_switch_commit(
switch_commit,
&keychain,
&key_id,
);
let switch_commit_hash =
SwitchCommitHash::from_switch_commit(switch_commit, &keychain, &key_id);
let msg = secp::pedersen::ProofMessage::empty();
let proof = keychain.range_proof(value, &key_id, commit, Some(switch_commit_hash.as_ref().to_vec()), msg).unwrap();
let proof = keychain
.range_proof(
value,
&key_id,
commit,
Some(switch_commit_hash.as_ref().to_vec()),
msg,
)
.unwrap();

transaction::Output {
features: transaction::OutputFeatures::DEFAULT_OUTPUT,

@@ -1389,13 +1368,18 @@ mod tests {
let key_id = keychain.derive_key_id(value as u32).unwrap();
let commit = keychain.commit(value, &key_id).unwrap();
let switch_commit = keychain.switch_commit(&key_id).unwrap();
let switch_commit_hash = SwitchCommitHash::from_switch_commit(
switch_commit,
&keychain,
&key_id,
);
let switch_commit_hash =
SwitchCommitHash::from_switch_commit(switch_commit, &keychain, &key_id);
let msg = secp::pedersen::ProofMessage::empty();
let proof = keychain.range_proof(value, &key_id, commit, Some(switch_commit_hash.as_ref().to_vec()), msg).unwrap();
let proof = keychain
.range_proof(
value,
&key_id,
commit,
Some(switch_commit_hash.as_ref().to_vec()),
msg,
)
.unwrap();

transaction::Output {
features: transaction::OutputFeatures::COINBASE_OUTPUT,

@@ -52,7 +52,7 @@ impl Default for PoolConfig {
}

fn default_accept_fee_base() -> u64 {
consensus::MILLI_GRIN
consensus::MILLI_GRIN
}
fn default_max_pool_size() -> usize {
50_000

@@ -86,15 +86,11 @@ impl fmt::Debug for Parent {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
&Parent::Unknown => write!(f, "Parent: Unknown"),
&Parent::BlockTransaction => {
write!(f, "Parent: Block Transaction")
}
&Parent::BlockTransaction => write!(f, "Parent: Block Transaction"),
&Parent::PoolTransaction { tx_ref: x } => {
write!(f, "Parent: Pool Transaction ({:?})", x)
}
&Parent::AlreadySpent { other_tx: x } => {
write!(f, "Parent: Already Spent By {:?}", x)
}
&Parent::AlreadySpent { other_tx: x } => write!(f, "Parent: Already Spent By {:?}", x),
}
}
}

@@ -259,7 +255,7 @@ impl Pool {
}

// Adding the transaction to the vertices list along with internal
// pool edges
// pool edges
self.graph.add_entry(pool_entry, pool_refs);

// Adding the new unspents to the unspent map

@@ -421,12 +417,11 @@ impl Orphans {
}

// if missing_refs is the same length as orphan_refs, we have
// no orphan-orphan links for this transaction and it is a
// root transaction of the orphans set
// no orphan-orphan links for this transaction and it is a
// root transaction of the orphans set
self.graph
.add_vertex_only(orphan_entry, is_missing.len() == orphan_refs.len());

// Adding the new unspents to the unspent map
for unspent_output in new_unspents.drain(..) {
self.available_outputs

@@ -60,10 +60,12 @@ impl Cuckoo {
let hashed = blake2::blake2b::blake2b(32, &[], header);
let hashed = hashed.as_bytes();
Cuckoo {
v: [u8_to_u64(hashed, 0),
u8_to_u64(hashed, 8),
u8_to_u64(hashed, 16),
u8_to_u64(hashed, 24)],
v: [
u8_to_u64(hashed, 0),
u8_to_u64(hashed, 8),
u8_to_u64(hashed, 16),
u8_to_u64(hashed, 24),
],
size: size,
mask: (1 << sizeshift) / 2 - 1,
}

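The constructor above splits the 32-byte blake2b hash of the header into four 64-bit words that key the cycle search. A sketch of that derivation, assuming each word takes eight consecutive little-endian bytes of the hash:

    fn keys_from_hash(hashed: &[u8; 32]) -> [u64; 4] {
        let mut v = [0u64; 4];
        for i in 0..4 {
            // each key consumes 8 consecutive bytes of the hash
            let mut word = [0u8; 8];
            word.copy_from_slice(&hashed[i * 8..(i + 1) * 8]);
            v[i] = u64::from_le_bytes(word);
        }
        v
    }
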
@@ -305,7 +307,6 @@ impl Miner {
}
}


/// Utility to transform 8 bytes of a byte array into a u64.
fn u8_to_u64(p: &[u8], i: usize) -> u64 {
(p[i] as u64) | (p[i + 1] as u64) << 8 | (p[i + 2] as u64) << 16 | (p[i + 3] as u64) << 24

@@ -319,181 +320,34 @@ mod test {
use core::core::Proof;

static V1: [u32; 42] = [
0x3bbd,
0x4e96,
0x1013b,
0x1172b,
0x1371b,
0x13e6a,
0x1aaa6,
0x1b575,
0x1e237,
0x1ee88,
0x22f94,
0x24223,
0x25b4f,
0x2e9f3,
0x33b49,
0x34063,
0x3454a,
0x3c081,
0x3d08e,
0x3d863,
0x4285a,
0x42f22,
0x43122,
0x4b853,
0x4cd0c,
0x4f280,
0x557d5,
0x562cf,
0x58e59,
0x59a62,
0x5b568,
0x644b9,
0x657e9,
0x66337,
0x6821c,
0x7866f,
0x7e14b,
0x7ec7c,
0x7eed7,
0x80643,
0x8628c,
0x8949e
0x3bbd, 0x4e96, 0x1013b, 0x1172b, 0x1371b, 0x13e6a, 0x1aaa6, 0x1b575, 0x1e237, 0x1ee88,
0x22f94, 0x24223, 0x25b4f, 0x2e9f3, 0x33b49, 0x34063, 0x3454a, 0x3c081, 0x3d08e, 0x3d863,
0x4285a, 0x42f22, 0x43122, 0x4b853, 0x4cd0c, 0x4f280, 0x557d5, 0x562cf, 0x58e59, 0x59a62,
0x5b568, 0x644b9, 0x657e9, 0x66337, 0x6821c, 0x7866f, 0x7e14b, 0x7ec7c, 0x7eed7, 0x80643,
0x8628c, 0x8949e,
];
static V2: [u32; 42] = [
0x5e3a,
0x8a8b,
0x103d8,
0x1374b,
0x14780,
0x16110,
0x1b571,
0x1c351,
0x1c826,
0x28228,
0x2909f,
0x29516,
0x2c1c4,
0x334eb,
0x34cdd,
0x38a2c,
0x3ad23,
0x45ac5,
0x46afe,
0x50f43,
0x51ed6,
0x52ddd,
0x54a82,
0x5a46b,
0x5dbdb,
0x60f6f,
0x60fcd,
0x61c78,
0x63899,
0x64dab,
0x6affc,
0x6b569,
0x72639,
0x73987,
0x78806,
0x7b98e,
0x7c7d7,
0x7ddd4,
0x7fa88,
0x8277c,
0x832d9,
0x8ba6f
0x5e3a, 0x8a8b, 0x103d8, 0x1374b, 0x14780, 0x16110, 0x1b571, 0x1c351, 0x1c826, 0x28228,
0x2909f, 0x29516, 0x2c1c4, 0x334eb, 0x34cdd, 0x38a2c, 0x3ad23, 0x45ac5, 0x46afe, 0x50f43,
0x51ed6, 0x52ddd, 0x54a82, 0x5a46b, 0x5dbdb, 0x60f6f, 0x60fcd, 0x61c78, 0x63899, 0x64dab,
0x6affc, 0x6b569, 0x72639, 0x73987, 0x78806, 0x7b98e, 0x7c7d7, 0x7ddd4, 0x7fa88, 0x8277c,
0x832d9, 0x8ba6f,
];
static V3: [u32; 42] = [
0x308b,
0x9004,
0x91fc,
0x983e,
0x9d67,
0xa293,
0xb4cb,
0xb6c8,
0xccc8,
0xdddc,
0xf04d,
0x1372f,
0x16ec9,
0x17b61,
0x17d03,
0x1e3bc,
0x1fb0f,
0x29e6e,
0x2a2ca,
0x2a719,
0x3a078,
0x3b7cc,
0x3c71d,
0x40daa,
0x43e17,
0x46adc,
0x4b359,
0x4c3aa,
0x4ce92,
0x4d06e,
0x51140,
0x565ac,
0x56b1f,
0x58a8b,
0x5e410,
0x5e607,
0x5ebb5,
0x5f8ae,
0x7aeac,
0x7b902,
0x7d6af,
0x7f400
0x308b, 0x9004, 0x91fc, 0x983e, 0x9d67, 0xa293, 0xb4cb, 0xb6c8, 0xccc8, 0xdddc, 0xf04d,
0x1372f, 0x16ec9, 0x17b61, 0x17d03, 0x1e3bc, 0x1fb0f, 0x29e6e, 0x2a2ca, 0x2a719, 0x3a078,
0x3b7cc, 0x3c71d, 0x40daa, 0x43e17, 0x46adc, 0x4b359, 0x4c3aa, 0x4ce92, 0x4d06e, 0x51140,
0x565ac, 0x56b1f, 0x58a8b, 0x5e410, 0x5e607, 0x5ebb5, 0x5f8ae, 0x7aeac, 0x7b902, 0x7d6af,
0x7f400,
];
// cuckoo28 at 50% edges of letter 'u'
static V4: [u32; 42] = [
0xf7243,
0x11f130,
0x193812,
0x23b565,
0x279ac3,
0x69b270,
0xe0778f,
0xef51fc,
0x10bf6e8,
0x13ccf7d,
0x1551177,
0x1b6cfd2,
0x1f872c3,
0x2075681,
0x2e23ccc,
0x2e4c0aa,
0x2f607f1,
0x3007eeb,
0x3407e9a,
0x35423f9,
0x39e48bf,
0x45e3bf6,
0x46aa484,
0x47c0fe1,
0x4b1d5a6,
0x4bae0ba,
0x4dfdbaf,
0x5048eda,
0x537da6b,
0x5402887,
0x56b8897,
0x5bd8e8b,
0x622de20,
0x62be5ce,
0x62d538e,
0x6464518,
0x650a6d5,
0x66ec4fa,
0x66f9476,
0x6b1e5f6,
0x6fd5d88,
0x701f37b
0xf7243, 0x11f130, 0x193812, 0x23b565, 0x279ac3, 0x69b270, 0xe0778f, 0xef51fc, 0x10bf6e8,
0x13ccf7d, 0x1551177, 0x1b6cfd2, 0x1f872c3, 0x2075681, 0x2e23ccc, 0x2e4c0aa, 0x2f607f1,
0x3007eeb, 0x3407e9a, 0x35423f9, 0x39e48bf, 0x45e3bf6, 0x46aa484, 0x47c0fe1, 0x4b1d5a6,
0x4bae0ba, 0x4dfdbaf, 0x5048eda, 0x537da6b, 0x5402887, 0x56b8897, 0x5bd8e8b, 0x622de20,
0x62be5ce, 0x62d538e, 0x6464518, 0x650a6d5, 0x66ec4fa, 0x66f9476, 0x6b1e5f6, 0x6fd5d88,
0x701f37b,
];

/// Find a 42-cycle on Cuckoo20 at 75% easiness and verify against a few

@@ -532,15 +386,12 @@ mod test {
fn validate_fail() {
// edge checks
assert!(!Cuckoo::new(&[49], 20).verify(Proof::new(vec![0; 42]), 75));
assert!(!Cuckoo::new(&[49], 20)
.verify(Proof::new(vec![0xffff; 42]), 75));
assert!(!Cuckoo::new(&[49], 20).verify(Proof::new(vec![0xffff; 42]), 75));
// wrong data for proof
assert!(!Cuckoo::new(&[50], 20)
.verify(Proof::new(V1.to_vec().clone()), 75));
assert!(!Cuckoo::new(&[50], 20).verify(Proof::new(V1.to_vec().clone()), 75));
let mut test_header = [0; 32];
test_header[0] = 24;
assert!(!Cuckoo::new(&test_header, 20)
.verify(Proof::new(V4.to_vec().clone()), 50));
assert!(!Cuckoo::new(&test_header, 20).verify(Proof::new(V4.to_vec().clone()), 50));
}

#[test]

@@ -76,7 +76,7 @@ pub trait MiningWorker {
/// satisfies the requirements of the header.
pub fn verify_size(bh: &BlockHeader, cuckoo_sz: u32) -> bool {
// make sure the pow hash shows a difficulty at least as large as the target
// difficulty
// difficulty
if bh.difficulty > bh.pow.clone().to_difficulty() {
return false;
}

@@ -129,35 +129,35 @@ pub fn pow_size<T: MiningWorker + ?Sized>(
) -> Result<(), Error> {
let start_nonce = bh.nonce;

// set the nonce for faster solution finding in user testing
if bh.height == 0 && global::is_user_testing_mode() {
bh.nonce = global::get_genesis_nonce();
// set the nonce for faster solution finding in user testing
if bh.height == 0 && global::is_user_testing_mode() {
bh.nonce = global::get_genesis_nonce();
}

// try to find a cuckoo cycle on that header hash
loop {
// can be trivially optimized by avoiding re-serialization every time but this
// is not meant as a fast miner implementation
let pow_hash = bh.hash();
// try to find a cuckoo cycle on that header hash
loop {
// can be trivially optimized by avoiding re-serialization every time but this
// is not meant as a fast miner implementation
let pow_hash = bh.hash();

// if we found a cycle (not guaranteed) and the proof hash is higher than the
// diff, we're all good
if let Ok(proof) = miner.mine(&pow_hash[..]) {
if proof.clone().to_difficulty() >= diff {
bh.pow = proof.clone();
return Ok(());
}
}
// if we found a cycle (not guaranteed) and the proof hash is higher than the
// diff, we're all good
if let Ok(proof) = miner.mine(&pow_hash[..]) {
if proof.clone().to_difficulty() >= diff {
bh.pow = proof.clone();
return Ok(());
}
}

// otherwise increment the nonce
bh.nonce += 1;
// otherwise increment the nonce
bh.nonce += 1;

// and if we're back where we started, update the time (changes the hash as
// well)
if bh.nonce == start_nonce {
bh.timestamp = time::at_utc(time::Timespec { sec: 0, nsec: 0 });
}
}
// and if we're back where we started, update the time (changes the hash as
// well)
if bh.nonce == start_nonce {
bh.timestamp = time::at_utc(time::Timespec { sec: 0, nsec: 0 });
}
}
}

#[cfg(test)]

@@ -84,9 +84,9 @@ impl PluginMiner {
}

// First, load and query the plugins in the given directory
// These should all be stored in 'plugins' at the moment relative
// to the executable path, though they should appear somewhere else
// when packaging is more thought out
// These should all be stored in 'plugins' at the moment relative
// to the executable path, though they should appear somewhere else
// when packaging is more thought out

let mut loaded_config_ref = LOADED_CONFIG.lock().unwrap();

@@ -117,7 +117,7 @@ impl PluginMiner {
let mut index = 0;
for f in plugin_vec_filters {
// So this is built dynamically based on the plugin implementation
// type and the consensus sizeshift
// type and the consensus sizeshift
let filter = format!("{}_{}", f, sz);

let caps = plugin_manager.get_available_plugins(&filter).unwrap();

@@ -135,17 +135,25 @@ impl PluginMiner {
if let Some(l) = miner_config.clone().cuckoo_miner_plugin_config {
if let Some(dp) = l[index].device_parameters.clone() {
for (device, param_map) in dp.into_iter() {
for (param_name, param_value) in param_map.into_iter(){
for (param_name, param_value) in param_map.into_iter() {
let device_id = match device.parse::<u32>() {
Ok(n) => n,
Err(e) => {
error!(LOGGER, "Error initializing mining plugin: {:?}", e);
panic!("Unable to init mining plugin.");
},
}
};
debug!(LOGGER, "Cuckoo Plugin {}: Setting mining parameter {} to {} on Device {}",
index, param_name, param_value, device_id);
config.parameter_list.push((param_name, device_id, param_value));
debug!(
LOGGER,
"Cuckoo Plugin {}: Setting mining parameter {} to {} on Device {}",
index,
param_name,
param_value,
device_id
);
config
.parameter_list
.push((param_name, device_id, param_value));
}
}
}

@@ -154,7 +162,7 @@ impl PluginMiner {
index += 1;
}
// Store this config now, because we just want one instance
// of the plugin lib per invocation now
// of the plugin lib per invocation now
*loaded_config_ref = Some(cuckoo_configs.clone());

// this will load the associated plugin

@@ -22,7 +22,7 @@ pub struct CuckooMinerPluginConfig {
pub type_filter: String,

/// device params
pub device_parameters: Option<HashMap<String, HashMap<String, u32>>>
pub device_parameters: Option<HashMap<String, HashMap<String, u32>>>,
}

impl Default for CuckooMinerPluginConfig {

@@ -38,7 +38,6 @@ pub fn show_status(config: &ServerConfig) {
writeln!(e, "Last block hash: {}", status.tip.last_block_pushed).unwrap();
writeln!(e, "Previous block hash: {}", status.tip.prev_block_to_last).unwrap();
writeln!(e, "Total difficulty: {}", status.tip.total_difficulty).unwrap();

}
Err(_) => writeln!(
e,

@@ -81,10 +80,7 @@ pub fn unban_peer(config: &ServerConfig, peer_addr: &SocketAddr) {

pub fn list_connected_peers(config: &ServerConfig) {
let mut e = term::stdout().unwrap();
let url = format!(
"http://{}/v1/peers/connected",
config.api_http_addr
);
let url = format!("http://{}/v1/peers/connected", config.api_http_addr);
match api::client::get::<Vec<p2p::PeerInfo>>(url.as_str()).map_err(|e| Error::API(e)) {
Ok(connected_peers) => {
let mut index = 0;

@@ -98,7 +94,7 @@ pub fn list_connected_peers(config: &ServerConfig) {
println!();
index = index + 1;
}
},
}
Err(_) => writeln!(e, "Failed to get connected peers").unwrap(),
};
e.reset().unwrap();

@@ -429,7 +429,11 @@ fn wallet_command(wallet_args: &ArgMatches, global_config: GlobalConfig) {
wallet_config.check_node_api_http_addr = sa.to_string().clone();
}

let key_derivations: u32 = wallet_args.value_of("key_derivations").unwrap().parse().unwrap();
let key_derivations: u32 = wallet_args
.value_of("key_derivations")
.unwrap()
.parse()
.unwrap();

let mut show_spent = false;
if wallet_args.is_present("show_spent") {

@@ -515,29 +519,29 @@ fn wallet_command(wallet_args: &ArgMatches, global_config: GlobalConfig) {
dest,
selection_strategy,
),
Err(e) => match e.kind() {
wallet::ErrorKind::NotEnoughFunds(available) => {
error!(
LOGGER,
"Tx not sent: insufficient funds (max: {})",
amount_to_hr_string(available),
);
}
wallet::ErrorKind::FeeExceedsAmount {
sender_amount,
recipient_fee,
} => {
error!(
Err(e) => match e.kind() {
wallet::ErrorKind::NotEnoughFunds(available) => {
error!(
LOGGER,
"Tx not sent: insufficient funds (max: {})",
amount_to_hr_string(available),
);
}
wallet::ErrorKind::FeeExceedsAmount {
sender_amount,
recipient_fee,
} => {
error!(
LOGGER,
"Recipient rejected the transfer because transaction fee ({}) exceeded amount ({}).",
amount_to_hr_string(recipient_fee),
amount_to_hr_string(sender_amount)
);
}
_ => {
error!(LOGGER, "Tx not sent: {:?}", e);
}
}
}
_ => {
error!(LOGGER, "Tx not sent: {:?}", e);
}
},
};
}
("burn", Some(send_args)) => {

@@ -34,7 +34,6 @@ extern crate serde_derive;
#[macro_use]
extern crate slog;


pub mod pmmr;
pub mod types;

@@ -13,15 +13,15 @@

//! Implementation of the persistent Backend for the prunable MMR tree.

use std::fs::{self};
use std::io::{self};
use std::fs;
use std::io;
use std::marker::PhantomData;

use core::core::pmmr::{self, Backend};
use core::ser::{self, PMMRable, Readable, Writeable, Reader, Writer};
use core::ser::{self, PMMRable, Readable, Reader, Writeable, Writer};
use core::core::hash::Hash;
use util::LOGGER;
use types::{AppendOnlyFile, RemoveLog, read_ordered_vec, write_vec};
use types::{read_ordered_vec, write_vec, AppendOnlyFile, RemoveLog};

const PMMR_HASH_FILE: &'static str = "pmmr_hash.bin";
const PMMR_DATA_FILE: &'static str = "pmmr_data.bin";

@@ -31,7 +31,8 @@ const PMMR_PRUNED_FILE: &'static str = "pmmr_pruned.bin";
/// Maximum number of nodes in the remove log before it gets flushed
pub const RM_LOG_MAX_NODES: usize = 10000;

/// Metadata for the PMMR backend's AppendOnlyFile, which can be serialized and stored
/// Metadata for the PMMR backend's AppendOnlyFile, which can be serialized and
/// stored
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
pub struct PMMRFileMetadata {
/// last written index of the hash file

@@ -124,8 +125,7 @@ where
Err(e) => {
error!(
LOGGER,
"Corrupted storage, could not read an entry from hash store: {:?}",
e
"Corrupted storage, could not read an entry from hash store: {:?}", e
);
return None;
}

@@ -152,8 +152,8 @@ where
}

// Optionally read flatfile storage to get data element
let flatfile_pos = pmmr::n_leaves(position)
- 1 - self.pruned_nodes.get_leaf_shift(position).unwrap();
let flatfile_pos =
pmmr::n_leaves(position) - 1 - self.pruned_nodes.get_leaf_shift(position).unwrap();
let record_len = T::len();
let file_offset = flatfile_pos as usize * T::len();
let data = self.data_file.read(file_offset, record_len);

@@ -196,9 +196,9 @@ where

/// Remove Hashes by insertion position
fn remove(&mut self, positions: Vec<u64>, index: u32) -> Result<(), String> {
self.rm_log.append(positions, index).map_err(|e| {
format!("Could not write to log storage, disk full? {:?}", e)
})
self.rm_log
.append(positions, index)
.map_err(|e| format!("Could not write to log storage, disk full? {:?}", e))
}

/// Return data file path

@@ -216,12 +216,14 @@ where
pub fn new(data_dir: String, file_md: Option<PMMRFileMetadata>) -> io::Result<PMMRBackend<T>> {
let (hash_to_pos, data_to_pos) = match file_md {
Some(m) => (m.last_hash_file_pos, m.last_data_file_pos),
None => (0,0)
None => (0, 0),
};
let hash_file = AppendOnlyFile::open(format!("{}/{}", data_dir, PMMR_HASH_FILE), hash_to_pos)?;
let hash_file =
AppendOnlyFile::open(format!("{}/{}", data_dir, PMMR_HASH_FILE), hash_to_pos)?;
let rm_log = RemoveLog::open(format!("{}/{}", data_dir, PMMR_RM_LOG_FILE))?;
let prune_list = read_ordered_vec(format!("{}/{}", data_dir, PMMR_PRUNED_FILE), 8)?;
let data_file = AppendOnlyFile::open(format!("{}/{}", data_dir, PMMR_DATA_FILE), data_to_pos)?;
let data_file =
AppendOnlyFile::open(format!("{}/{}", data_dir, PMMR_DATA_FILE), data_to_pos)?;

Ok(PMMRBackend {
data_dir: data_dir,

@@ -262,15 +264,15 @@ where
pub fn sync(&mut self) -> io::Result<()> {
if let Err(e) = self.hash_file.flush() {
return Err(io::Error::new(
io::ErrorKind::Interrupted,
format!("Could not write to log hash storage, disk full? {:?}", e),
));
io::ErrorKind::Interrupted,
format!("Could not write to log hash storage, disk full? {:?}", e),
));
}
if let Err(e) = self.data_file.flush() {
return Err(io::Error::new(
io::ErrorKind::Interrupted,
|
||||
format!("Could not write to log data storage, disk full? {:?}", e),
|
||||
));
|
||||
io::ErrorKind::Interrupted,
|
||||
format!("Could not write to log data storage, disk full? {:?}", e),
|
||||
));
|
||||
}
|
||||
self.rm_log.flush()?;
|
||||
Ok(())
|
||||
|
@ -292,7 +294,7 @@ where
|
|||
pub fn last_file_positions(&self) -> PMMRFileMetadata {
|
||||
PMMRFileMetadata {
|
||||
last_hash_file_pos: self.hash_file.last_buffer_pos() as u64,
|
||||
last_data_file_pos: self.data_file.last_buffer_pos() as u64
|
||||
last_data_file_pos: self.data_file.last_buffer_pos() as u64,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -338,13 +340,11 @@ where
|
|||
// remove list
|
||||
let tmp_prune_file_hash = format!("{}/{}.hashprune", self.data_dir, PMMR_HASH_FILE);
|
||||
let record_len = 32;
|
||||
let to_rm = filter_map_vec!(self.rm_log.removed, |&(pos, idx)| {
|
||||
if idx < cutoff_index {
|
||||
let shift = self.pruned_nodes.get_shift(pos);
|
||||
Some((pos - 1 - shift.unwrap()) * record_len)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
let to_rm = filter_map_vec!(self.rm_log.removed, |&(pos, idx)| if idx < cutoff_index {
|
||||
let shift = self.pruned_nodes.get_shift(pos);
|
||||
Some((pos - 1 - shift.unwrap()) * record_len)
|
||||
} else {
|
||||
None
|
||||
});
|
||||
self.hash_file
|
||||
.save_prune(tmp_prune_file_hash.clone(), to_rm, record_len)?;
|
||||
|
@ -390,7 +390,8 @@ where
|
|||
self.data_file = AppendOnlyFile::open(format!("{}/{}", self.data_dir, PMMR_DATA_FILE), 0)?;
|
||||
|
||||
// 6. truncate the rm log
|
||||
self.rm_log.removed = self.rm_log.removed
|
||||
self.rm_log.removed = self.rm_log
|
||||
.removed
|
||||
.iter()
|
||||
.filter(|&&(_, idx)| idx >= cutoff_index)
|
||||
.map(|x| *x)
|
||||
|
|
|
@ -42,7 +42,7 @@ pub struct AppendOnlyFile {
|
|||
mmap: Option<memmap::Mmap>,
|
||||
buffer_start: usize,
|
||||
buffer: Vec<u8>,
|
||||
buffer_start_bak: usize
|
||||
buffer_start_bak: usize,
|
||||
}
|
||||
|
||||
impl AppendOnlyFile {
|
||||
|
@ -128,7 +128,7 @@ impl AppendOnlyFile {
|
|||
pub fn read(&self, offset: usize, length: usize) -> Vec<u8> {
|
||||
if offset >= self.buffer_start {
|
||||
let offset = offset - self.buffer_start;
|
||||
return self.buffer[offset..(offset+length)].to_vec();
|
||||
return self.buffer[offset..(offset + length)].to_vec();
|
||||
}
|
||||
if let None = self.mmap {
|
||||
return vec![];
|
||||
|
@ -150,7 +150,12 @@ impl AppendOnlyFile {
|
|||
|
||||
/// Saves a copy of the current file content, skipping data at the provided
|
||||
/// prune indices. The prune Vec must be ordered.
|
||||
pub fn save_prune(&self, target: String, prune_offs: Vec<u64>, prune_len: u64) -> io::Result<()> {
|
||||
pub fn save_prune(
|
||||
&self,
|
||||
target: String,
|
||||
prune_offs: Vec<u64>,
|
||||
prune_len: u64,
|
||||
) -> io::Result<()> {
|
||||
let mut reader = File::open(self.path.clone())?;
|
||||
let mut writer = File::create(target)?;
|
||||
|
||||
|
@ -303,8 +308,8 @@ impl RemoveLog {
|
|||
}
|
||||
}
|
||||
}
|
||||
let pos = match complete_list.binary_search(&(elmt,0)){
|
||||
Ok(idx) => idx+1,
|
||||
let pos = match complete_list.binary_search(&(elmt, 0)) {
|
||||
Ok(idx) => idx + 1,
|
||||
Err(idx) => idx,
|
||||
};
|
||||
complete_list.split_at(pos).0.len()
|
||||
|
|
|
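Note: the RemoveLog hunk above computes the rank of an element in a sorted list by pattern-matching on `binary_search`: an exact hit at `idx` counts itself (`idx + 1`), while a miss yields the insertion point. A minimal standalone sketch of the same idiom (function name and test values are hypothetical, not from this commit):

fn rank_at_or_below(sorted: &[(u64, u32)], elmt: u64) -> usize {
	// Ok(idx): exact match at idx, so include it; Err(idx): insertion point.
	match sorted.binary_search(&(elmt, 0)) {
		Ok(idx) => idx + 1,
		Err(idx) => idx,
	}
}

fn main() {
	// (1, 0) and (3, 0) are at or below 3, so the rank is 2.
	assert_eq!(rank_at_or_below(&[(1, 0), (3, 0), (5, 0)], 3), 2);
}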
@@ -20,7 +20,7 @@ extern crate time;
use std::fs;

use core::ser::*;
use core::core::pmmr::{PMMR, Backend};
use core::core::pmmr::{Backend, PMMR};
use core::core::hash::{Hash, Hashed};

#[test]

@@ -38,21 +38,15 @@ fn pmmr_append() {

	// check the resulting backend store and the computation of the root
	let node_hash = elems[0].hash();
	assert_eq!(
		backend.get(1, false).expect("").0,
		node_hash
	);
	assert_eq!(backend.get(1, false).expect("").0, node_hash);

	let sum2 = elems[0].hash() + elems[1].hash();
	let sum4 = sum2
		+ (elems[2].hash() + elems[3].hash());
	let sum8 = sum4
		+ ((elems[4].hash() + elems[5].hash())
		+ (elems[6].hash() + elems[7].hash()));
	let sum4 = sum2 + (elems[2].hash() + elems[3].hash());
	let sum8 = sum4 + ((elems[4].hash() + elems[5].hash()) + (elems[6].hash() + elems[7].hash()));
	let sum9 = sum8 + elems[8].hash();

	{
		let pmmr:PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
		let pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
		assert_eq!(pmmr.root(), sum9);
	}
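Note: the reformatted sums above mirror the shape of the 9-leaf MMR root the test expects: balanced subtrees are combined pairwise and the lone ninth leaf is folded in last. A sketch of the nesting, reusing the test's own names (an illustration of the structure, not commit code):

// sum2 = h0 + h1
// sum4 = sum2 + (h2 + h3)
// sum8 = sum4 + ((h4 + h5) + (h6 + h7))
// sum9 = sum8 + h8   // the 8-leaf peak combined with the single-leaf peak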
@@ -71,13 +65,13 @@ fn pmmr_prune_compact() {
	// save the root
	let root: Hash;
	{
		let pmmr:PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
		let pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
		root = pmmr.root();
	}

	// pruning some choice nodes
	{
		let mut pmmr:PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
		let mut pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
		pmmr.prune(1, 1).unwrap();
		pmmr.prune(4, 1).unwrap();
		pmmr.prune(5, 1).unwrap();

@@ -86,10 +80,13 @@ fn pmmr_prune_compact() {

	// check the root and stored data
	{
		let pmmr:PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
		let pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
		assert_eq!(root, pmmr.root());
		// check we can still retrieve same element from leaf index 2
		assert_eq!(pmmr.get(2, true).unwrap().1.unwrap(), TestElem([0, 0, 0, 2]));
		assert_eq!(
			pmmr.get(2, true).unwrap().1.unwrap(),
			TestElem([0, 0, 0, 2])
		);
	}

	// compact

@@ -97,10 +94,16 @@ fn pmmr_prune_compact() {

	// recheck the root and stored data
	{
		let pmmr:PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
		let pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
		assert_eq!(root, pmmr.root());
		assert_eq!(pmmr.get(2, true).unwrap().1.unwrap(), TestElem([0, 0, 0, 2]));
		assert_eq!(pmmr.get(11, true).unwrap().1.unwrap(), TestElem([0, 0, 0, 7]));
		assert_eq!(
			pmmr.get(2, true).unwrap().1.unwrap(),
			TestElem([0, 0, 0, 2])
		);
		assert_eq!(
			pmmr.get(11, true).unwrap().1.unwrap(),
			TestElem([0, 0, 0, 7])
		);
	}

	teardown(data_dir);

@@ -120,7 +123,7 @@ fn pmmr_reload() {

	// save the root and prune some nodes so we have prune data
	{
		let mut pmmr:PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
		let mut pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
		pmmr.dump(false);
		root = pmmr.root();
		pmmr.prune(1, 1).unwrap();

@@ -134,7 +137,7 @@ fn pmmr_reload() {

	// prune some more to get rm log data
	{
		let mut pmmr:PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
		let mut pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
		pmmr.prune(5, 1).unwrap();
	}
	backend.sync().unwrap();

@@ -143,11 +146,11 @@ fn pmmr_reload() {

	// create a new backend and check everything is kosher
	{
		let mut backend:store::pmmr::PMMRBackend<TestElem> =
		let mut backend: store::pmmr::PMMRBackend<TestElem> =
			store::pmmr::PMMRBackend::new(data_dir.to_string(), None).unwrap();
		assert_eq!(backend.unpruned_size().unwrap(), mmr_size);
		{
			let pmmr:PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
			let pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
			assert_eq!(root, pmmr.root());
		}
		assert_eq!(backend.get(5, false), None);

@@ -166,7 +169,7 @@ fn pmmr_rewind() {
	backend.sync().unwrap();
	let root1: Hash;
	{
		let pmmr:PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
		let pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
		root1 = pmmr.root();
	}

@@ -174,7 +177,7 @@ fn pmmr_rewind() {
	backend.sync().unwrap();
	let root2: Hash;
	{
		let pmmr:PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
		let pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
		root2 = pmmr.root();
	}

@@ -183,7 +186,7 @@ fn pmmr_rewind() {

	// prune and compact the 2 first elements to spice things up
	{
		let mut pmmr:PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
		let mut pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
		pmmr.prune(1, 1).unwrap();
		pmmr.prune(2, 1).unwrap();
	}

@@ -192,24 +195,24 @@ fn pmmr_rewind() {

	// rewind and check the roots still match
	{
		let mut pmmr:PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
		let mut pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
		pmmr.rewind(9, 3).unwrap();
		assert_eq!(pmmr.root(), root2);
	}
	backend.sync().unwrap();
	{
		let pmmr:PMMR<TestElem, _> = PMMR::at(&mut backend, 10);
		let pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, 10);
		assert_eq!(pmmr.root(), root2);
	}

	{
		let mut pmmr:PMMR<TestElem, _> = PMMR::at(&mut backend, 10);
		let mut pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, 10);
		pmmr.rewind(5, 3).unwrap();
		assert_eq!(pmmr.root(), root1);
	}
	backend.sync().unwrap();
	{
		let pmmr:PMMR<TestElem, _> = PMMR::at(&mut backend, 7);
		let pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, 7);
		assert_eq!(pmmr.root(), root1);
	}

@@ -229,13 +232,13 @@ fn pmmr_compact_horizon() {

	// save the root
	{
		let pmmr:PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
		let pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
		root = pmmr.root();
	}

	// pruning some choice nodes with an increasing block height
	{
		let mut pmmr:PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
		let mut pmmr: PMMR<TestElem, _> = PMMR::at(&mut backend, mmr_size);
		pmmr.prune(1, 1).unwrap();
		pmmr.prune(2, 2).unwrap();
		pmmr.prune(4, 3).unwrap();

@@ -249,12 +252,13 @@ fn pmmr_compact_horizon() {
	// recheck stored data
	{
		// recreate backend
		let mut backend = store::pmmr::PMMRBackend::<TestElem>::new(data_dir.to_string(), None).unwrap();
		let mut backend =
			store::pmmr::PMMRBackend::<TestElem>::new(data_dir.to_string(), None).unwrap();
		// 9 elements total, minus 2 compacted
		assert_eq!(backend.data_size().unwrap(), 7);
		// 15 nodes total, 2 pruned and compacted
		assert_eq!(backend.hash_size().unwrap(), 13);

		// compact some more
		backend.check_compact(1, 5).unwrap();
	}

@@ -262,7 +266,8 @@ fn pmmr_compact_horizon() {
	// recheck stored data
	{
		// recreate backend
		let backend = store::pmmr::PMMRBackend::<TestElem>::new(data_dir.to_string(), None).unwrap();
		let backend =
			store::pmmr::PMMRBackend::<TestElem>::new(data_dir.to_string(), None).unwrap();
		// 9 elements total, minus 4 compacted
		assert_eq!(backend.data_size().unwrap(), 5);
		// 15 nodes total, 6 pruned and compacted

@@ -323,13 +328,11 @@ impl Writeable for TestElem {
}
impl Readable for TestElem {
	fn read(reader: &mut Reader) -> Result<TestElem, Error> {
		Ok(TestElem (
			[
				reader.read_u32()?,
				reader.read_u32()?,
				reader.read_u32()?,
				reader.read_u32()?,
			]
		))
		Ok(TestElem([
			reader.read_u32()?,
			reader.read_u32()?,
			reader.read_u32()?,
			reader.read_u32()?,
		]))
	}
}

@@ -21,12 +21,12 @@
#![deny(unused_mut)]
#![warn(missing_docs)]

extern crate byteorder;
extern crate rand;
#[macro_use]
extern crate slog;
extern crate slog_async;
extern crate slog_term;
extern crate byteorder;
extern crate rand;

#[macro_use]
extern crate lazy_static;

@@ -50,7 +50,7 @@ pub mod secp_static;
pub use secp_static::static_secp_instance;

pub mod types;
pub use types::{LoggingConfig, LogLevel};
pub use types::{LogLevel, LoggingConfig};

// other utils
use std::cell::{Ref, RefCell};

@@ -16,7 +16,7 @@
//! initialisation overhead

use std::sync::{Arc, Mutex};
use rand::{thread_rng};
use rand::thread_rng;
use secp_ as secp;

lazy_static! {

@@ -27,8 +27,8 @@ lazy_static! {

/// Returns the static instance, but calls randomize on it as well
/// (Recommended to avoid side channel attacks
pub fn static_secp_instance()-> Arc<Mutex<secp::Secp256k1>>{
	let mut secp_inst=SECP256K1.lock().unwrap();
pub fn static_secp_instance() -> Arc<Mutex<secp::Secp256k1>> {
	let mut secp_inst = SECP256K1.lock().unwrap();
	secp_inst.randomize(&mut thread_rng());
	SECP256K1.clone()
}
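Note: `static_secp_instance` hands out a shared, mutex-guarded Secp256k1 context and re-randomizes it on every call to blunt side-channel attacks. A hedged sketch of caller-side usage (the calling code is illustrative and assumed, not part of this commit):

use util::static_secp_instance;

fn with_shared_secp() {
	// Take the shared handle, then hold the lock only while using the context.
	let secp = static_secp_instance();
	let secp = secp.lock().unwrap();
	// ... build commitments or signatures with `secp` here ...
}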
@@ -21,7 +21,7 @@ use std::fs::{self, File};
use walkdir::WalkDir;

use zip_rs;
use zip_rs::result::{ZipResult, ZipError};
use zip_rs::result::{ZipError, ZipResult};
use zip_rs::write::FileOptions;

/// Compress a source directory recursively into a zip file using the

@@ -29,8 +29,10 @@ use zip_rs::write::FileOptions;
/// unwanted execution bits.
pub fn compress(src_dir: &Path, dst_file: &File) -> ZipResult<()> {
	if !Path::new(src_dir).is_dir() {
		return Err(ZipError::Io(
			io::Error::new(io::ErrorKind::Other, "Source must be a directory.")));
		return Err(ZipError::Io(io::Error::new(
			io::ErrorKind::Other,
			"Source must be a directory.",
		)));
	}

	let options = FileOptions::default()

@@ -61,7 +63,10 @@ pub fn compress(src_dir: &Path, dst_file: &File) -> ZipResult<()> {
}

/// Decompress a source file into the provided destination path.
pub fn decompress<R>(src_file: R, dest: &Path) -> ZipResult<()> where R: io::Read + io::Seek {
pub fn decompress<R>(src_file: R, dest: &Path) -> ZipResult<()>
where
	R: io::Read + io::Seek,
{
	let mut archive = zip_rs::ZipArchive::new(src_file)?;

	for i in 0..archive.len() {

@@ -85,10 +90,12 @@ pub fn decompress<R>(src_file: R, dest: &Path) -> ZipResult<()> where R: io::Rea
	{
		use std::os::unix::fs::PermissionsExt;
		if let Some(mode) = file.unix_mode() {
			fs::set_permissions(&file_path.to_str().unwrap(), PermissionsExt::from_mode(mode))?;
			fs::set_permissions(
				&file_path.to_str().unwrap(),
				PermissionsExt::from_mode(mode),
			)?;
		}
	}

	}
	Ok(())
}

@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

extern crate grin_util as util;
extern crate grin_util as util;

use std::fs::{self, File};
use std::path::Path;

@@ -30,12 +30,12 @@ fn zip_unzip() {
	let zip_file = File::create(zip_name).unwrap();
	zip::compress(&root.join("./to_zip"), &zip_file).unwrap();
	zip_file.sync_all();

	let zip_path = Path::new(zip_name);
	assert!(zip_path.exists());
	assert!(zip_path.is_file());
	assert!(zip_path.metadata().unwrap().len() > 300);

	fs::create_dir_all(root.join("./dezipped")).unwrap();
	let zip_file = File::open(zip_name).unwrap();
	zip::decompress(zip_file, &root.join("./dezipped")).unwrap();

@@ -17,7 +17,7 @@

use std::collections::hash_map::Entry;
use std::collections::HashMap;
use failure::{ResultExt};
use failure::ResultExt;

use api;
use types::*;

@@ -26,7 +26,6 @@ use util::secp::pedersen;
use util;
use util::LOGGER;

// Transitions a local wallet output from Unconfirmed -> Unspent.
fn mark_unspent_output(out: &mut OutputData) {
	match out.status {

@@ -53,23 +52,21 @@ pub fn refresh_outputs(config: &WalletConfig, keychain: &Keychain) -> Result<(),
	Ok(())
}

// TODO - this might be slow if we have really old outputs that have never been refreshed
// TODO - this might be slow if we have really old outputs that have never been
// refreshed
fn refresh_missing_block_hashes(config: &WalletConfig, keychain: &Keychain) -> Result<(), Error> {
	// build a local map of wallet outputs keyed by commit
	// and a list of outputs we want to query the node for
	let mut wallet_outputs: HashMap<pedersen::Commitment, Identifier> = HashMap::new();
	let _ = WalletData::read_wallet(&config.data_file_dir, |wallet_data| {
		for out in wallet_data
			.outputs
			.values()
			.filter(|x| {
				x.root_key_id == keychain.root_key_id() &&
				x.block.is_none() &&
				x.status == OutputStatus::Unspent
			})
		{
			let commit = keychain.commit_with_key_index(out.value, out.n_child).context(ErrorKind::Keychain)?;
			wallet_outputs.insert(commit, out.key_id.clone());
		for out in wallet_data.outputs.values().filter(|x| {
			x.root_key_id == keychain.root_key_id() && x.block.is_none()
				&& x.status == OutputStatus::Unspent
		}) {
			let commit = keychain
				.commit_with_key_index(out.value, out.n_child)
				.context(ErrorKind::Keychain)?;
			wallet_outputs.insert(commit, out.key_id.clone());
		}
		Ok(())
	});

@@ -95,16 +92,11 @@ fn refresh_missing_block_hashes(config: &WalletConfig, keychain: &Keychain) -> R

	let tip = get_tip_from_node(config)?;

	let height_params = format!(
		"start_height={}&end_height={}",
		0,
		tip.height,
	);
	let height_params = format!("start_height={}&end_height={}", 0, tip.height,);
	let mut query_params = vec![height_params];
	query_params.append(&mut id_params);

	let url =
		format!(
	let url = format!(
		"{}/v1/chain/utxos/byheight?{}",
		config.check_node_api_http_addr,
		query_params.join("&"),

@@ -114,17 +106,15 @@ fn refresh_missing_block_hashes(config: &WalletConfig, keychain: &Keychain) -> R
	let mut api_blocks: HashMap<pedersen::Commitment, api::BlockHeaderInfo> = HashMap::new();
	let mut api_merkle_proofs: HashMap<pedersen::Commitment, MerkleProofWrapper> = HashMap::new();
	match api::client::get::<Vec<api::BlockOutputs>>(url.as_str()) {
		Ok(blocks) => {
			for block in blocks {
				for out in block.outputs {
					api_blocks.insert(out.commit, block.header.clone());
					if let Some(merkle_proof) = out.merkle_proof {
						let wrapper = MerkleProofWrapper(merkle_proof);
						api_merkle_proofs.insert(out.commit, wrapper);
					}
		Ok(blocks) => for block in blocks {
			for out in block.outputs {
				api_blocks.insert(out.commit, block.header.clone());
				if let Some(merkle_proof) = out.merkle_proof {
					let wrapper = MerkleProofWrapper(merkle_proof);
					api_merkle_proofs.insert(out.commit, wrapper);
				}
			}
		},
		Err(e) => {
			// if we got anything other than 200 back from server, bye
			error!(LOGGER, "Refresh failed... unable to contact node: {}", e);

@@ -161,20 +151,18 @@ fn refresh_output_state(config: &WalletConfig, keychain: &Keychain) -> Result<()
	// build a local map of wallet outputs keyed by commit
	// and a list of outputs we want to query the node for
	let mut wallet_outputs: HashMap<pedersen::Commitment, Identifier> = HashMap::new();
	let _ = WalletData::read_wallet(&config.data_file_dir, |wallet_data| {
		for out in wallet_data
			.outputs
			.values()
			.filter(|x| {
				x.root_key_id == keychain.root_key_id() &&
				x.status != OutputStatus::Spent
			})
		{
			let commit = keychain.commit_with_key_index(out.value, out.n_child).context(ErrorKind::Keychain)?;
			wallet_outputs.insert(commit, out.key_id.clone());
		};
		Ok(())
	});
	let _ =
		WalletData::read_wallet(&config.data_file_dir, |wallet_data| {
			for out in wallet_data.outputs.values().filter(|x| {
				x.root_key_id == keychain.root_key_id() && x.status != OutputStatus::Spent
			}) {
				let commit = keychain
					.commit_with_key_index(out.value, out.n_child)
					.context(ErrorKind::Keychain)?;
				wallet_outputs.insert(commit, out.key_id.clone());
			}
			Ok(())
		});

	// build the necessary query params -
	// ?id=xxx&id=yyy&id=zzz

@@ -211,18 +199,22 @@ fn refresh_output_state(config: &WalletConfig, keychain: &Keychain) -> Result<()
	// the corresponding api output (if it exists)
	// and refresh it in-place in the wallet.
	// Note: minimizing the time we spend holding the wallet lock.
	WalletData::with_wallet(&config.data_file_dir, |wallet_data| for commit in wallet_outputs.keys() {
		let id = wallet_outputs.get(&commit).unwrap();
		if let Entry::Occupied(mut output) = wallet_data.outputs.entry(id.to_hex()) {
			match api_utxos.get(&commit) {
				Some(_) => mark_unspent_output(&mut output.get_mut()),
				None => mark_spent_output(&mut output.get_mut()),
			};
	WalletData::with_wallet(&config.data_file_dir, |wallet_data| {
		for commit in wallet_outputs.keys() {
			let id = wallet_outputs.get(&commit).unwrap();
			if let Entry::Occupied(mut output) = wallet_data.outputs.entry(id.to_hex()) {
				match api_utxos.get(&commit) {
					Some(_) => mark_unspent_output(&mut output.get_mut()),
					None => mark_spent_output(&mut output.get_mut()),
				};
			}
		}
	})
}

pub fn get_tip_from_node(config: &WalletConfig) -> Result<api::Tip, Error> {
	let url = format!("{}/v1/chain", config.check_node_api_http_addr);
	api::client::get::<api::Tip>(url.as_str()).context(ErrorKind::Node).map_err(|e| e.into())
	api::client::get::<api::Tip>(url.as_str())
		.context(ErrorKind::Node)
		.map_err(|e| e.into())
}

@@ -40,8 +40,7 @@ pub fn create_coinbase(url: &str, block_fees: &BlockFees) -> Result<CbData, Erro
		has_error = true;
		error!(
			LOGGER,
			"Failed to get coinbase from {}. Run grin wallet listen",
			url
			"Failed to get coinbase from {}. Run grin wallet listen", url
		);
	}
	if has_error {

@@ -56,7 +55,8 @@ fn retry_backoff_forever<F, R>(f: F) -> Result<R, Error>
where
	F: FnMut() -> Result<R, Error>,
{
	let mut core = reactor::Core::new().context(ErrorKind::GenericError("Could not create reactor"))?;
	let mut core =
		reactor::Core::new().context(ErrorKind::GenericError("Could not create reactor"))?;
	let retry_strategy =
		FibonacciBackoff::from_millis(100).max_delay(time::Duration::from_secs(10));
	let retry_future = Retry::spawn(core.handle(), retry_strategy, f);
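Note: the retry strategy above starts at 100ms and grows along the Fibonacci sequence, with `max_delay` capping each wait at 10 seconds. A small self-contained sketch of the resulting schedule (assuming the strategy yields Fibonacci-spaced delays, as a tokio-retry-style `FibonacciBackoff` would):

fn main() {
	// First few capped Fibonacci delays: 100, 100, 200, 300, 500, 800, 1300, 2100 ms.
	let (mut a, mut b) = (100u64, 100u64);
	for attempt in 1..=8 {
		let delay_ms = a.min(10_000); // max_delay(10s) caps every step
		println!("attempt {}: retry after {}ms", attempt, delay_ms);
		let next = a + b;
		a = b;
		b = next;
	}
}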
@@ -69,30 +69,38 @@ pub fn send_partial_tx(url: &str, partial_tx: &PartialTx) -> Result<PartialTx, E
}

fn single_send_partial_tx(url: &str, partial_tx: &PartialTx) -> Result<PartialTx, Error> {
	let mut core = reactor::Core::new().context(ErrorKind::Hyper)?;
	let mut core = reactor::Core::new().context(ErrorKind::Hyper)?;
	let client = hyper::Client::new(&core.handle());

	let mut req = Request::new(Method::Post, url.parse::<hyper::Uri>().context(ErrorKind::Hyper)?);
	let mut req = Request::new(
		Method::Post,
		url.parse::<hyper::Uri>().context(ErrorKind::Hyper)?,
	);
	req.headers_mut().set(ContentType::json());
	let json = serde_json::to_string(&partial_tx).context(ErrorKind::Hyper)?;
	req.set_body(json);

	let work = client.request(req).and_then(|res| {
		res.body().concat2().and_then(move |body| {
			let partial_tx: PartialTx = serde_json::from_slice(&body).map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
			let partial_tx: PartialTx =
				serde_json::from_slice(&body).map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
			Ok(partial_tx)
		})
	});
	let res = core.run(work).context(ErrorKind::Hyper)?;
	Ok(res)
	let res = core.run(work).context(ErrorKind::Hyper)?;
	Ok(res)
}

/// Makes a single request to the wallet API to create a new coinbase output.
fn single_create_coinbase(url: &str, block_fees: &BlockFees) -> Result<CbData, Error> {
	let mut core = reactor::Core::new().context(ErrorKind::GenericError("Could not create reactor"))?;
	let mut core =
		reactor::Core::new().context(ErrorKind::GenericError("Could not create reactor"))?;
	let client = hyper::Client::new(&core.handle());

	let mut req = Request::new(Method::Post, url.parse::<hyper::Uri>().context(ErrorKind::Uri)?);
	let mut req = Request::new(
		Method::Post,
		url.parse::<hyper::Uri>().context(ErrorKind::Uri)?,
	);
	req.headers_mut().set(ContentType::json());
	let json = serde_json::to_string(&block_fees).context(ErrorKind::Format)?;
	req.set_body(json);

@@ -105,6 +113,7 @@ fn single_create_coinbase(url: &str, block_fees: &BlockFees) -> Result<CbData, E
		})
	});

	let res = core.run(work).context(ErrorKind::GenericError("Could not run core"))?;
	let res = core.run(work)
		.context(ErrorKind::GenericError("Could not run core"))?;
	Ok(res)
}

@@ -26,7 +26,6 @@ use types::*;
use util;
use failure::{Fail, ResultExt};

pub struct CoinbaseHandler {
	pub config: WalletConfig,
	pub keychain: Keychain,

@@ -35,22 +34,21 @@ pub struct CoinbaseHandler {
impl CoinbaseHandler {
	fn build_coinbase(&self, block_fees: &BlockFees) -> Result<CbData, Error> {
		let (out, kern, block_fees) = receive_coinbase(&self.config, &self.keychain, block_fees)
			.map_err(|e| {
				api::Error::Internal(format!("Error building coinbase: {:?}", e))
			}).context(ErrorKind::Node)?;
			.map_err(|e| api::Error::Internal(format!("Error building coinbase: {:?}", e)))
			.context(ErrorKind::Node)?;

		let out_bin = ser::ser_vec(&out).map_err(|e| {
			api::Error::Internal(format!("Error serializing output: {:?}", e))
		}).context(ErrorKind::Node)?;
		let out_bin = ser::ser_vec(&out)
			.map_err(|e| api::Error::Internal(format!("Error serializing output: {:?}", e)))
			.context(ErrorKind::Node)?;

		let kern_bin = ser::ser_vec(&kern).map_err(|e| {
			api::Error::Internal(format!("Error serializing kernel: {:?}", e))
		}).context(ErrorKind::Node)?;
		let kern_bin = ser::ser_vec(&kern)
			.map_err(|e| api::Error::Internal(format!("Error serializing kernel: {:?}", e)))
			.context(ErrorKind::Node)?;

		let key_id_bin = match block_fees.key_id {
			Some(key_id) => ser::ser_vec(&key_id).map_err(|e| {
				api::Error::Internal(format!("Error serializing kernel: {:?}", e))
			}).context(ErrorKind::Node)?,
			Some(key_id) => ser::ser_vec(&key_id)
				.map_err(|e| api::Error::Internal(format!("Error serializing kernel: {:?}", e)))
				.context(ErrorKind::Node)?,
			None => vec![],
		};

@@ -15,14 +15,15 @@
use checker;
use keychain::Keychain;
use core::core::amount_to_hr_string;
use types::{WalletConfig, WalletData, OutputStatus, WalletInfo};
use types::{OutputStatus, WalletConfig, WalletData, WalletInfo};
use prettytable;

pub fn show_info(config: &WalletConfig, keychain: &Keychain) {
	let wallet_info = retrieve_info(config, keychain);
	println!("\n____ Wallet Summary Info at {} ({}) ____\n",
		wallet_info.current_height,
		wallet_info.data_confirmed_from);
	println!(
		"\n____ Wallet Summary Info at {} ({}) ____\n",
		wallet_info.current_height, wallet_info.data_confirmed_from
	);
	let mut table = table!(
		[bFG->"Total", FG->amount_to_hr_string(wallet_info.total)],
		[bFY->"Awaiting Confirmation", FY->amount_to_hr_string(wallet_info.amount_awaiting_confirmation)],

@@ -37,9 +38,9 @@ pub fn show_info(config: &WalletConfig, keychain: &Keychain) {

	if !wallet_info.data_confirmed {
		println!(
			"\nWARNING: Failed to verify wallet contents with grin server. \
			Above info is maybe not fully updated or invalid! \
			Check that your `grin server` is OK, or see `wallet help restore`"
			"\nWARNING: Failed to verify wallet contents with grin server. \
			 Above info is maybe not fully updated or invalid! \
			 Check that your `grin server` is OK, or see `wallet help restore`"
		);
	}
}

@@ -67,7 +68,7 @@ pub fn retrieve_info(config: &WalletConfig, keychain: &Keychain) -> WalletInfo {
		if out.status == OutputStatus::Unspent {
			unspent_total += out.value;
			if out.lock_height > current_height {
			unspent_but_locked_total += out.value;
				unspent_but_locked_total += out.value;
			}
		}
		if out.status == OutputStatus::Unconfirmed && !out.is_coinbase {

@@ -76,18 +77,18 @@ pub fn retrieve_info(config: &WalletConfig, keychain: &Keychain) -> WalletInfo {
		if out.status == OutputStatus::Locked {
			locked_total += out.value;
		}
	};
	}

	let mut data_confirmed = true;
	if let Err(_) = result {
		data_confirmed = false;
	}
	Ok(WalletInfo {
		current_height : current_height,
		total: unspent_total+unconfirmed_total,
		current_height: current_height,
		total: unspent_total + unconfirmed_total,
		amount_awaiting_confirmation: unconfirmed_total,
		amount_confirmed_but_locked: unspent_but_locked_total,
		amount_currently_spendable: unspent_total-unspent_but_locked_total,
		amount_currently_spendable: unspent_total - unspent_but_locked_total,
		amount_locked: locked_total,
		data_confirmed: data_confirmed,
		data_confirmed_from: String::from(from),

@@ -16,17 +16,17 @@

extern crate blake2_rfc as blake2;
extern crate byteorder;
#[macro_use]
extern crate prettytable;
extern crate rand;
extern crate serde;
extern crate uuid;
#[macro_use]
extern crate serde_derive;
extern crate serde_json;
#[macro_use]
extern crate slog;
#[macro_use]
extern crate prettytable;
extern crate term;
extern crate uuid;

extern crate bodyparser;
extern crate failure;

@@ -57,8 +57,9 @@ pub mod client;
pub mod server;

pub use outputs::show_outputs;
pub use info::{show_info, retrieve_info};
pub use receiver::{WalletReceiver};
pub use info::{retrieve_info, show_info};
pub use receiver::WalletReceiver;
pub use sender::{issue_burn_tx, issue_send_tx};
pub use types::{BlockFees, CbData, Error, ErrorKind, WalletConfig, WalletReceiveRequest, WalletInfo, WalletSeed};
pub use types::{BlockFees, CbData, Error, ErrorKind, WalletConfig, WalletInfo,
                WalletReceiveRequest, WalletSeed};
pub use restore::restore;

@@ -15,19 +15,19 @@
use checker;
use keychain::Keychain;
use core::core;
use types::{WalletConfig, WalletData, OutputStatus};
use types::{OutputStatus, WalletConfig, WalletData};
use prettytable;
use term;
use std::io::prelude::*;

pub fn show_outputs(config: &WalletConfig, keychain: &Keychain, show_spent:bool) {
pub fn show_outputs(config: &WalletConfig, keychain: &Keychain, show_spent: bool) {
	let root_key_id = keychain.root_key_id();
	let result = checker::refresh_outputs(&config, &keychain);

	// just read the wallet here, no need for a write lock
	let _ = WalletData::read_wallet(&config.data_file_dir, |wallet_data| {
		// get the current height via the api
		// if we cannot get the current height use the max height known to the wallet
		// if we cannot get the current height use the max height known to the wallet
		let current_height = match checker::get_tip_from_node(config) {
			Ok(tip) => tip.height,
			Err(_) => match wallet_data.outputs.values().map(|out| out.height).max() {

@@ -40,16 +40,17 @@ pub fn show_outputs(config: &WalletConfig, keychain: &Keychain, show_spent:bool)
			.outputs
			.values()
			.filter(|out| out.root_key_id == root_key_id)
			.filter(|out|
			.filter(|out| {
				if show_spent {
					true
				} else {
					out.status != OutputStatus::Spent
				})
				}
			})
			.collect::<Vec<_>>();
		outputs.sort_by_key(|out| out.n_child);

		let title=format!("Wallet Outputs - Block Height: {}", current_height);
		let title = format!("Wallet Outputs - Block Height: {}", current_height);
		println!();
		let mut t = term::stdout().unwrap();
		t.fg(term::color::MAGENTA).unwrap();

@@ -69,13 +70,13 @@ pub fn show_outputs(config: &WalletConfig, keychain: &Keychain, show_spent:bool)
		]);

		for out in outputs {
			let key_id=format!("{}", out.key_id);
			let height=format!("{}", out.height);
			let lock_height=format!("{}", out.lock_height);
			let status=format!("{:?}", out.status);
			let is_coinbase=format!("{}", out.is_coinbase);
			let num_confirmations=format!("{}", out.num_confirmations(current_height));
			let value=format!("{}", core::amount_to_hr_string(out.value));
			let key_id = format!("{}", out.key_id);
			let height = format!("{}", out.height);
			let lock_height = format!("{}", out.lock_height);
			let status = format!("{:?}", out.status);
			let is_coinbase = format!("{}", out.is_coinbase);
			let num_confirmations = format!("{}", out.num_confirmations(current_height));
			let value = format!("{}", core::amount_to_hr_string(out.value));
			table.add_row(row![
				bFC->key_id,
				bFB->height,

@@ -25,12 +25,12 @@ use uuid::Uuid;

use api;
use core::consensus::reward;
use core::core::{build, Block, Committed, Output, Transaction, TxKernel, amount_to_hr_string};
use core::core::{amount_to_hr_string, build, Block, Committed, Output, Transaction, TxKernel};
use core::{global, ser};
use keychain::{Identifier, Keychain, BlindingFactor};
use keychain::{BlindingFactor, Identifier, Keychain};
use types::*;
use util::{LOGGER, to_hex, secp};
use failure::{ResultExt};
use util::{secp, to_hex, LOGGER};
use failure::ResultExt;

/// Dummy wrapper for the hex-encoded serialized transaction.
#[derive(Serialize, Deserialize)]

@@ -51,9 +51,10 @@ pub struct TxWrapper {
fn handle_sender_initiation(
	config: &WalletConfig,
	keychain: &Keychain,
	partial_tx: &PartialTx
	partial_tx: &PartialTx,
) -> Result<PartialTx, Error> {
	let (amount, _sender_pub_blinding, sender_pub_nonce, kernel_offset, _sig, tx) = read_partial_tx(keychain, partial_tx)?;
	let (amount, _sender_pub_blinding, sender_pub_nonce, kernel_offset, _sig, tx) =
		read_partial_tx(keychain, partial_tx)?;

	let root_key_id = keychain.root_key_id();

@@ -68,23 +69,24 @@ fn handle_sender_initiation(
		})?;
	}

	if fee > amount {
	if fee > amount {
		info!(
			LOGGER,
			"Rejected the transfer because transaction fee ({}) exceeds received amount ({}).",
			amount_to_hr_string(fee),
			amount_to_hr_string(amount)
		);
		return Err(ErrorKind::FeeExceedsAmount {
			sender_amount: amount,
			recipient_fee: fee,
		})?;
	}
		return Err(ErrorKind::FeeExceedsAmount {
			sender_amount: amount,
			recipient_fee: fee,
		})?;
	}

	let out_amount = amount - fee;

	// First step is just to get the excess sum of the outputs we're participating in
	// Output and key needs to be stored until transaction finalisation time, somehow
	// First step is just to get the excess sum of the outputs we're participating
	// in Output and key needs to be stored until transaction finalisation time,
	// somehow

	let key_id = WalletData::with_wallet(&config.data_file_dir, |wallet_data| {
		let (key_id, derivation) = next_available_key(&wallet_data, keychain);

@@ -106,29 +108,35 @@ fn handle_sender_initiation(
	})?;

	// Still handy for getting the blinding sum
	let (_, blind_sum) = build::partial_transaction(
		vec![
			build::output(out_amount, key_id.clone()),
		],
		keychain,
	).context(ErrorKind::Keychain)?;
	let (_, blind_sum) =
		build::partial_transaction(vec![build::output(out_amount, key_id.clone())], keychain)
			.context(ErrorKind::Keychain)?;

	warn!(LOGGER, "Creating new aggsig context");
	// Create a new aggsig context
	// this will create a new blinding sum and nonce, and store them
	let blind = blind_sum.secret_key(&keychain.secp()).context(ErrorKind::Keychain)?;
	keychain.aggsig_create_context(&partial_tx.id, blind).context(ErrorKind::Keychain)?;
	let blind = blind_sum
		.secret_key(&keychain.secp())
		.context(ErrorKind::Keychain)?;
	keychain
		.aggsig_create_context(&partial_tx.id, blind)
		.context(ErrorKind::Keychain)?;
	keychain.aggsig_add_output(&partial_tx.id, &key_id);

	let sig_part = keychain.aggsig_calculate_partial_sig(
		&partial_tx.id,
		&sender_pub_nonce,
		fee,
		tx.lock_height(),
	).unwrap();
	let sig_part = keychain
		.aggsig_calculate_partial_sig(&partial_tx.id, &sender_pub_nonce, fee, tx.lock_height())
		.unwrap();

	// Build the response, which should contain sR, blinding excess xR * G, public nonce kR * G
	let mut partial_tx = build_partial_tx(&partial_tx.id, keychain, amount, kernel_offset, Some(sig_part), tx);
	// Build the response, which should contain sR, blinding excess xR * G, public
	// nonce kR * G
	let mut partial_tx = build_partial_tx(
		&partial_tx.id,
		keychain,
		amount,
		kernel_offset,
		Some(sig_part),
		tx,
	);
	partial_tx.phase = PartialTxPhase::ReceiverInitiation;

	Ok(partial_tx)
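Note: the handlers in this file implement the receiver's legs of a four-phase aggsig exchange. Read together with the phase assignments above, the flow is roughly as follows (a sketch inferred from the phase names appearing in this diff, not an authoritative protocol description):

// Sender                         Receiver
// SenderInitiation         ->    handle_sender_initiation: pick output key,
//                                create aggsig context, return a partial sig
//                          <-    ReceiverInitiation
// SenderConfirmation       ->    handle_sender_confirmation: verify the
//                                sender's partial sig, finalize, post the tx
//                          <-    ReceiverConfirmation
enum PartialTxPhase {
	SenderInitiation,
	ReceiverInitiation,
	SenderConfirmation,
	ReceiverConfirmation,
}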
@@ -149,16 +157,18 @@ fn handle_sender_initiation(
fn handle_sender_confirmation(
	config: &WalletConfig,
	keychain: &Keychain,
	partial_tx: &PartialTx
	partial_tx: &PartialTx,
) -> Result<PartialTx, Error> {
	let (amount, sender_pub_blinding, sender_pub_nonce, kernel_offset, sender_sig_part, tx) = read_partial_tx(keychain, partial_tx)?;
	let (amount, sender_pub_blinding, sender_pub_nonce, kernel_offset, sender_sig_part, tx) =
		read_partial_tx(keychain, partial_tx)?;
	let sender_sig_part = sender_sig_part.unwrap();
	let res = keychain.aggsig_verify_partial_sig(
		&partial_tx.id,
		&sender_sig_part,
		&sender_pub_nonce,
		&sender_pub_blinding,
		tx.fee(), tx.lock_height(),
		tx.fee(),
		tx.lock_height(),
	);

	if !res {

@@ -167,26 +177,29 @@ fn handle_sender_confirmation(
	}

	// Just calculate our sig part again instead of storing
	let our_sig_part = keychain.aggsig_calculate_partial_sig(
		&partial_tx.id,
		&sender_pub_nonce,
		tx.fee(),
		tx.lock_height(),
	).unwrap();
	let our_sig_part = keychain
		.aggsig_calculate_partial_sig(
			&partial_tx.id,
			&sender_pub_nonce,
			tx.fee(),
			tx.lock_height(),
		)
		.unwrap();

	// And the final signature
	let final_sig = keychain.aggsig_calculate_final_sig(
		&partial_tx.id,
		&sender_sig_part,
		&our_sig_part,
		&sender_pub_nonce,
	).unwrap();
	let final_sig = keychain
		.aggsig_calculate_final_sig(
			&partial_tx.id,
			&sender_sig_part,
			&our_sig_part,
			&sender_pub_nonce,
		)
		.unwrap();

	// Calculate the final public key (for our own sanity check)
	let final_pubkey = keychain.aggsig_calculate_final_pubkey(
		&partial_tx.id,
		&sender_pub_blinding,
	).unwrap();
	let final_pubkey = keychain
		.aggsig_calculate_final_pubkey(&partial_tx.id, &sender_pub_blinding)
		.unwrap();

	// Check our final sig verifies
	let res = keychain.aggsig_verify_final_sig_build_msg(

@@ -214,12 +227,18 @@ fn handle_sender_confirmation(
	let tx_hex = to_hex(ser::ser_vec(&final_tx).unwrap());

	let url = format!("{}/v1/pool/push", config.check_node_api_http_addr.as_str());
	api::client::post(url.as_str(), &TxWrapper { tx_hex: tx_hex })
		.context(ErrorKind::Node)?;
	api::client::post(url.as_str(), &TxWrapper { tx_hex: tx_hex }).context(ErrorKind::Node)?;

	// Return what we've actually posted
	// TODO - why build_partial_tx here? Just a naming issue?
	let mut partial_tx = build_partial_tx(&partial_tx.id, keychain, amount, kernel_offset, Some(final_sig), tx);
	let mut partial_tx = build_partial_tx(
		&partial_tx.id,
		keychain,
		amount,
		kernel_offset,
		Some(final_sig),
		tx,
	);
	partial_tx.phase = PartialTxPhase::ReceiverConfirmation;
	Ok(partial_tx)
}

@@ -239,28 +258,38 @@ impl Handler for WalletReceiver {
		if let Ok(Some(partial_tx)) = struct_body {
			match partial_tx.phase {
				PartialTxPhase::SenderInitiation => {
					let resp_tx = handle_sender_initiation(&self.config, &self.keychain, &partial_tx)
						.map_err(|e| {
					let resp_tx = handle_sender_initiation(
						&self.config,
						&self.keychain,
						&partial_tx,
					).map_err(|e| {
						error!(LOGGER, "Phase 1 Sender Initiation -> Problematic partial tx, looks like this: {:?}", partial_tx);
						api::Error::Internal(
							format!("Error processing partial transaction: {:?}", e),
						)})
						.unwrap();
						api::Error::Internal(format!(
							"Error processing partial transaction: {:?}",
							e
						))
					})
						.unwrap();
					let json = serde_json::to_string(&resp_tx).unwrap();
					Ok(Response::with((status::Ok, json)))
				},
				}
				PartialTxPhase::SenderConfirmation => {
					let resp_tx = handle_sender_confirmation(&self.config, &self.keychain, &partial_tx)
						.map_err(|e| {
					let resp_tx = handle_sender_confirmation(
						&self.config,
						&self.keychain,
						&partial_tx,
					).map_err(|e| {
						error!(LOGGER, "Phase 3 Sender Confirmation -> Problematic partial tx, looks like this: {:?}", partial_tx);
						api::Error::Internal(
							format!("Error processing partial transaction: {:?}", e),
						)})
						.unwrap();
						api::Error::Internal(format!(
							"Error processing partial transaction: {:?}",
							e
						))
					})
						.unwrap();
					let json = serde_json::to_string(&resp_tx).unwrap();
					Ok(Response::with((status::Ok, json)))
				},
				_=> {
				}
				_ => {
					error!(LOGGER, "Unhandled Phase: {:?}", partial_tx);
					Ok(Response::with((status::BadRequest, "Unhandled Phase")))
				}

@@ -271,10 +300,7 @@ impl Handler for WalletReceiver {
		}
	}

fn retrieve_existing_key(
	wallet_data: &WalletData,
	key_id: Identifier,
) -> (Identifier, u32) {
fn retrieve_existing_key(wallet_data: &WalletData, key_id: Identifier) -> (Identifier, u32) {
	if let Some(existing) = wallet_data.get_output(&key_id) {
		let key_id = existing.key_id.clone();
		let derivation = existing.n_child;

@@ -284,10 +310,7 @@ fn retrieve_existing_key(
	}
}

fn next_available_key(
	wallet_data: &WalletData,
	keychain: &Keychain,
) -> (Identifier, u32) {
fn next_available_key(wallet_data: &WalletData, keychain: &Keychain) -> (Identifier, u32) {
	let root_key_id = keychain.root_key_id();
	let derivation = wallet_data.next_child(root_key_id.clone());
	let key_id = keychain.derive_key_id(derivation).unwrap();

@@ -342,12 +365,8 @@ pub fn receive_coinbase(

	debug!(LOGGER, "receive_coinbase: {:?}", block_fees);

	let (out, kern) = Block::reward_output(
		&keychain,
		&key_id,
		block_fees.fees,
		block_fees.height,
	).context(ErrorKind::Keychain)?;
	let (out, kern) = Block::reward_output(&keychain, &key_id, block_fees.fees, block_fees.height)
		.context(ErrorKind::Keychain)?;
	Ok((out, kern, block_fees))
}

@@ -381,11 +400,11 @@ fn build_final_transaction(
		amount_to_hr_string(fee),
		amount_to_hr_string(amount)
	);
	return Err(ErrorKind::FeeExceedsAmount {
		sender_amount: amount,
		recipient_fee: fee,
	})?;
	}
		return Err(ErrorKind::FeeExceedsAmount {
			sender_amount: amount,
			recipient_fee: fee,
		})?;
	}

	let out_amount = amount - fee;

@@ -430,8 +449,14 @@ fn build_final_transaction(
	let tx_excess = final_tx.sum_commitments().context(ErrorKind::Transaction)?;

	// subtract the kernel_excess (built from kernel_offset)
	let offset_excess = keychain.secp().commit(0, kernel_offset.secret_key(&keychain.secp()).unwrap()).unwrap();
	keychain.secp().commit_sum(vec![tx_excess], vec![offset_excess]).context(ErrorKind::Transaction)?
	let offset_excess = keychain
		.secp()
		.commit(0, kernel_offset.secret_key(&keychain.secp()).unwrap())
		.unwrap();
	keychain
		.secp()
		.commit_sum(vec![tx_excess], vec![offset_excess])
		.context(ErrorKind::Transaction)?
	};

	// update the tx kernel to reflect the offset excess and sig
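Note: the block above derives the kernel excess by subtracting the offset term from the transaction's commitment sum; `commit_sum` takes the positive commitments first and the negatives second. A sketch of the relation it evaluates, reusing the names from the hunk (an illustration, not additional commit code):

// tx_excess     = sum(output commitments) - sum(input commitments)
// offset_excess = commit(0, kernel_offset), i.e. kernel_offset * G
// kernel excess = tx_excess - offset_excess
let kernel_excess = keychain
	.secp()
	.commit_sum(vec![tx_excess], vec![offset_excess])?;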
@@ -440,7 +465,9 @@ fn build_final_transaction(
	final_tx.kernels[0].excess_sig = excess_sig.clone();

	// confirm the kernel verifies successfully before proceeding
	final_tx.kernels[0].verify().context(ErrorKind::Transaction)?;
	final_tx.kernels[0]
		.verify()
		.context(ErrorKind::Transaction)?;

	// confirm the overall transaction is valid (including the updated kernel)
	let _ = final_tx.validate().context(ErrorKind::Transaction)?;

@@ -11,18 +11,17 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use failure::{ResultExt, Fail};
use keychain::{Keychain, Identifier};
use util::{LOGGER, to_hex};
use failure::{Fail, ResultExt};
use keychain::{Identifier, Keychain};
use util::{to_hex, LOGGER};
use util::secp::pedersen;
use api;
use core::global;
use core::core::{Output, SwitchCommitHash};
use core::core::transaction::OutputFeatures;
use types::{WalletConfig, WalletData, OutputData, OutputStatus, Error, ErrorKind};
use types::{Error, ErrorKind, OutputData, OutputStatus, WalletConfig, WalletData};
use byteorder::{BigEndian, ByteOrder};

pub fn get_chain_height(config: &WalletConfig) -> Result<u64, Error> {
	let url = format!("{}/v1/chain", config.check_node_api_http_addr);

@@ -46,13 +45,9 @@ fn output_with_range_proof(
	commit_id: &str,
	height: u64,
) -> Result<api::OutputPrintable, Error> {
	let url =
		format!(
	let url = format!(
		"{}/v1/chain/utxos/byheight?start_height={}&end_height={}&id={}&include_rp",
		config.check_node_api_http_addr,
		height,
		height,
		commit_id,
		config.check_node_api_http_addr, height, height, commit_id,
	);

	match api::client::get::<Vec<api::BlockOutputs>>(url.as_str()) {

@@ -64,7 +59,7 @@ fn output_with_range_proof(
				Err(ErrorKind::Node)?
			}
		} else {
			Err(ErrorKind::Node)?
			Err(ErrorKind::Node)?
		}
	}
	Err(e) => {

@@ -90,9 +85,15 @@ fn retrieve_amount_and_coinbase_status(
			api::OutputType::Coinbase => OutputFeatures::COINBASE_OUTPUT,
			api::OutputType::Transaction => OutputFeatures::DEFAULT_OUTPUT,
		},
		proof: output.range_proof().context(ErrorKind::GenericError("range proof error"))?,
		switch_commit_hash: output.switch_commit_hash().context(ErrorKind::GenericError("switch commit hash error"))?,
		commit: output.commit().context(ErrorKind::GenericError("commit error"))?,
		proof: output
			.range_proof()
			.context(ErrorKind::GenericError("range proof error"))?,
		switch_commit_hash: output
			.switch_commit_hash()
			.context(ErrorKind::GenericError("switch commit hash error"))?,
		commit: output
			.commit()
			.context(ErrorKind::GenericError("commit error"))?,
	};

	if let Some(amount) = core_output.recover_value(keychain, &key_id) {

@@ -113,11 +114,9 @@ pub fn utxos_batch_block(
) -> Result<Vec<api::BlockOutputs>, Error> {
	let query_param = format!("start_height={}&end_height={}", start_height, end_height);

	let url =
		format!(
	let url = format!(
		"{}/v1/chain/utxos/byheight?{}",
		config.check_node_api_http_addr,
		query_param,
		config.check_node_api_http_addr, query_param,
	);

	match api::client::get::<Vec<api::BlockOutputs>>(url.as_str()) {

@@ -167,12 +166,7 @@ fn find_utxos_with_key(
	);

	if x == expected_hash {
		info!(
			LOGGER,
			"Output found: {:?}, key_index: {:?}",
			output,
			i,
		);
		info!(LOGGER, "Output found: {:?}, key_index: {:?}", output, i,);

		// add it to result set here
		let commit_id = output.commit.0;

@@ -219,8 +213,7 @@ fn find_utxos_with_key(
	} else {
		info!(
			LOGGER,
			"Unable to retrieve the amount (needs investigating) {:?}",
			res,
			"Unable to retrieve the amount (needs investigating) {:?}", res,
		);
	}
}

@@ -258,15 +251,13 @@ pub fn restore(
	let chain_height = get_chain_height(config)?;
	info!(
		LOGGER,
		"Starting restore: Chain height is {}.",
		chain_height
		"Starting restore: Chain height is {}.", chain_height
	);

	let mut switch_commit_cache: Vec<pedersen::Commitment> = vec![];
	info!(
		LOGGER,
		"Building key derivation cache ({}) ...",
		key_derivations,
		"Building key derivation cache ({}) ...", key_derivations,
	);
	for i in 0..key_derivations {
		let switch_commit = keychain.switch_commit_from_index(i as u32).unwrap();

@@ -318,12 +309,11 @@ pub fn restore(
					block: None,
					merkle_proof: None,
				});
			};
			}
		}
	}
});
h > 0
}
{}
} {}
	Ok(())
}

@@ -18,9 +18,9 @@ use uuid::Uuid;
use api;
use client;
use checker;
use core::core::{build, Transaction, amount_to_hr_string};
use core::core::{amount_to_hr_string, build, Transaction};
use core::ser;
use keychain::{BlindingFactor, BlindSum, Identifier, Keychain};
use keychain::{BlindSum, BlindingFactor, Identifier, Keychain};
use receiver::TxWrapper;
use types::*;
use util::LOGGER;

@@ -64,44 +64,52 @@ pub fn issue_send_tx(
	// Generate a random kernel offset here
	// and subtract it from the blind_sum so we create
	// the aggsig context with the "split" key
	let kernel_offset = BlindingFactor::from_secret_key(
		SecretKey::new(&keychain.secp(), &mut thread_rng())
	);
	let kernel_offset =
		BlindingFactor::from_secret_key(SecretKey::new(&keychain.secp(), &mut thread_rng()));

	let blind_offset = keychain.blind_sum(
		&BlindSum::new()
	let blind_offset = keychain
		.blind_sum(&BlindSum::new()
			.add_blinding_factor(blind)
			.sub_blinding_factor(kernel_offset)
	).unwrap();
			.sub_blinding_factor(kernel_offset))
		.unwrap();

	//
	// -Sender picks random blinding factors for all outputs it participates in, computes total blinding excess xS
	// -Sender picks random nonce kS
	// -Sender picks random blinding factors for all outputs it participates in,
	// computes total blinding excess xS -Sender picks random nonce kS
	// -Sender posts inputs, outputs, Message M=fee, xS * G and kS * G to Receiver
	//
	// Create a new aggsig context
	let tx_id = Uuid::new_v4();
	let skey = blind_offset.secret_key(&keychain.secp()).context(ErrorKind::Keychain)?;
	keychain.aggsig_create_context(&tx_id, skey).context(ErrorKind::Keychain)?;
	let skey = blind_offset
		.secret_key(&keychain.secp())
		.context(ErrorKind::Keychain)?;
	keychain
		.aggsig_create_context(&tx_id, skey)
		.context(ErrorKind::Keychain)?;

	let partial_tx = build_partial_tx(&tx_id, keychain, amount_with_fee, kernel_offset, None, tx);

	// Closure to acquire wallet lock and lock the coins being spent
	// so we avoid accidental double spend attempt.
	let update_wallet = || WalletData::with_wallet(&config.data_file_dir, |wallet_data| {
		for coin in coins {
			wallet_data.lock_output(&coin);
		}
	});
	let update_wallet = || {
		WalletData::with_wallet(&config.data_file_dir, |wallet_data| {
			for coin in coins {
				wallet_data.lock_output(&coin);
			}
		})
	};

	// Closure to acquire wallet lock and delete the change output in case of tx failure.
	let rollback_wallet = || WalletData::with_wallet(&config.data_file_dir, |wallet_data| {
		info!(LOGGER, "cleaning up unused change output from wallet");
		wallet_data.delete_output(&change_key);
	});
	// Closure to acquire wallet lock and delete the change output in case of tx
	// failure.
	let rollback_wallet = || {
		WalletData::with_wallet(&config.data_file_dir, |wallet_data| {
			info!(LOGGER, "cleaning up unused change output from wallet");
			wallet_data.delete_output(&change_key);
		})
	};

	// TODO: stdout option removed for now, as it won't work very will with this version of
	// aggsig exchange
	// TODO: stdout option removed for now, as it won't work very will with this
	// version of aggsig exchange

	/*if dest == "stdout" {
		let json_tx = serde_json::to_string_pretty(&partial_tx).unwrap();

@@ -110,7 +118,10 @@ pub fn issue_send_tx(
	} else */

	if &dest[..4] != "http" {
		panic!("dest formatted as {} but send -d expected stdout or http://IP:port", dest);
		panic!(
			"dest formatted as {} but send -d expected stdout or http://IP:port",
			dest
		);
	}

	let url = format!("{}/v1/receive/transaction", &dest);
@ -118,14 +129,19 @@ pub fn issue_send_tx(
|
|||
let res = client::send_partial_tx(&url, &partial_tx);
|
||||
if let Err(e) = res {
|
||||
match e.kind() {
|
||||
ErrorKind::FeeExceedsAmount {sender_amount, recipient_fee} =>
|
||||
error!(
|
||||
ErrorKind::FeeExceedsAmount {
|
||||
sender_amount,
|
||||
recipient_fee,
|
||||
} => error!(
|
||||
LOGGER,
|
||||
"Recipient rejected the transfer because transaction fee ({}) exceeded amount ({}).",
|
||||
amount_to_hr_string(recipient_fee),
|
||||
amount_to_hr_string(sender_amount)
|
||||
),
|
||||
_ => error!(LOGGER, "Communication with receiver failed on SenderInitiation send. Aborting transaction"),
|
||||
_ => error!(
|
||||
LOGGER,
|
||||
"Communication with receiver failed on SenderInitiation send. Aborting transaction"
|
||||
),
|
||||
}
|
||||
rollback_wallet()?;
|
||||
return Err(e);
|
||||
|
@ -133,11 +149,12 @@ pub fn issue_send_tx(
|
|||
|
||||
/* -Sender receives xR * G, kR * G, sR
|
||||
* -Sender computes Schnorr challenge e = H(M | kR * G + kS * G)
|
||||
* -Sender verifies receivers sig, by verifying that kR * G + e * xR * G = sR * G·
|
||||
* -Sender computes their part of signature, sS = kS + e * xS
|
||||
* -Sender verifies receivers sig, by verifying that kR * G + e * xR * G =
|
||||
* sR * G· -Sender computes their part of signature, sS = kS + e * xS
|
||||
* -Sender posts sS to receiver
|
||||
*/
|
||||
let (_amount, recp_pub_blinding, recp_pub_nonce, kernel_offset, sig, tx) = read_partial_tx(keychain, &res.unwrap())?;
|
||||
*/
|
||||
let (_amount, recp_pub_blinding, recp_pub_nonce, kernel_offset, sig, tx) =
|
||||
read_partial_tx(keychain, &res.unwrap())?;
|
||||
let res = keychain.aggsig_verify_partial_sig(
|
||||
&tx_id,
|
||||
&sig.unwrap(),
|
||||
|
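Editorial restatement of the protocol comments above (not part of the commit), in the ASCII notation those comments already use: the sender computes the Schnorr challenge, checks the receiver's partial signature, and only then adds its own share,

	e    = H(M | kR*G + kS*G)
	sR*G ?= kR*G + e * xR*G     (receiver's partial sig verifies)
	sS   = kS + e * xS          (sender's share, posted back to the receiver)

which is what the `aggsig_verify_partial_sig` call above and the `aggsig_calculate_partial_sig` call in the next hunk implement.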
@ -151,11 +168,21 @@ pub fn issue_send_tx(
		return Err(ErrorKind::Signature("Partial Sig from recipient invalid."))?;
	}

	let sig_part = keychain.aggsig_calculate_partial_sig(&tx_id, &recp_pub_nonce, tx.fee(), tx.lock_height()).unwrap();
	let sig_part = keychain
		.aggsig_calculate_partial_sig(&tx_id, &recp_pub_nonce, tx.fee(), tx.lock_height())
		.unwrap();

	// Build the next stage, containing sS (and our pubkeys again, for the recipient's convenience)
	// offset has not been modified during tx building, so pass it back in
	let mut partial_tx = build_partial_tx(&tx_id, keychain, amount_with_fee, kernel_offset, Some(sig_part), tx);
	// Build the next stage, containing sS (and our pubkeys again, for the
	// recipient's convenience) offset has not been modified during tx building,
	// so pass it back in
	let mut partial_tx = build_partial_tx(
		&tx_id,
		keychain,
		amount_with_fee,
		kernel_offset,
		Some(sig_part),
		tx,
	);
	partial_tx.phase = PartialTxPhase::SenderConfirmation;

	// And send again

@ -192,7 +219,16 @@ fn build_send_tx(
	lock_height: u64,
	max_outputs: usize,
	selection_strategy_is_use_all: bool,
) -> Result<(Transaction, BlindingFactor, Vec<OutputData>, Identifier, u64), Error> {
) -> Result<
	(
		Transaction,
		BlindingFactor,
		Vec<OutputData>,
		Identifier,
		u64,
	),
	Error,
> {
	let key_id = keychain.clone().root_key_id();

	// select some spendable coins from the wallet

@ -208,14 +244,14 @@ fn build_send_tx(
	})?;

	// Get the maximum number of outputs in the wallet
	let max_outputs = WalletData::read_wallet(&config.data_file_dir, |wallet_data| {
	let max_outputs = WalletData::read_wallet(&config.data_file_dir, |wallet_data| {
		Ok(wallet_data.select_coins(
				key_id.clone(),
				amount,
				current_height,
				minimum_confirmations,
				max_outputs,
				true,
			key_id.clone(),
			amount,
			current_height,
			minimum_confirmations,
			max_outputs,
			true,
		))
	})?.len();


@ -226,8 +262,8 @@ fn build_send_tx(
	let mut total: u64 = coins.iter().map(|c| c.value).sum();
	let mut amount_with_fee = amount + fee;

	// Here check if we have enough outputs for the amount including fee otherwise look for other
	// outputs and check again
	// Here check if we have enough outputs for the amount including fee otherwise
	// look for other outputs and check again
	while total <= amount_with_fee {
		// End the loop if we have selected all the outputs and still not enough funds
		if coins.len() == max_outputs {

@ -324,12 +360,15 @@ fn inputs_and_change(
	parts.push(build::with_fee(fee));

	// if we are spending 10,000 coins to send 1,000 then our change will be 9,000
	// if the fee is 80 then the recipient will receive 1000 and our change will be 8,920
	// if the fee is 80 then the recipient will receive 1000 and our change will be
	// 8,920
	let change = total - amount - fee;

	// build inputs using the appropriate derived key_ids
	for coin in coins {
		let key_id = keychain.derive_key_id(coin.n_child).context(ErrorKind::Keychain)?;
		let key_id = keychain
			.derive_key_id(coin.n_child)
			.context(ErrorKind::Keychain)?;
		if coin.is_coinbase {
			let block = coin.block.clone();
			let merkle_proof = coin.merkle_proof.clone();
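The change arithmetic in the comment above can be pinned down with its own numbers (hypothetical wallet state, not from the diff):

	// spending total = 10_000 to send amount = 1_000 with fee = 80:
	let (total, amount, fee) = (10_000u64, 1_000u64, 80u64);
	let change = total - amount - fee;
	assert_eq!(change, 8_920); // recipient receives 1_000, the fee takes 80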
@ -378,7 +417,6 @@ mod test {
	use core::core::build;
	use keychain::Keychain;


	#[test]
	// demonstrate that input.commitment == referenced output.commitment
	// based on the public key and amount being spent


@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.


use api::ApiServer;
use keychain::Keychain;
use handlers::CoinbaseHandler;


@ -14,7 +14,7 @@

use blake2;
use rand::{thread_rng, Rng};
use std::{fmt};
use std::fmt;
use std::fmt::Display;
use uuid::Uuid;
use std::convert::From;

@ -68,103 +68,100 @@ pub fn tx_fee(input_len: usize, output_len: usize, base_fee: Option<u64>) -> u64

#[derive(Debug)]
pub struct Error {
	inner: Context<ErrorKind>,
	inner: Context<ErrorKind>,
}

/// Wallet errors, mostly wrappers around underlying crypto or I/O errors.
#[derive(Copy, Clone, Eq, PartialEq, Debug, Fail)]
pub enum ErrorKind {
	#[fail(display = "Not enough funds")]
	NotEnoughFunds(u64),
	#[fail(display = "Not enough funds")] NotEnoughFunds(u64),

	#[fail(display = "Fee dispute: sender fee {}, recipient fee {}", sender_fee, recipient_fee)]
	FeeDispute{sender_fee: u64, recipient_fee: u64},
	#[fail(display = "Fee dispute: sender fee {}, recipient fee {}", sender_fee, recipient_fee)]
	FeeDispute {
		sender_fee: u64,
		recipient_fee: u64,
	},

	#[fail(display = "Fee exceeds amount: sender amount {}, recipient fee {}", sender_amount, recipient_fee)]
	FeeExceedsAmount{sender_amount: u64,recipient_fee: u64},
	#[fail(display = "Fee exceeds amount: sender amount {}, recipient fee {}", sender_amount,
	       recipient_fee)]
	FeeExceedsAmount {
		sender_amount: u64,
		recipient_fee: u64,
	},

	#[fail(display = "Keychain error")]
	Keychain,
	#[fail(display = "Keychain error")] Keychain,

	#[fail(display = "Transaction error")]
	Transaction,
	#[fail(display = "Transaction error")] Transaction,

	#[fail(display = "Secp error")]
	Secp,
	#[fail(display = "Secp error")] Secp,

	#[fail(display = "Wallet data error: {}", _0)]
	WalletData(&'static str),
	#[fail(display = "Wallet data error: {}", _0)] WalletData(&'static str),

	/// An error in the format of the JSON structures exchanged by the wallet
	#[fail(display = "JSON format error")]
	Format,
	/// An error in the format of the JSON structures exchanged by the wallet
	#[fail(display = "JSON format error")]
	Format,

	#[fail(display = "I/O error")] IO,

	#[fail(display = "I/O error")]
	IO,
	/// Error when contacting a node through its API
	#[fail(display = "Node API error")]
	Node,

	/// Error when contacting a node through its API
	#[fail(display = "Node API error")]
	Node,
	/// Error originating from hyper.
	#[fail(display = "Hyper error")]
	Hyper,

	/// Error originating from hyper.
	#[fail(display = "Hyper error")]
	Hyper,
	/// Error originating from hyper uri parsing.
	#[fail(display = "Uri parsing error")]
	Uri,

	/// Error originating from hyper uri parsing.
	#[fail(display = "Uri parsing error")]
	Uri,

	#[fail(display = "Signature error")]
	Signature(&'static str),
	#[fail(display = "Signature error")] Signature(&'static str),

	/// Attempt to use duplicate transaction id in separate transactions
	#[fail(display = "Duplicate transaction ID error")]
	#[fail(display = "Duplicate transaction ID error")]
	DuplicateTransactionId,

	/// Wallet seed already exists
	#[fail(display = "Wallet seed exists error")]
	#[fail(display = "Wallet seed exists error")]
	WalletSeedExists,

	#[fail(display = "Generic error: {}", _0)]
	GenericError(&'static str),
	#[fail(display = "Generic error: {}", _0)] GenericError(&'static str),
}


impl Fail for Error {
	fn cause(&self) -> Option<&Fail> {
		self.inner.cause()
	}
	fn cause(&self) -> Option<&Fail> {
		self.inner.cause()
	}

	fn backtrace(&self) -> Option<&Backtrace> {
		self.inner.backtrace()
	}
	fn backtrace(&self) -> Option<&Backtrace> {
		self.inner.backtrace()
	}
}

impl Display for Error {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		Display::fmt(&self.inner, f)
	}
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		Display::fmt(&self.inner, f)
	}
}

impl Error {
	pub fn kind(&self) -> ErrorKind {
		*self.inner.get_context()
	}
	pub fn kind(&self) -> ErrorKind {
		*self.inner.get_context()
	}
}

impl From<ErrorKind> for Error {
	fn from(kind: ErrorKind) -> Error {
		Error {
			inner: Context::new(kind),
		}
	}
	fn from(kind: ErrorKind) -> Error {
		Error {
			inner: Context::new(kind),
		}
	}
}

impl From<Context<ErrorKind>> for Error {
	fn from(inner: Context<ErrorKind>) -> Error {
		Error { inner: inner }
	}
	fn from(inner: Context<ErrorKind>) -> Error {
		Error { inner: inner }
	}
}
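A small usage sketch (hypothetical call site, consistent with the `From<ErrorKind>` and `kind()` impls in the hunk above) showing how this failure-crate plumbing is meant to be consumed:

	// Sketch: build an Error from a bare ErrorKind, then recover the kind.
	let err: Error = ErrorKind::NotEnoughFunds(100).into();
	match err.kind() {
		ErrorKind::NotEnoughFunds(available) => {
			error!(LOGGER, "only {} available", available);
		}
		_ => error!(LOGGER, "unexpected error: {}", err),
	}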
#[derive(Debug, Clone, Serialize, Deserialize)]

@ -271,7 +268,6 @@ impl<'de> serde::de::Visitor<'de> for MerkleProofWrapperVisitor {
	}
}


#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord)]
pub struct BlockIdentifier(Hash);

@ -371,7 +367,8 @@ impl OutputData {
	}
}

	/// Check if output is eligible to spend based on state and height and confirmations
	/// Check if output is eligible to spend based on state and height and
	/// confirmations
	pub fn eligible_to_spend(&self, current_height: u64, minimum_confirmations: u64) -> bool {
		if [OutputStatus::Spent, OutputStatus::Locked].contains(&self.status) {
			return false;

@ -404,7 +401,8 @@ impl WalletSeed {
	}

	fn from_hex(hex: &str) -> Result<WalletSeed, Error> {
		let bytes = util::from_hex(hex.to_string()).context(ErrorKind::GenericError("Invalid hex"))?;
		let bytes =
			util::from_hex(hex.to_string()).context(ErrorKind::GenericError("Invalid hex"))?;
		Ok(WalletSeed::from_bytes(&bytes))
	}

@ -429,9 +427,7 @@ impl WalletSeed {

		let seed_file_path = &format!(
			"{}{}{}",
			wallet_config.data_file_dir,
			MAIN_SEPARATOR,
			SEED_FILE,
			wallet_config.data_file_dir, MAIN_SEPARATOR, SEED_FILE,
		);

		debug!(LOGGER, "Generating wallet seed file at: {}", seed_file_path,);

@ -441,7 +437,8 @@ impl WalletSeed {
		} else {
			let seed = WalletSeed::init_new();
			let mut file = File::create(seed_file_path).context(ErrorKind::IO)?;
			file.write_all(&seed.to_hex().as_bytes()).context(ErrorKind::IO)?;
			file.write_all(&seed.to_hex().as_bytes())
				.context(ErrorKind::IO)?;
			Ok(seed)
		}
	}

@ -452,9 +449,7 @@ impl WalletSeed {

		let seed_file_path = &format!(
			"{}{}{}",
			wallet_config.data_file_dir,
			MAIN_SEPARATOR,
			SEED_FILE,
			wallet_config.data_file_dir, MAIN_SEPARATOR, SEED_FILE,
		);

		debug!(LOGGER, "Using wallet seed file at: {}", seed_file_path,);

@ -539,7 +534,10 @@ impl WalletData {
				LOGGER,
				"Failed to acquire wallet lock file (multiple retries)",
			);
			return Err(e.context(ErrorKind::WalletData("Failed to acquire lock file")).into());
			return Err(
				e.context(ErrorKind::WalletData("Failed to acquire lock file"))
					.into(),
			);
		}
	}

@ -549,7 +547,9 @@ impl WalletData {
		wdat.write(data_file_path)?;

		// delete the lock file
		fs::remove_file(lock_file_path).context(ErrorKind::WalletData("Could not remove wallet lock file. Maybe insufficient rights?"))?;
		fs::remove_file(lock_file_path).context(ErrorKind::WalletData(
			"Could not remove wallet lock file. Maybe insufficient rights?",
		))?;

		info!(LOGGER, "... released wallet lock");
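The acquire/write/remove sequence in the two hunks above is a plain lock-file pattern; a condensed, std-only sketch of the same idea (the path, retry policy, and helper name are assumptions, not the wallet's actual code):

	use std::fs::{self, OpenOptions};

	// Sketch: create_new() fails if the lock file already exists, which is
	// what makes the file act as an advisory lock across wallet processes.
	fn with_lock(lock_path: &str, work: impl FnOnce() -> std::io::Result<()>) -> std::io::Result<()> {
		let _lock = OpenOptions::new().write(true).create_new(true).open(lock_path)?;
		let res = work();
		fs::remove_file(lock_path)?; // always release the lock file afterwards
		res
	}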
@ -570,10 +570,12 @@ impl WalletData {

	/// Read output_data vec from disk.
	fn read_outputs(data_file_path: &str) -> Result<Vec<OutputData>, Error> {
		let data_file = File::open(data_file_path).context(ErrorKind::WalletData(&"Could not open wallet file"))?;
		serde_json::from_reader(data_file).map_err(|e| { e.context(ErrorKind::WalletData(&"Error reading wallet file ")).into()})
		let data_file = File::open(data_file_path)
			.context(ErrorKind::WalletData(&"Could not open wallet file"))?;
		serde_json::from_reader(data_file).map_err(|e| {
			e.context(ErrorKind::WalletData(&"Error reading wallet file "))
				.into()
		})
	}

	/// Populate wallet_data with output_data from disk.

@ -590,14 +592,16 @@ impl WalletData {

	/// Write the wallet data to disk.
	fn write(&self, data_file_path: &str) -> Result<(), Error> {
		let mut data_file = File::create(data_file_path).map_err(|e| {
			e.context(ErrorKind::WalletData(&"Could not create "))})?;
		let mut data_file = File::create(data_file_path)
			.map_err(|e| e.context(ErrorKind::WalletData(&"Could not create ")))?;
		let mut outputs = self.outputs.values().collect::<Vec<_>>();
		outputs.sort();
		let res_json = serde_json::to_vec_pretty(&outputs).map_err(|e| {
			e.context(ErrorKind::WalletData("Error serializing wallet data"))
		})?;
		data_file.write_all(res_json.as_slice()).context(ErrorKind::WalletData(&"Error writing wallet file")).map_err(|e| e.into())
		let res_json = serde_json::to_vec_pretty(&outputs)
			.map_err(|e| e.context(ErrorKind::WalletData("Error serializing wallet data")))?;
		data_file
			.write_all(res_json.as_slice())
			.context(ErrorKind::WalletData(&"Error writing wallet file"))
			.map_err(|e| e.into())
	}

	/// Append a new output data to the wallet data.

@ -654,12 +658,12 @@ impl WalletData {

		// use a sliding window to identify potential sets of possible outputs to spend
		// Case of amount > total amount of max_outputs(500):
		// The limit exists because by default, we always select as many inputs as possible in a transaction,
		// to reduce both the UTXO set and the fees.
		// But that only makes sense up to a point, hence the limit to avoid being too greedy.
		// But if max_outputs(500) is actually not enough to cover the whole amount,
		// the wallet should allow going over it to satisfy what the user wants to send.
		// So the wallet considers max_outputs more of a soft limit.
		// The limit exists because by default, we always select as many inputs as
		// possible in a transaction, to reduce both the UTXO set and the fees.
		// But that only makes sense up to a point, hence the limit to avoid being too
		// greedy. But if max_outputs(500) is actually not enough to cover the whole
		// amount, the wallet should allow going over it to satisfy what the user
		// wants to send. So the wallet considers max_outputs more of a soft limit.
		if eligible.len() > max_outputs {
			for window in eligible.windows(max_outputs) {
				let windowed_eligibles = window.iter().cloned().collect::<Vec<_>>();

@ -668,9 +672,14 @@ impl WalletData {
			}
		}
		// Not found in any window whose total amount >= amount.
		// Then take coins from the smallest one up to the total amount of selected coins = the amount.
		// Then take coins from the smallest one up to the total amount of selected
		// coins = the amount.
		if let Some(outputs) = self.select_from(amount, false, eligible.clone()) {
			debug!(LOGGER, "Extending maximum number of outputs. {} outputs selected.", outputs.len());
			debug!(
				LOGGER,
				"Extending maximum number of outputs. {} outputs selected.",
				outputs.len()
			);
			return outputs;
		}
	} else {

@ -680,7 +689,8 @@ impl WalletData {
	}

	// we failed to find a suitable set of outputs to spend,
	// so return the largest amount we can so we can provide guidance on what is possible
	// so return the largest amount we can so we can provide guidance on what is
	// possible
	eligible.reverse();
	eligible.iter().take(max_outputs).cloned().collect()
}
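The soft-limit selection described in the comments above condenses to the following shape (editorial sketch; `eligible` is assumed sorted, the boolean flag passed to `select_from` is an assumption, and `select_from` itself is the helper shown in the next hunk):

	// Sketch: try each window of max_outputs coins first; if no window covers
	// the amount, retry over all eligible coins, allowing the count to exceed
	// max_outputs -- the "soft limit" behaviour the comment describes.
	if eligible.len() > max_outputs {
		for window in eligible.windows(max_outputs) {
			let windowed_eligibles = window.iter().cloned().collect::<Vec<_>>();
			if let Some(outputs) = self.select_from(amount, true, windowed_eligibles) {
				return outputs;
			}
		}
		// no window sufficed: extend past the soft limit
		if let Some(outputs) = self.select_from(amount, false, eligible.clone()) {
			return outputs;
		}
	}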
|
@ -700,14 +710,15 @@ impl WalletData {
|
|||
} else {
|
||||
let mut selected_amount = 0;
|
||||
return Some(
|
||||
outputs.iter()
|
||||
outputs
|
||||
.iter()
|
||||
.take_while(|out| {
|
||||
let res = selected_amount < amount;
|
||||
selected_amount += out.value;
|
||||
res
|
||||
})
|
||||
.cloned()
|
||||
.collect()
|
||||
.collect(),
|
||||
);
|
||||
}
|
||||
} else {
|
||||
|
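Note that the take_while above deliberately keeps the coin that crosses the threshold: the predicate tests selected_amount before adding the current value. A standalone illustration with hypothetical values:

	// values 3, 4, 5, 6 with amount = 8: keeps 3 (0 < 8), 4 (3 < 8) and
	// 5 (7 < 8), then stops once the running total reaches 12 >= 8.
	let values = [3u64, 4, 5, 6];
	let amount = 8u64;
	let mut selected_amount = 0u64;
	let picked: Vec<u64> = values
		.iter()
		.take_while(|v| {
			let res = selected_amount < amount;
			selected_amount += **v;
			res
		})
		.cloned()
		.collect();
	assert_eq!(picked, vec![3, 4, 5]);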
@ -733,14 +744,14 @@ pub enum PartialTxPhase {
	SenderInitiation,
	ReceiverInitiation,
	SenderConfirmation,
	ReceiverConfirmation
	ReceiverConfirmation,
}

/// Helper in serializing the information required during an interactive aggsig
/// transaction
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct PartialTx {
	pub phase: PartialTxPhase,
	pub phase: PartialTxPhase,
	pub id: Uuid,
	pub amount: u64,
	pub public_blind_excess: String,

@ -754,14 +765,13 @@ pub struct PartialTx {
/// aggsig_tx_context should contain the private key/nonce pair
/// the resulting partial tx will contain the corresponding public keys
pub fn build_partial_tx(
	transaction_id : &Uuid,
	transaction_id: &Uuid,
	keychain: &keychain::Keychain,
	receive_amount: u64,
	kernel_offset: BlindingFactor,
	part_sig: Option<secp::Signature>,
	tx: Transaction,
) -> PartialTx {

	let (pub_excess, pub_nonce) = keychain.aggsig_get_public_keys(transaction_id);
	let mut pub_excess = pub_excess.serialize_vec(keychain.secp(), true).clone();
	let len = pub_excess.clone().len();

@ -773,7 +783,7 @@ pub fn build_partial_tx(

	PartialTx {
		phase: PartialTxPhase::SenderInitiation,
		id : transaction_id.clone(),
		id: transaction_id.clone(),
		amount: receive_amount,
		public_blind_excess: util::to_hex(pub_excess),
		public_nonce: util::to_hex(pub_nonce),

@ -791,23 +801,43 @@ pub fn build_partial_tx(
pub fn read_partial_tx(
	keychain: &keychain::Keychain,
	partial_tx: &PartialTx,
) -> Result<(u64, PublicKey, PublicKey, BlindingFactor, Option<Signature>, Transaction), Error> {
	let blind_bin = util::from_hex(partial_tx.public_blind_excess.clone()).context(ErrorKind::GenericError("Could not decode HEX"))?;
	let blinding = PublicKey::from_slice(keychain.secp(), &blind_bin[..]).context(ErrorKind::GenericError("Could not construct public key"))?;
) -> Result<
	(
		u64,
		PublicKey,
		PublicKey,
		BlindingFactor,
		Option<Signature>,
		Transaction,
	),
	Error,
> {
	let blind_bin = util::from_hex(partial_tx.public_blind_excess.clone())
		.context(ErrorKind::GenericError("Could not decode HEX"))?;
	let blinding = PublicKey::from_slice(keychain.secp(), &blind_bin[..])
		.context(ErrorKind::GenericError("Could not construct public key"))?;

	let nonce_bin = util::from_hex(partial_tx.public_nonce.clone()).context(ErrorKind::GenericError("Could not decode HEX"))?;
	let nonce = PublicKey::from_slice(keychain.secp(), &nonce_bin[..]).context(ErrorKind::GenericError("Could not construct public key"))?;
	let nonce_bin = util::from_hex(partial_tx.public_nonce.clone())
		.context(ErrorKind::GenericError("Could not decode HEX"))?;
	let nonce = PublicKey::from_slice(keychain.secp(), &nonce_bin[..])
		.context(ErrorKind::GenericError("Could not construct public key"))?;

	let kernel_offset = BlindingFactor::from_hex(&partial_tx.kernel_offset.clone()).context(ErrorKind::GenericError("Could not decode HEX"))?;
	let kernel_offset = BlindingFactor::from_hex(&partial_tx.kernel_offset.clone())
		.context(ErrorKind::GenericError("Could not decode HEX"))?;

	let sig_bin = util::from_hex(partial_tx.part_sig.clone()).context(ErrorKind::GenericError("Could not decode HEX"))?;
	let sig_bin = util::from_hex(partial_tx.part_sig.clone())
		.context(ErrorKind::GenericError("Could not decode HEX"))?;
	let sig = match sig_bin.len() {
		1 => None,
		_ => Some(Signature::from_der(keychain.secp(), &sig_bin[..]).context(ErrorKind::GenericError("Could not create signature"))?),
		_ => Some(Signature::from_der(keychain.secp(), &sig_bin[..])
			.context(ErrorKind::GenericError("Could not create signature"))?),
	};
	let tx_bin = util::from_hex(partial_tx.tx.clone()).context(ErrorKind::GenericError("Could not decode HEX"))?;
	let tx = ser::deserialize(&mut &tx_bin[..]).context(ErrorKind::GenericError("Could not deserialize transaction, invalid format."))?;
	Ok((partial_tx.amount, blinding, nonce, kernel_offset, sig, tx))
	let tx_bin = util::from_hex(partial_tx.tx.clone())
		.context(ErrorKind::GenericError("Could not decode HEX"))?;
	let tx = ser::deserialize(&mut &tx_bin[..]).context(ErrorKind::GenericError(
		"Could not deserialize transaction, invalid format.",
	))?;
	Ok((partial_tx.amount, blinding, nonce, kernel_offset, sig, tx))
}

/// Amount in request to build a coinbase output.