mirror of
https://github.com/mimblewimble/grin.git
Rustify core/src/core (#1122)
Small refactoring of one folder; if it makes sense I could extend the scope.

* Remove some cloning (some real, some just verbosity in the code)
* Naming conventions like to_*/into_*
* Some of Clippy's suggestions

I found that we don't use the field init shorthand syntax, so I didn't touch that part; was it discussed before?
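The renames follow the Rust API naming guidelines: a to_* conversion borrows self, while an into_* conversion consumes it (hence into_num() becomes to_num() on the Copy-able Difficulty below, and to_output() becomes into_output() where self is moved). The diff also swaps the old try! macro for the ? operator throughout. A minimal, self-contained sketch of both idioms; Difficulty here is a stand-in with just a num field, not the full type from this commit:

    use std::num::ParseIntError;

    #[derive(Debug, Clone, Copy, PartialEq)]
    pub struct Difficulty {
        num: u64,
    }

    impl Difficulty {
        /// to_*: borrows self (cheap for Copy types); the value stays usable after the call.
        pub fn to_num(&self) -> u64 {
            self.num
        }

        /// ? propagates the error to the caller, replacing the older try!(...) macro.
        pub fn from_str(s: &str) -> Result<Difficulty, ParseIntError> {
            let num = s.parse::<u64>()?;
            // field init shorthand intentionally avoided, matching the repo style
            Ok(Difficulty { num: num })
        }
    }

    fn main() {
        let d = Difficulty::from_str("42").unwrap();
        assert_eq!(d.to_num(), 42);
        // d is still usable here: to_num only borrowed it
        println!("difficulty: {}", d.to_num());
    }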
parent 7812a02233
commit 2fa32d15ce
27 changed files with 215 additions and 263 deletions
@@ -568,7 +568,7 @@ impl BlockHandler {
 ))?;
 }
 let vec = util::from_hex(input).unwrap();
-Ok(Hash::from_vec(vec))
+Ok(Hash::from_vec(&vec))
 }
 }

@@ -53,7 +53,7 @@ impl Tip {
 height: tip.height,
 last_block_pushed: util::to_hex(tip.last_block_h.to_vec()),
 prev_block_to_last: util::to_hex(tip.prev_block_h.to_vec()),
-total_difficulty: tip.total_difficulty.into_num(),
+total_difficulty: tip.total_difficulty.to_num(),
 }
 }
 }

@@ -497,7 +497,7 @@ impl BlockHeaderPrintable {
 range_proof_root: util::to_hex(h.range_proof_root.to_vec()),
 kernel_root: util::to_hex(h.kernel_root.to_vec()),
 nonce: h.nonce,
-total_difficulty: h.total_difficulty.into_num(),
+total_difficulty: h.total_difficulty.to_num(),
 total_kernel_offset: h.total_kernel_offset.to_hex(),
 }
 }

@@ -30,8 +30,8 @@ use pipe;
 use store;
 use txhashset;
 use types::*;
-use util::secp::pedersen::{Commitment, RangeProof};
 use util::LOGGER;
+use util::secp::pedersen::{Commitment, RangeProof};

 /// Orphan pool size is limited by MAX_ORPHAN_SIZE
 pub const MAX_ORPHAN_SIZE: usize = 200;

@@ -260,7 +260,7 @@ impl Chain {
 debug!(
 LOGGER,
 "Chain init: {} @ {} [{}]",
-head.total_difficulty.into_num(),
+head.total_difficulty.to_num(),
 head.height,
 head.last_block_h,
 );

@@ -277,7 +277,7 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), E
 let target_difficulty = header.total_difficulty.clone() - prev.total_difficulty.clone();

-if header.pow.clone().to_difficulty() < target_difficulty {
+if header.pow.to_difficulty() < target_difficulty {
 return Err(Error::DifficultyTooLow);
 }

@@ -297,8 +297,8 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), E
 error!(
 LOGGER,
 "validate_header: BANNABLE OFFENCE: header cumulative difficulty {} != {}",
-target_difficulty.into_num(),
-prev.total_difficulty.into_num() + network_difficulty.into_num()
+target_difficulty.to_num(),
+prev.total_difficulty.to_num() + network_difficulty.to_num()
 );
 return Err(Error::WrongTotalDifficulty);
 }

@@ -930,7 +930,7 @@ impl<'a> Extension<'a> {
 if pmmr::is_leaf(n) {
 if let Some(out) = self.output_pmmr.get_data(n) {
 if let Some(rp) = self.rproof_pmmr.get_data(n) {
-out.to_output(rp).verify_proof()?;
+out.into_output(rp).verify_proof()?;
 } else {
 // TODO - rangeproof not found
 return Err(Error::OutputNotFound);

@@ -24,8 +24,8 @@ extern crate time;
 use std::fs;
 use std::sync::Arc;

-use chain::types::*;
 use chain::Chain;
+use chain::types::*;
 use core::consensus;
 use core::core::hash::Hashed;
 use core::core::target::Difficulty;

@@ -444,7 +444,7 @@ fn actual_diff_iter_output() {
 println!(
 "next_difficulty time: {}, diff: {}, duration: {} ",
 elem.0,
-elem.1.into_num(),
+elem.1.to_num(),
 last_time - elem.0
 );
 last_time = elem.0;

@@ -221,7 +221,7 @@ where
 let diff_sum = diff_data
 .iter()
 .skip(MEDIAN_TIME_WINDOW as usize)
-.fold(0, |sum, d| sum + d.clone().unwrap().1.into_num());
+.fold(0, |sum, d| sum + d.clone().unwrap().1.to_num());

 // Apply dampening except when difficulty is near 1
 let ts_damp = if diff_sum < DAMP_FACTOR * DIFFICULTY_ADJUST_WINDOW {

@@ -16,6 +16,7 @@

 use rand::{thread_rng, Rng};
 use std::collections::HashSet;
+use std::iter::FromIterator;
 use time;

 use consensus;

@@ -199,7 +200,7 @@ impl BlockHeader {
 [write_u64, self.height],
 [write_fixed_bytes, &self.previous],
 [write_i64, self.timestamp.to_timespec().sec],
-[write_u64, self.total_difficulty.into_num()],
+[write_u64, self.total_difficulty.to_num()],
 [write_fixed_bytes, &self.output_root],
 [write_fixed_bytes, &self.range_proof_root],
 [write_fixed_bytes, &self.kernel_root],

@@ -245,10 +246,10 @@ pub struct CompactBlock {
 /// purpose of full serialization and the one of just extracting a hash.
 impl Writeable for CompactBlock {
 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
-try!(self.header.write(writer));
+self.header.write(writer)?;

 if writer.serialization_mode() != ser::SerializationMode::Hash {
-try!(writer.write_u64(self.nonce));
+writer.write_u64(self.nonce)?;

 ser_multiwrite!(
 writer,

@@ -263,9 +264,9 @@ impl Writeable for CompactBlock {
 // Consensus rule that everything is sorted in lexicographical order on the
 // wire.
-try!(out_full.write_sorted(writer));
-try!(kern_full.write_sorted(writer));
-try!(kern_ids.write_sorted(writer));
+out_full.write_sorted(writer)?;
+kern_full.write_sorted(writer)?;
+kern_ids.write_sorted(writer)?;
 }
 Ok(())
 }

@@ -275,7 +276,7 @@ impl Writeable for CompactBlock {
 /// compact block from a binary stream.
 impl Readable for CompactBlock {
 fn read(reader: &mut Reader) -> Result<CompactBlock, ser::Error> {
-let header = try!(BlockHeader::read(reader));
+let header = BlockHeader::read(reader)?;

 let (nonce, out_full_len, kern_full_len, kern_id_len) =
 ser_multiread!(reader, read_u64, read_u64, read_u64, read_u64);

@@ -316,7 +317,7 @@ pub struct Block {
 /// full serialization and the one of just extracting a hash.
 impl Writeable for Block {
 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
-try!(self.header.write(writer));
+self.header.write(writer)?;

 if writer.serialization_mode() != ser::SerializationMode::Hash {
 ser_multiwrite!(

@@ -332,9 +333,9 @@ impl Writeable for Block {
 // Consensus rule that everything is sorted in lexicographical order on the
 // wire.
-try!(inputs.write_sorted(writer));
-try!(outputs.write_sorted(writer));
-try!(kernels.write_sorted(writer));
+inputs.write_sorted(writer)?;
+outputs.write_sorted(writer)?;
+kernels.write_sorted(writer)?;
 }
 Ok(())
 }

@@ -344,7 +345,7 @@ impl Writeable for Block {
 /// from a binary stream.
 impl Readable for Block {
 fn read(reader: &mut Reader) -> Result<Block, ser::Error> {
-let header = try!(BlockHeader::read(reader));
+let header = BlockHeader::read(reader)?;

 let (input_len, output_len, kernel_len) =
 ser_multiread!(reader, read_u64, read_u64, read_u64);

@@ -358,7 +359,6 @@ impl Readable for Block {
 inputs: inputs,
 outputs: outputs,
 kernels: kernels,
-..Default::default()
 })
 }
 }

@@ -404,8 +404,7 @@ impl Block {
 difficulty: Difficulty,
 reward_output: (Output, TxKernel),
 ) -> Result<Block, Error> {
-let block = Block::with_reward(prev, txs, reward_output.0, reward_output.1, difficulty)?;
-Ok(block)
+Block::with_reward(prev, txs, reward_output.0, reward_output.1, difficulty)
 }

 /// Hydrate a block from a compact block.

@@ -435,9 +434,9 @@ impl Block {
 all_kernels.extend(cb.kern_full);

 // convert the sets to vecs
-let mut all_inputs = all_inputs.iter().cloned().collect::<Vec<_>>();
-let mut all_outputs = all_outputs.iter().cloned().collect::<Vec<_>>();
-let mut all_kernels = all_kernels.iter().cloned().collect::<Vec<_>>();
+let mut all_inputs = Vec::from_iter(all_inputs);
+let mut all_outputs = Vec::from_iter(all_outputs);
+let mut all_kernels = Vec::from_iter(all_kernels);

 // sort them all lexicographically
 all_inputs.sort();

@@ -526,9 +525,9 @@ impl Block {
 kernel_offsets.push(tx.offset);

 // add all tx inputs/outputs/kernels to the block
-kernels.extend(tx.kernels.iter().cloned());
-inputs.extend(tx.inputs.iter().cloned());
-outputs.extend(tx.outputs.iter().cloned());
+kernels.extend(tx.kernels.into_iter());
+inputs.extend(tx.inputs.into_iter());
+outputs.extend(tx.outputs.into_iter());
 }

 // include the reward kernel and output

@@ -546,8 +545,7 @@ impl Block {
 let secp = static_secp_instance();
 let secp = secp.lock().unwrap();
 let mut keys = kernel_offsets
-.iter()
-.cloned()
+.into_iter()
 .filter(|x| *x != BlindingFactor::zero())
 .filter_map(|x| x.secret_key(&secp).ok())
 .collect::<Vec<_>>();

@@ -571,7 +569,7 @@ impl Block {
 ..time::now_utc()
 },
 previous: prev.hash(),
-total_difficulty: difficulty + prev.total_difficulty.clone(),
+total_difficulty: difficulty + prev.total_difficulty,
 total_kernel_offset: total_kernel_offset,
 ..Default::default()
 },

@@ -594,13 +592,14 @@ impl Block {
 /// Matches any output with a potential spending input, eliminating them
 /// from the block. Provides a simple way to cut-through the block. The
 /// elimination is stable with respect to the order of inputs and outputs.
+/// Method consumes the block.
 ///
 /// NOTE: exclude coinbase from cut-through process
 /// if a block contains a new coinbase output and
 /// is a transaction spending a previous coinbase
 /// we do not want to cut-through (all coinbase must be preserved)
 ///
-pub fn cut_through(&self) -> Block {
+pub fn cut_through(self) -> Block {
 let in_set = self.inputs
 .iter()
 .map(|inp| inp.commitment())

@@ -615,26 +614,24 @@ impl Block {
 let to_cut_through = in_set.intersection(&out_set).collect::<HashSet<_>>();

 let new_inputs = self.inputs
-.iter()
+.into_iter()
 .filter(|inp| !to_cut_through.contains(&inp.commitment()))
-.cloned()
 .collect::<Vec<_>>();

 let new_outputs = self.outputs
-.iter()
+.into_iter()
 .filter(|out| !to_cut_through.contains(&out.commitment()))
-.cloned()
 .collect::<Vec<_>>();

 Block {
 header: BlockHeader {
-pow: self.header.pow.clone(),
-total_difficulty: self.header.total_difficulty.clone(),
+pow: self.header.pow,
+total_difficulty: self.header.total_difficulty,
 ..self.header
 },
 inputs: new_inputs,
 outputs: new_outputs,
-kernels: self.kernels.clone(),
+kernels: self.kernels,
 }
 }

@@ -651,9 +648,7 @@ impl Block {
 self.verify_coinbase()?;
 self.verify_inputs()?;
 self.verify_kernel_lock_heights()?;
-let (new_output_sum, new_kernel_sum) = self.verify_sums(prev_output_sum, prev_kernel_sum)?;
-
-Ok((new_output_sum, new_kernel_sum))
+self.verify_sums(prev_output_sum, prev_kernel_sum)
 }

 fn verify_weight(&self) -> Result<(), Error> {

@@ -745,14 +740,12 @@ impl Block {
 let cb_outs = self.outputs
 .iter()
 .filter(|out| out.features.contains(OutputFeatures::COINBASE_OUTPUT))
-.cloned()
-.collect::<Vec<Output>>();
+.collect::<Vec<&Output>>();

 let cb_kerns = self.kernels
 .iter()
 .filter(|kernel| kernel.features.contains(KernelFeatures::COINBASE_KERNEL))
-.cloned()
-.collect::<Vec<TxKernel>>();
+.collect::<Vec<&TxKernel>>();

 let over_commit;
 let out_adjust_sum;

@@ -17,11 +17,11 @@
 //! Primary hash function used in the protocol
 //!

+use byteorder::{BigEndian, ByteOrder};
 use std::cmp::min;
-use std::{fmt, ops};
 use std::convert::AsRef;
 use std::ops::Add;
-use byteorder::{BigEndian, ByteOrder};
+use std::{fmt, ops};

 use blake2::blake2b::Blake2b;

@@ -39,8 +39,8 @@ pub struct Hash(pub [u8; 32]);

 impl fmt::Debug for Hash {
 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-for i in self.0[..4].iter().cloned() {
-try!(write!(f, "{:02x}", i));
+for i in self.0[..4].iter() {
+write!(f, "{:02x}", i)?;
 }
 Ok(())
 }

@@ -55,11 +55,10 @@ impl fmt::Display for Hash {
 impl Hash {
 /// Builds a Hash from a byte vector. If the vector is too short, it will be
 /// completed by zeroes. If it's too long, it will be truncated.
-pub fn from_vec(v: Vec<u8>) -> Hash {
+pub fn from_vec(v: &[u8]) -> Hash {
 let mut h = [0; 32];
-for i in 0..min(v.len(), 32) {
-h[i] = v[i];
-}
+let copy_size = min(v.len(), 32);
+h[..copy_size].copy_from_slice(&v[..copy_size]);
 Hash(h)
 }

@@ -75,8 +74,9 @@ impl Hash {

 /// Convert hex string back to hash.
 pub fn from_hex(hex: &str) -> Result<Hash, Error> {
-let bytes = util::from_hex(hex.to_string()).unwrap();
-Ok(Hash::from_vec(bytes))
+let bytes = util::from_hex(hex.to_string())
+.map_err(|_| Error::HexError(format!("failed to decode {}", hex)))?;
+Ok(Hash::from_vec(&bytes))
 }

 /// Most significant 64 bits

@@ -133,11 +133,9 @@ impl AsRef<[u8]> for Hash {

 impl Readable for Hash {
 fn read(reader: &mut Reader) -> Result<Hash, ser::Error> {
-let v = try!(reader.read_fixed_bytes(32));
+let v = reader.read_fixed_bytes(32)?;
 let mut a = [0; 32];
-for i in 0..a.len() {
-a[i] = v[i];
-}
+a.copy_from_slice(&v[..]);
 Ok(Hash(a))
 }
 }

@@ -177,7 +175,7 @@ impl HashWriter {
 /// current state
 pub fn into_hash(self) -> Hash {
 let mut res = [0; 32];
-(&mut res).copy_from_slice(self.state.finalize().as_bytes());
+res.copy_from_slice(self.state.finalize().as_bytes());
 Hash(res)
 }
 }

@@ -230,14 +228,15 @@ impl<W: ser::Writeable> Hashed for W {

 impl<T: Writeable> consensus::VerifySortOrder<T> for Vec<T> {
 fn verify_sort_order(&self) -> Result<(), consensus::Error> {
-match self.iter()
+if self.iter()
 .map(|item| item.hash())
 .collect::<Vec<_>>()
 .windows(2)
 .any(|pair| pair[0] > pair[1])
 {
-true => Err(consensus::Error::SortError),
-false => Ok(()),
+Err(consensus::Error::SortError)
+} else {
+Ok(())
 }
 }
 }

@@ -38,8 +38,8 @@ pub trait ShortIdentifiable {
 impl<H: Hashed> ShortIdentifiable for H {
 /// Generate a short_id via the following -
 ///
-/// * extract k0/k1 from block_hash hashed with the nonce (first two u64 values)
-/// * initialize a siphasher24 with k0/k1
+/// * extract k0/k1 from block_hash hashed with the nonce (first two u64
+/// values) * initialize a siphasher24 with k0/k1
 /// * self.hash() passing in the siphasher24 instance
 /// * drop the 2 most significant bytes (to return a 6 byte short_id)
 ///

@@ -75,24 +75,23 @@ impl<H: Hashed> ShortIdentifiable for H {
 pub struct ShortId([u8; 6]);

 /// We want to sort short_ids in a canonical and consistent manner so we can
-/// verify sort order in the same way we do for full inputs|outputs|kernels themselves.
+/// verify sort order in the same way we do for full inputs|outputs|kernels
+/// themselves.
 hashable_ord!(ShortId);

 impl ::std::fmt::Debug for ShortId {
 fn fmt(&self, f: &mut ::std::fmt::Formatter) -> ::std::fmt::Result {
-try!(write!(f, "{}(", stringify!(ShortId)));
-try!(write!(f, "{}", self.to_hex()));
+write!(f, "{}(", stringify!(ShortId))?;
+write!(f, "{}", self.to_hex())?;
 write!(f, ")")
 }
 }

 impl Readable for ShortId {
 fn read(reader: &mut Reader) -> Result<ShortId, ser::Error> {
-let v = try!(reader.read_fixed_bytes(SHORT_ID_SIZE));
+let v = reader.read_fixed_bytes(SHORT_ID_SIZE)?;
 let mut a = [0; SHORT_ID_SIZE];
-for i in 0..a.len() {
-a[i] = v[i];
-}
+a.copy_from_slice(&v[..]);
 Ok(ShortId(a))
 }
 }

@@ -107,9 +106,8 @@ impl ShortId {
 /// Build a new short_id from a byte slice
 pub fn from_bytes(bytes: &[u8]) -> ShortId {
 let mut hash = [0; SHORT_ID_SIZE];
-for i in 0..min(SHORT_ID_SIZE, bytes.len()) {
-hash[i] = bytes[i];
-}
+let copy_size = min(SHORT_ID_SIZE, bytes.len());
+hash[..copy_size].copy_from_slice(&bytes[..copy_size]);
 ShortId(hash)
 }

@@ -121,7 +119,7 @@ impl ShortId {
 /// Reconstructs a switch commit hash from a hex string.
 pub fn from_hex(hex: &str) -> Result<ShortId, ser::Error> {
 let bytes = util::from_hex(hex.to_string())
-.map_err(|_| ser::Error::HexError(format!("short_id from_hex error")))?;
+.map_err(|_| ser::Error::HexError("short_id from_hex error".to_string()))?;
 Ok(ShortId::from_bytes(&bytes))
 }

@@ -20,25 +20,24 @@ pub mod id;
 pub mod pmmr;
 pub mod target;
 pub mod transaction;
-use consensus::GRIN_BASE;
-#[allow(dead_code)]

 use rand::{thread_rng, Rng};
-use std::{fmt, iter};
 use std::cmp::Ordering;
 use std::num::ParseFloatError;
+use consensus::GRIN_BASE;
+use std::{fmt, iter};

-use util::{secp, secp_static, static_secp_instance};
 use util::secp::pedersen::*;
+use util::{secp, secp_static, static_secp_instance};

 pub use self::block::*;
-pub use self::transaction::*;
 pub use self::id::ShortId;
+pub use self::transaction::*;
 use core::hash::Hashed;
-use ser::{Error, Readable, Reader, Writeable, Writer};
 use global;
 use keychain;
 use keychain::BlindingFactor;
+use ser::{Error, Readable, Reader, Writeable, Writer};

 /// Implemented by types that hold inputs and outputs (and kernels)
 /// containing Pedersen commitments.

@@ -141,63 +140,36 @@ pub trait Committed {
 }

 /// Proof of work
+#[derive(Clone, PartialOrd, PartialEq)]
 pub struct Proof {
 /// The nonces
 pub nonces: Vec<u32>,
-
-/// The proof size
-pub proof_size: usize,
 }

 impl fmt::Debug for Proof {
 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
-try!(write!(f, "Cuckoo("));
+write!(f, "Cuckoo(")?;
 for (i, val) in self.nonces[..].iter().enumerate() {
-try!(write!(f, "{:x}", val));
+write!(f, "{:x}", val)?;
 if i < self.nonces.len() - 1 {
-try!(write!(f, " "));
+write!(f, " ")?;
 }
 }
 write!(f, ")")
 }
 }
-impl PartialOrd for Proof {
-fn partial_cmp(&self, other: &Proof) -> Option<Ordering> {
-self.nonces.partial_cmp(&other.nonces)
-}
-}
-impl PartialEq for Proof {
-fn eq(&self, other: &Proof) -> bool {
-self.nonces[..] == other.nonces[..]
-}
-}

 impl Eq for Proof {}
-impl Clone for Proof {
-fn clone(&self) -> Proof {
-let mut out_nonces = Vec::new();
-for n in self.nonces.iter() {
-out_nonces.push(*n as u32);
-}
-Proof {
-proof_size: out_nonces.len(),
-nonces: out_nonces,
-}
-}
-}

 impl Proof {
 /// Builds a proof with all bytes zeroed out
 pub fn new(in_nonces: Vec<u32>) -> Proof {
-Proof {
-proof_size: in_nonces.len(),
-nonces: in_nonces,
-}
+Proof { nonces: in_nonces }
 }

 /// Builds a proof with all bytes zeroed out
 pub fn zero(proof_size: usize) -> Proof {
 Proof {
-proof_size: proof_size,
 nonces: vec![0; proof_size],
 }
 }

@@ -211,16 +183,13 @@ impl Proof {
 .map(|()| rng.gen())
 .take(proof_size)
 .collect();
-Proof {
-proof_size: proof_size,
-nonces: v,
-}
+Proof { nonces: v }
 }

 /// Converts the proof to a vector of u64s
 pub fn to_u64s(&self) -> Vec<u64> {
-let mut out_nonces = Vec::with_capacity(self.proof_size);
-for n in self.nonces.iter() {
+let mut out_nonces = Vec::with_capacity(self.proof_size());
+for n in &self.nonces {
 out_nonces.push(*n as u64);
 }
 out_nonces

@@ -233,9 +202,14 @@ impl Proof {

 /// Converts the proof to a proof-of-work Target so they can be compared.
 /// Hashes the Cuckoo Proof data.
-pub fn to_difficulty(self) -> target::Difficulty {
+pub fn to_difficulty(&self) -> target::Difficulty {
 target::Difficulty::from_hash(&self.hash())
 }
+
+/// Returns the proof size
+pub fn proof_size(&self) -> usize {
+self.nonces.len()
+}
 }

 impl Readable for Proof {

@@ -243,7 +217,7 @@ impl Readable for Proof {
 let proof_size = global::proofsize();
 let mut pow = vec![0u32; proof_size];
 for n in 0..proof_size {
-pow[n] = try!(reader.read_u32());
+pow[n] = reader.read_u32()?;
 }
 Ok(Proof::new(pow))
 }

@@ -251,8 +225,8 @@ impl Readable for Proof {

 impl Writeable for Proof {
 fn write<W: Writer>(&self, writer: &mut W) -> Result<(), Error> {
-for n in 0..self.proof_size {
-try!(writer.write_u32(self.nonces[n]));
+for n in 0..self.proof_size() {
+writer.write_u32(self.nonces[n])?;
 }
 Ok(())
 }

@@ -271,7 +245,7 @@ pub fn amount_from_hr_string(amount: &str) -> Result<u64, ParseFloatError> {
 pub fn amount_to_hr_string(amount: u64) -> String {
 let amount = (amount as f64 / GRIN_BASE as f64) as f64;
 let places = (GRIN_BASE as f64).log(10.0) as usize + 1;
-String::from(format!("{:.*}", places, amount))
+format!("{:.*}", places, amount)
 }

 #[cfg(test)]

@@ -132,8 +132,8 @@ impl Writeable for MerkleProof {
 [write_u64, self.path.len() as u64]
 );

-try!(self.peaks.write(writer));
-try!(self.path.write(writer));
+self.peaks.write(writer)?;
+self.path.write(writer)?;

 Ok(())
 }

@@ -202,7 +202,7 @@ impl MerkleProof {
 pub fn from_hex(hex: &str) -> Result<MerkleProof, String> {
 let bytes = util::from_hex(hex.to_string()).unwrap();
 let res = ser::deserialize(&mut &bytes[..])
-.map_err(|_| format!("failed to deserialize a Merkle Proof"))?;
+.map_err(|_| "failed to deserialize a Merkle Proof".to_string())?;
 Ok(res)
 }

@@ -346,15 +346,12 @@ where

 let path = family_branch
 .iter()
-.map(|x| (self.get_from_file(x.1).unwrap_or(Hash::default()), x.1))
+.map(|x| (self.get_from_file(x.1).unwrap_or_default(), x.1))
 .collect::<Vec<_>>();

 let peaks = peaks(self.last_pos)
 .iter()
-.filter_map(|&x| {
-let res = self.get_from_file(x);
-res
-})
+.filter_map(|&x| self.get_from_file(x))
 .collect::<Vec<_>>();

 let proof = MerkleProof {

@@ -394,7 +391,7 @@ where

 current_hash = (left_hash, current_hash).hash_with_index(pos - 1);

-to_append.push((current_hash.clone(), None));
+to_append.push((current_hash, None));
 }

 // append all the new nodes and update the MMR index

@@ -425,7 +422,7 @@ where
 /// to keep an index of elements to positions in the tree. Prunes parent
 /// nodes as well when they become childless.
 pub fn prune(&mut self, position: u64, index: u32) -> Result<bool, String> {
-if let None = self.backend.get_hash(position) {
+if self.backend.get_hash(position).is_none() {
 return Ok(false);
 }
 let prunable_height = bintree_postorder_height(position);

@@ -451,7 +448,7 @@ where

 // if we have a pruned sibling, we can continue up the tree
 // otherwise we're done
-if let None = self.backend.get_hash(sibling) {
+if self.backend.get_hash(sibling).is_none() {
 current = parent;
 } else {
 break;

@@ -554,7 +551,8 @@ where
 if bintree_postorder_height(n) > 0 {
 if let Some(hash) = self.get_hash(n) {
 // take the left and right children, if they exist
-let left_pos = bintree_move_down_left(n).ok_or(format!("left_pos not found"))?;
+let left_pos =
+bintree_move_down_left(n).ok_or("left_pos not found".to_string())?;
 let right_pos = bintree_jump_right_sibling(left_pos);

 // using get_from_file here for the children (they may have been "removed")

@@ -663,6 +661,7 @@ where
 /// but positions of a node within the PMMR will not match positions in the
 /// backend storage anymore. The PruneList accounts for that mismatch and does
 /// the position translation.
+#[derive(Default)]
 pub struct PruneList {
 /// Vector of pruned nodes positions
 pub pruned_nodes: Vec<u64>,

@@ -718,28 +717,24 @@ impl PruneList {
 let pruned_idx = self.next_pruned_idx(pos);
 let next_idx = self.pruned_nodes.binary_search(&pos).map(|x| x + 1).ok();

-match pruned_idx.or(next_idx) {
-None => None,
-Some(idx) => {
-Some(
-// skip by the number of leaf nodes pruned in the preceeding subtrees
-// which just 2^height
-// except in the case of height==0
-// (where we want to treat the pruned tree as 0 leaves)
-self.pruned_nodes[0..(idx as usize)]
-.iter()
-.map(|n| {
-let height = bintree_postorder_height(*n);
-if height == 0 {
-0
-} else {
-(1 << height)
-}
-})
-.sum(),
-)
-}
-}
+let idx = pruned_idx.or(next_idx)?;
+Some(
+// skip by the number of leaf nodes pruned in the preceeding subtrees
+// which just 2^height
+// except in the case of height==0
+// (where we want to treat the pruned tree as 0 leaves)
+self.pruned_nodes[0..(idx as usize)]
+.iter()
+.map(|n| {
+let height = bintree_postorder_height(*n);
+if height == 0 {
+0
+} else {
+(1 << height)
+}
+})
+.sum(),
+)
 }

 /// Push the node at the provided position in the prune list. Compacts the

@@ -814,7 +809,7 @@ pub fn peaks(num: u64) -> Vec<u64> {
 // have for index a binary values with all 1s (i.e. 11, 111, 1111, etc.)
 let mut top = 1;
 while (top - 1) <= num {
-top = top << 1;
+top <<= 1;
 }
 top = (top >> 1) - 1;
 if top == 0 {

@@ -859,7 +854,7 @@ pub fn n_leaves(mut sz: u64) -> u64 {
 /// Returns the pmmr index of the nth inserted element
 pub fn insertion_to_pmmr_index(mut sz: u64) -> u64 {
 //1 based pmmrs
-sz = sz - 1;
+sz -= 1;
 2 * sz - sz.count_ones() as u64 + 1
 }

@@ -29,7 +29,7 @@ use core::hash::Hash;
 use ser::{self, Readable, Reader, Writeable, Writer};

 /// The difficulty is defined as the maximum target divided by the block hash.
-#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord)]
+#[derive(Debug, Clone, Copy, PartialEq, PartialOrd, Eq, Ord)]
 pub struct Difficulty {
 num: u64,
 }

@@ -63,7 +63,7 @@ impl Difficulty {
 }

 /// Converts the difficulty into a u64
-pub fn into_num(&self) -> u64 {
+pub fn to_num(&self) -> u64 {
 self.num
 }
 }

@@ -118,7 +118,7 @@ impl Writeable for Difficulty {

 impl Readable for Difficulty {
 fn read(reader: &mut Reader) -> Result<Difficulty, ser::Error> {
-let data = try!(reader.read_u64());
+let data = reader.read_u64()?;
 Ok(Difficulty { num: data })
 }
 }

@@ -155,7 +155,7 @@ impl<'de> de::Visitor<'de> for DiffVisitor {
 E: de::Error,
 {
 let num_in = s.parse::<u64>();
-if let Err(_) = num_in {
+if num_in.is_err() {
 return Err(de::Error::invalid_value(
 de::Unexpected::Str(s),
 &"a value number",

@@ -271,9 +271,9 @@ impl Writeable for Transaction {
 let mut outputs = self.outputs.clone();
 let mut kernels = self.kernels.clone();

-try!(inputs.write_sorted(writer));
-try!(outputs.write_sorted(writer));
-try!(kernels.write_sorted(writer));
+inputs.write_sorted(writer)?;
+outputs.write_sorted(writer)?;
+kernels.write_sorted(writer)?;

 Ok(())
 }

@@ -303,7 +303,6 @@ impl Readable for Transaction {
 inputs,
 outputs,
 kernels,
-..Default::default()
 })
 }
 }

@@ -494,7 +493,7 @@ pub fn aggregate(transactions: Vec<Transaction>) -> Result<Transaction, Error> {

 // we will sum these together at the end to give us the overall offset for the
 // transaction
-let mut kernel_offsets = vec![];
+let mut kernel_offsets: Vec<BlindingFactor> = vec![];

 for mut transaction in transactions {
 // we will summ these later to give a single aggregate offset

@@ -511,8 +510,7 @@ pub fn aggregate(transactions: Vec<Transaction>) -> Result<Transaction, Error> {
 let secp = static_secp_instance();
 let secp = secp.lock().unwrap();
 let mut keys = kernel_offsets
-.iter()
-.cloned()
+.into_iter()
 .filter(|x| *x != BlindingFactor::zero())
 .filter_map(|x| x.secret_key(&secp).ok())
 .collect::<Vec<_>>();

@@ -539,15 +537,13 @@ pub fn aggregate(transactions: Vec<Transaction>) -> Result<Transaction, Error> {
 let to_cut_through = in_set.intersection(&out_set).collect::<HashSet<_>>();

 let mut new_inputs = inputs
-.iter()
+.into_iter()
 .filter(|inp| !to_cut_through.contains(&inp.commitment()))
-.cloned()
 .collect::<Vec<_>>();

 let mut new_outputs = outputs
-.iter()
+.into_iter()
 .filter(|out| !to_cut_through.contains(&out.commitment()))
-.cloned()
 .collect::<Vec<_>>();

 // sort them lexicographically

@@ -576,19 +572,19 @@ pub fn deaggregate(mk_tx: Transaction, txs: Vec<Transaction>) -> Result<Transact
 // transaction
 let mut kernel_offsets = vec![];

-let tx = aggregate(txs).unwrap();
+let tx = aggregate(txs)?;

-for mk_input in mk_tx.clone().inputs {
+for mk_input in mk_tx.inputs {
 if !tx.inputs.contains(&mk_input) && !inputs.contains(&mk_input) {
 inputs.push(mk_input);
 }
 }
-for mk_output in mk_tx.clone().outputs {
+for mk_output in mk_tx.outputs {
 if !tx.outputs.contains(&mk_output) && !outputs.contains(&mk_output) {
 outputs.push(mk_output);
 }
 }
-for mk_kernel in mk_tx.clone().kernels {
+for mk_kernel in mk_tx.kernels {
 if !tx.kernels.contains(&mk_kernel) && !kernels.contains(&mk_kernel) {
 kernels.push(mk_kernel);
 }

@@ -601,14 +597,12 @@ pub fn deaggregate(mk_tx: Transaction, txs: Vec<Transaction>) -> Result<Transact
 let secp = static_secp_instance();
 let secp = secp.lock().unwrap();
 let mut positive_key = vec![mk_tx.offset]
-.iter()
-.cloned()
+.into_iter()
 .filter(|x| *x != BlindingFactor::zero())
 .filter_map(|x| x.secret_key(&secp).ok())
 .collect::<Vec<_>>();
 let mut negative_keys = kernel_offsets
-.iter()
-.cloned()
+.into_iter()
 .filter(|x| *x != BlindingFactor::zero())
 .filter_map(|x| x.secret_key(&secp).ok())
 .collect::<Vec<_>>();

@@ -626,9 +620,10 @@ pub fn deaggregate(mk_tx: Transaction, txs: Vec<Transaction>) -> Result<Transact
 outputs.sort();
 kernels.sort();

-let tx = Transaction::new(inputs, outputs, kernels);
+let mut tx = Transaction::new(inputs, outputs, kernels);
+tx.offset = total_kernel_offset;

-Ok(tx.with_offset(total_kernel_offset))
+Ok(tx)
 }

 /// A transaction input.

@@ -728,14 +723,13 @@ impl Input {
 /// identify the output. Specifically the block hash (to correctly
 /// calculate lock_height for coinbase outputs).
 pub fn commitment(&self) -> Commitment {
-self.commit.clone()
+self.commit
 }

 /// Convenience functon to return the (optional) block_hash for this input.
 /// Will return the default hash if we do not have one.
 pub fn block_hash(&self) -> Hash {
-let block_hash = self.block_hash.clone();
-block_hash.unwrap_or(Hash::default())
+self.block_hash.unwrap_or_else(Hash::default)
 }

 /// Convenience function to return the (optional) merkle_proof for this

@@ -744,7 +738,7 @@ impl Input {
 /// coinbase outputs.
 pub fn merkle_proof(&self) -> MerkleProof {
 let merkle_proof = self.merkle_proof.clone();
-merkle_proof.unwrap_or(MerkleProof::empty())
+merkle_proof.unwrap_or_else(MerkleProof::empty)
 }

 /// Verify the maturity of an output being spent by an input.

@@ -915,7 +909,7 @@ impl OutputIdentifier {
 /// Build a new output_identifier.
 pub fn new(features: OutputFeatures, commit: &Commitment) -> OutputIdentifier {
 OutputIdentifier {
-features: features.clone(),
+features: features,
 commit: commit.clone(),
 }
 }

@@ -929,7 +923,7 @@ impl OutputIdentifier {
 }

 /// Converts this identifier to a full output, provided a RangeProof
-pub fn to_output(self, proof: RangeProof) -> Output {
+pub fn into_output(self, proof: RangeProof) -> Output {
 Output {
 features: self.features,
 commit: self.commit,

@@ -1082,7 +1076,7 @@ impl ProofMessageElements {

 /// Deserialise and return the message elements
 pub fn from_proof_message(
-proof_message: ProofMessage,
+proof_message: &ProofMessage,
 ) -> Result<ProofMessageElements, ser::Error> {
 let mut c = Cursor::new(proof_message.as_bytes());
 ser::deserialize::<ProofMessageElements>(&mut c)

@@ -17,8 +17,8 @@
 //! simple miner is included, mostly for testing purposes. John Tromp's Tomato
 //! miner will be much faster in almost every environment.

-use std::collections::HashSet;
 use std::cmp;
+use std::collections::HashSet;

 use blake2;

@@ -93,9 +93,9 @@ impl Cuckoo {
 pub fn verify(&self, proof: Proof, ease: u64) -> bool {
 let easiness = ease * (self.size as u64) / 100;
 let nonces = proof.to_u64s();
-let mut us = vec![0; proof.proof_size];
-let mut vs = vec![0; proof.proof_size];
-for n in 0..proof.proof_size {
+let mut us = vec![0; proof.proof_size()];
+let mut vs = vec![0; proof.proof_size()];
+for n in 0..proof.proof_size() {
 if nonces[n] >= easiness || (n != 0 && nonces[n] <= nonces[n - 1]) {
 return false;
 }

@@ -103,10 +103,10 @@ impl Cuckoo {
 vs[n] = self.new_node(nonces[n], 1);
 }
 let mut i = 0;
-let mut count = proof.proof_size;
+let mut count = proof.proof_size();
 loop {
 let mut j = i;
-for k in 0..proof.proof_size {
+for k in 0..proof.proof_size() {
 // find unique other j with same vs[j]
 if k != i && vs[k] == vs[i] {
 if j != i {

@@ -119,7 +119,7 @@ impl Cuckoo {
 return false;
 }
 i = j;
-for k in 0..proof.proof_size {
+for k in 0..proof.proof_size() {
 // find unique other i with same us[i]
 if k != j && us[k] == us[j] {
 if i != j {

@@ -361,8 +361,8 @@ mod test {
 assert!(Cuckoo::new(&[51], 20).verify(Proof::new(V3.to_vec().clone()), 70));
 }

-/// Just going to disable this for now, as it's painful to try and get a valid
-/// cuckoo28 vector (TBD: 30 is more relevant now anyhow)
+/// Just going to disable this for now, as it's painful to try and get a
+/// valid cuckoo28 vector (TBD: 30 is more relevant now anyhow)
 #[test]
 #[ignore]
 fn validate28_vectors() {

@@ -35,14 +35,14 @@ extern crate time;

 extern crate grin_util as util;

-mod siphash;
 pub mod cuckoo;
+mod siphash;

 use consensus;
-use core::{Block, BlockHeader};
 use core::target::Difficulty;
-use global;
+use core::{Block, BlockHeader};
 use genesis;
+use global;
 use pow::cuckoo::{Cuckoo, Error};

 /// Validates the proof of work of a given header, and that the proof of work

@@ -97,7 +97,7 @@ pub fn pow_size(
 if let Ok(proof) =
 cuckoo::Miner::new(&pow_hash[..], consensus::EASINESS, proof_size, sz).mine()
 {
-if proof.clone().to_difficulty() >= diff {
+if proof.to_difficulty() >= diff {
 bh.pow = proof.clone();
 return Ok(());
 }

@@ -117,9 +117,9 @@ pub fn pow_size(
 #[cfg(test)]
 mod test {
 use super::*;
-use global;
 use core::target::Difficulty;
 use genesis;
+use global;

 /// We'll be generating genesis blocks differently
 #[ignore]

@@ -134,7 +134,7 @@ mod test {
 global::sizeshift(),
 ).unwrap();
 assert!(b.header.nonce != 310);
-assert!(b.header.pow.clone().to_difficulty() >= Difficulty::one());
+assert!(b.header.pow.to_difficulty() >= Difficulty::one());
 assert!(verify_size(&b.header, global::sizeshift()));
 }
 }

@@ -24,30 +24,30 @@ pub fn siphash24(v: [u64; 4], nonce: u64) -> u64 {

 // macro for left rotation
 macro_rules! rotl {
-($num:ident, $shift:expr) => {
-$num = ($num << $shift) | ($num >> (64 - $shift));
-}
-}
+($num:ident, $shift:expr) => {
+$num = ($num << $shift) | ($num >> (64 - $shift));
+};
+}

 // macro for a single siphash round
 macro_rules! round {
-() => {
-v0 = v0.wrapping_add(v1);
-v2 = v2.wrapping_add(v3);
-rotl!(v1, 13);
-rotl!(v3, 16);
-v1 ^= v0;
-v3 ^= v2;
-rotl!(v0, 32);
-v2 = v2.wrapping_add(v1);
-v0 = v0.wrapping_add(v3);
-rotl!(v1, 17);
-rotl!(v3, 21);
-v1 ^= v2;
-v3 ^= v0;
-rotl!(v2, 32);
-}
-}
+() => {
+v0 = v0.wrapping_add(v1);
+v2 = v2.wrapping_add(v3);
+rotl!(v1, 13);
+rotl!(v3, 16);
+v1 ^= v0;
+v3 ^= v2;
+rotl!(v0, 32);
+v2 = v2.wrapping_add(v1);
+v0 = v0.wrapping_add(v3);
+rotl!(v1, 17);
+rotl!(v3, 21);
+v1 ^= v2;
+v3 ^= v0;
+rotl!(v2, 32);
+};
+}

 // 2 rounds
 round!();

@@ -19,8 +19,8 @@ use std::sync::{Arc, RwLock};
 use rand::Rng;
 use rand::os::OsRng;

-use core::core::target::Difficulty;
 use core::core::hash::Hash;
+use core::core::target::Difficulty;
 use msg::*;
 use peer::Peer;
 use types::*;

@@ -35,7 +35,8 @@ pub struct Handshake {
 /// a node id.
 nonces: Arc<RwLock<VecDeque<u64>>>,
 /// The genesis block header of the chain seen by this node.
-/// We only want to connect to other nodes seeing the same chain (forks are ok).
+/// We only want to connect to other nodes seeing the same chain (forks are
+/// ok).
 genesis: Hash,
 config: P2PConfig,
 }

@@ -111,7 +112,7 @@ impl Handshake {
 debug!(
 LOGGER,
 "Connected! Cumulative {} offered from {:?} {:?} {:?}",
-peer_info.total_difficulty.into_num(),
+peer_info.total_difficulty.to_num(),
 peer_info.addr,
 peer_info.user_agent,
 peer_info.capabilities

@@ -686,7 +686,7 @@ impl NetAdapter for Peers {
 );
 }

-if diff.into_num() > 0 {
+if diff.to_num() > 0 {
 if let Some(peer) = self.get_connected_peer(&addr) {
 let mut peer = peer.write().unwrap();
 peer.info.total_difficulty = diff;

@@ -61,7 +61,7 @@ fn peer_handshake() {
 p2p_config.clone(),
 dandelion_config.clone(),
 net_adapter.clone(),
-Hash::from_vec(vec![]),
+Hash::from_vec(&vec![]),
 Arc::new(AtomicBool::new(false)),
 false,
 None,

@@ -82,7 +82,7 @@ fn peer_handshake() {
 p2p::Capabilities::UNKNOWN,
 Difficulty::one(),
 my_addr,
-&p2p::handshake::Handshake::new(Hash::from_vec(vec![]), p2p_config.clone()),
+&p2p::handshake::Handshake::new(Hash::from_vec(&vec![]), p2p_config.clone()),
 net_adapter,
 ).unwrap();

@@ -15,8 +15,8 @@
 //! Server stat collection types, to be used by tests, logging or GUI/TUI
 //! to collect information about server status

-use std::sync::{Arc, RwLock};
 use std::sync::atomic::AtomicBool;
+use std::sync::{Arc, RwLock};
 use std::time::SystemTime;

 use chain;

@@ -165,7 +165,7 @@ impl PeerStats {
 state: state.to_string(),
 addr: addr,
 version: peer.info.version,
-total_difficulty: peer.info.total_difficulty.into_num(),
+total_difficulty: peer.info.total_difficulty.to_num(),
 height: peer.info.height,
 direction: direction.to_string(),
 }

@@ -376,7 +376,7 @@ impl Server {
 last_time = time;
 DiffBlock {
 block_number: height,
-difficulty: diff.into_num(),
+difficulty: diff.to_num(),
 time: time,
 duration: dur,
 }

@@ -12,10 +12,10 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-use std::{thread, cmp};
-use std::time::Duration;
 use std::sync::Arc;
 use std::sync::atomic::{AtomicBool, Ordering};
+use std::time::Duration;
+use std::{cmp, thread};
 use time;

 use chain;

@@ -296,7 +296,7 @@ fn needs_syncing(
 info!(
 LOGGER,
 "synchronised at {} @ {} [{}]",
-local_diff.into_num(),
+local_diff.to_num(),
 ch.height,
 ch.last_block_h
 );

@@ -338,8 +338,8 @@ fn needs_syncing(
 }

 /// We build a locator based on sync_head.
-/// Even if sync_head is significantly out of date we will "reset" it once we start getting
-/// headers back from a peer.
+/// Even if sync_head is significantly out of date we will "reset" it once we
+/// start getting headers back from a peer.
 ///
 /// TODO - this gets *expensive* with a large header chain to iterate over
 /// as we need to get each block header from the db

@@ -181,15 +181,14 @@ fn build_block(
 b.header.nonce = rng.gen();
 b.header.timestamp = time::at_utc(time::Timespec::new(now_sec, 0));

-let b_difficulty =
-(b.header.total_difficulty.clone() - head.total_difficulty.clone()).into_num();
+let b_difficulty = (b.header.total_difficulty.clone() - head.total_difficulty.clone()).to_num();
 debug!(
 LOGGER,
 "Built new block with {} inputs and {} outputs, network difficulty: {}, cumulative difficulty {}",
 b.inputs.len(),
 b.outputs.len(),
 b_difficulty,
-b.header.clone().total_difficulty.clone().into_num(),
+b.header.clone().total_difficulty.to_num(),
 );

 let roots_result = chain.set_txhashset_roots(&mut b, false);

@@ -446,7 +446,6 @@ impl StratumServer {
 // Reconstruct the block header with this nonce and pow added
 b = self.current_block.clone();
 b.header.nonce = submit_params.nonce;
-b.header.pow.proof_size = submit_params.pow.len();
 b.header.pow.nonces = submit_params.pow;
 let res = self.chain.process_block(b.clone(), chain::Options::MINE);
 if let Err(e) = res {

@@ -644,7 +643,7 @@ impl StratumServer {
 self.current_block = new_block;
 self.current_difficulty = (self.current_block.header.total_difficulty.clone()
 - head.total_difficulty.clone())
-.into_num();
+.to_num();
 self.current_key_id = block_fees.key_id();
 current_hash = latest_hash;
 // set a new deadline for rebuilding with fresh transactions

@@ -17,23 +17,23 @@
 //! header with its proof-of-work. Any valid mined blocks are submitted to the
 //! network.

-use std::sync::{Arc, RwLock};
 use std::sync::atomic::{AtomicBool, Ordering};
+use std::sync::{Arc, RwLock};
 use time;

 use common::adapters::PoolToChainAdapter;
+use common::types::StratumServerConfig;
 use core::consensus;
 use core::core::Proof;
-use core::core::{Block, BlockHeader};
 use core::core::hash::{Hash, Hashed};
+use core::core::{Block, BlockHeader};
 use core::pow::cuckoo;
-use common::types::StratumServerConfig;
 use util::LOGGER;

 use chain;
-use pool;
-use mining::mine_block;
 use core::global;
+use mining::mine_block;
+use pool;

 // Max number of transactions this miner will assemble in a block
 const MAX_TX: u32 = 5000;

@@ -108,7 +108,7 @@ impl Miner {
 global::sizeshift(),
 ).mine()
 {
-let proof_diff = proof.clone().to_difficulty();
+let proof_diff = proof.to_difficulty();
 if proof_diff >= (b.header.total_difficulty.clone() - head.total_difficulty.clone())
 {
 sol = Some(proof);

@@ -137,7 +137,7 @@ fn find_outputs_with_key<T: WalletBackend>(
 None,
 output.range_proof().unwrap(),
 ).unwrap();
-let message = ProofMessageElements::from_proof_message(info.message).unwrap();
+let message = ProofMessageElements::from_proof_message(&info.message).unwrap();
 let value = message.value();
 if value.is_err() {
 continue;

@@ -175,7 +175,7 @@ fn find_outputs_with_key<T: WalletBackend>(
 None,
 output.range_proof().unwrap(),
 ).unwrap();
-let message = ProofMessageElements::from_proof_message(info.message).unwrap();
+let message = ProofMessageElements::from_proof_message(&info.message).unwrap();
 let value = message.value();
 if value.is_err() || !message.zeroes_correct() {
 continue;