Accept multiple Cuckoo graph sizes ()

* Added sizeshift to the proof of work and encoded it as a VLQ
* Fixed some loose ends to get things to compile
* Renamed sizeshift to min_sizeshift
  * Pipeline checks the PoW Cuckoo Cycle sizeshift and adjustment factor before the difficulty comparison.
  * Working delta+VLQ encoding of u64 PoW, unfortunately still a little larger than u32, at least for Cuckoo30.
* Changed the binary encoding of the proof of work to a sequence of exact compact bit representations of each nonce. Somewhat simpler, shorter, and matches the data to be hashed (see the size sketch after this list).
* A few fixes based on @tromp's feedback:
  * Max nonce is 2^(N-1)
  * Need a separate constant for the reference sizeshift
* Cuckoo implementation now preserves the sizeshift. Added a test providing a block at a higher sizeshift.
* Last small overflow protection
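
The "shorter" claim can be checked with a quick back-of-the-envelope calculation. A minimal sketch, not part of the commit, assuming a Cuckoo30 proof with the default 42 nonces:

fn main() {
    // Each nonce fits in N-1 bits, so a Cuckoo30 proof packs 42 nonces at 29 bits each.
    let nonce_bits = 30 - 1;
    let packed_bytes = (42 * nonce_bits + 7) / 8; // 153 bytes once padded to a byte boundary
    let new_len = 1 + packed_bytes;               // plus 1 byte for cuckoo_sizeshift => 154 bytes
    let old_len = 42 * 4;                         // previous fixed u32 encoding => 168 bytes
    assert_eq!(old_len - new_len, 14);            // same 14-byte difference seen in the serialized block size tests below
}
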
Ignotus Peverell 2018-06-29 18:41:28 +01:00 committed by GitHub
parent 1df409fa69
commit 1398e0bf85
28 changed files with 214 additions and 141 deletions

@ -230,20 +230,17 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), E
}
if !ctx.opts.contains(Options::SKIP_POW) {
let n = global::sizeshift();
if !(ctx.pow_verifier)(header, n) {
if global::min_sizeshift() > header.pow.cuckoo_sizeshift {
return Err(Error::LowSizeshift);
}
if !(ctx.pow_verifier)(header, header.pow.cuckoo_sizeshift) {
error!(
LOGGER,
"pipe: validate_header failed for cuckoo shift size {}", n
"pipe: validate_header failed for cuckoo shift size {}",
header.pow.cuckoo_sizeshift,
);
return Err(Error::InvalidPow);
}
if header.height % 500 == 0 {
debug!(
LOGGER,
"Validating header validated, using cuckoo shift size {}", n
);
}
}
// first I/O cost, better as late as possible
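
Condensing the change above, header PoW validation now proceeds in this order (a simplified sketch of the flow, not the actual pipe.rs code; names are illustrative):

fn check_header_pow(
    header_shift: u8,
    min_shift: u8,
    cycle_valid: bool,
    proof_difficulty: u64,
    target_difficulty: u64,
) -> Result<(), &'static str> {
    // 1. Reject headers advertising a graph size below the network minimum.
    if header_shift < min_shift {
        return Err("LowSizeshift");
    }
    // 2. Verify the Cuckoo cycle at the header's own sizeshift, not a global constant.
    if !cycle_valid {
        return Err("InvalidPow");
    }
    // 3. Only then compare the shift-adjusted proof difficulty against the target.
    if proof_difficulty < target_difficulty {
        return Err("DifficultyTooLow");
    }
    Ok(())
}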

@ -64,6 +64,8 @@ pub enum Error {
DifficultyTooLow,
/// Addition of difficulties on all previous block is wrong
WrongTotalDifficulty,
/// Block header sizeshift is lower than our min
LowSizeshift,
/// The proof of work is invalid
InvalidPow,
/// The block doesn't sum correctly or a tx signature is invalid

@ -87,7 +87,7 @@ fn data_files() {
&mut b.header,
difficulty,
global::proofsize(),
global::sizeshift(),
global::min_sizeshift(),
).unwrap();
let _bhash = b.hash();

@ -67,12 +67,19 @@ fn mine_empty_chain() {
chain.set_txhashset_roots(&mut b, false).unwrap();
let sizeshift = if n == 2 {
global::min_sizeshift() + 1
} else {
global::min_sizeshift()
};
b.header.pow.cuckoo_sizeshift = sizeshift;
pow::pow_size(
&mut b.header,
difficulty,
global::proofsize(),
global::sizeshift(),
sizeshift,
).unwrap();
b.header.pow.cuckoo_sizeshift = sizeshift;
let bhash = b.hash();
chain.process_block(b, chain::Options::MINE).unwrap();

@ -73,7 +73,7 @@ fn test_coinbase_maturity() {
&mut block.header,
difficulty,
global::proofsize(),
global::sizeshift(),
global::min_sizeshift(),
).unwrap();
assert_eq!(block.outputs.len(), 1);
@ -134,7 +134,7 @@ fn test_coinbase_maturity() {
&mut block.header,
difficulty,
global::proofsize(),
global::sizeshift(),
global::min_sizeshift(),
).unwrap();
// mine enough blocks to increase the height sufficiently for
@ -157,7 +157,7 @@ fn test_coinbase_maturity() {
&mut block.header,
difficulty,
global::proofsize(),
global::sizeshift(),
global::min_sizeshift(),
).unwrap();
chain.process_block(block, chain::Options::MINE).unwrap();
@ -184,7 +184,7 @@ fn test_coinbase_maturity() {
&mut block.header,
difficulty,
global::proofsize(),
global::sizeshift(),
global::min_sizeshift(),
).unwrap();
let result = chain.process_block(block, chain::Options::MINE);

@ -54,7 +54,11 @@ pub const BLOCK_TIME_SEC: u64 = 60;
pub const PROOFSIZE: usize = 42;
/// Default Cuckoo Cycle size shift used for mining and validating.
pub const DEFAULT_SIZESHIFT: u8 = 30;
pub const DEFAULT_MIN_SIZESHIFT: u8 = 30;
/// Original reference sizeshift to compute difficulty factors for higher
/// Cuckoo graph sizes, changing this would hard fork
pub const REFERENCE_SIZESHIFT: u8 = 30;
/// Default Cuckoo Cycle easiness, high enough to have good likeliness to find
/// a solution.

@ -37,18 +37,28 @@ pub use self::id::ShortId;
pub use self::transaction::*;
use core::hash::Hashed;
use global;
use ser::{Error, Readable, Reader, Writeable, Writer};
use ser::{self, Error, Readable, Reader, Writeable, Writer};
/// Proof of work
/// A Cuckoo Cycle proof of work, consisting of the shift to get the graph
/// size (i.e. 31 for Cuckoo31 with a 2^31 or 1<<31 graph size) and the nonces
/// of the graph solution. While being expressed as u64 for simplicity, each
/// nonce is strictly less than half the cycle size (i.e. <2^30 for Cuckoo 31).
///
/// The hash of the `Proof` is the hash of its packed nonces when serializing
/// them at their exact bit size. The resulting bit sequence is padded to be
/// byte-aligned.
///
#[derive(Clone, PartialOrd, PartialEq)]
pub struct Proof {
/// Power of 2 used for the size of the cuckoo graph
pub cuckoo_sizeshift: u8,
/// The nonces
pub nonces: Vec<u32>,
pub nonces: Vec<u64>,
}
impl fmt::Debug for Proof {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Cuckoo(")?;
write!(f, "Cuckoo{}(", self.cuckoo_sizeshift)?;
for (i, val) in self.nonces[..].iter().enumerate() {
write!(f, "{:x}", val)?;
if i < self.nonces.len() - 1 {
@ -62,14 +72,19 @@ impl fmt::Debug for Proof {
impl Eq for Proof {}
impl Proof {
/// Builds a proof with all bytes zeroed out
pub fn new(in_nonces: Vec<u32>) -> Proof {
Proof { nonces: in_nonces }
/// Builds a proof with provided nonces at default sizeshift
pub fn new(mut in_nonces: Vec<u64>) -> Proof {
in_nonces.sort();
Proof {
cuckoo_sizeshift: global::min_sizeshift(),
nonces: in_nonces,
}
}
/// Builds a proof with all bytes zeroed out
pub fn zero(proof_size: usize) -> Proof {
Proof {
cuckoo_sizeshift: global::min_sizeshift(),
nonces: vec![0; proof_size],
}
}
@ -78,32 +93,25 @@ impl Proof {
/// needed so that tests that ignore POW
/// don't fail due to duplicate hashes
pub fn random(proof_size: usize) -> Proof {
let sizeshift = global::min_sizeshift();
let nonce_mask = (1 << (sizeshift - 1)) - 1;
let mut rng = thread_rng();
let v: Vec<u32> = iter::repeat(())
.map(|()| rng.gen())
// force the random num to be within sizeshift bits
let mut v: Vec<u64> = iter::repeat(())
.map(|()| (rng.gen::<u32>() & nonce_mask) as u64)
.take(proof_size)
.collect();
Proof { nonces: v }
}
/// Converts the proof to a vector of u64s
pub fn to_u64s(&self) -> Vec<u64> {
let mut out_nonces = Vec::with_capacity(self.proof_size());
for n in &self.nonces {
out_nonces.push(*n as u64);
v.sort();
Proof {
cuckoo_sizeshift: global::min_sizeshift(),
nonces: v,
}
out_nonces
}
/// Converts the proof to a vector of u32s
pub fn to_u32s(&self) -> Vec<u32> {
self.clone().nonces
}
/// Converts the proof to a proof-of-work Target so they can be compared.
/// Hashes the Cuckoo Proof data.
pub fn to_difficulty(&self) -> target::Difficulty {
target::Difficulty::from_hash(&self.hash())
target::Difficulty::from_hash_and_shift(&self.hash(), self.cuckoo_sizeshift)
}
/// Returns the proof size
@ -114,24 +122,76 @@ impl Proof {
impl Readable for Proof {
fn read(reader: &mut Reader) -> Result<Proof, Error> {
let proof_size = global::proofsize();
let mut pow = vec![0u32; proof_size];
for n in 0..proof_size {
pow[n] = reader.read_u32()?;
let cuckoo_sizeshift = reader.read_u8()?;
let mut nonces = Vec::with_capacity(global::proofsize());
let nonce_bits = cuckoo_sizeshift as usize - 1;
let bytes_len = BitVec::bytes_len(nonce_bits * global::proofsize());
let bits = reader.read_fixed_bytes(bytes_len)?;
let bitvec = BitVec { bits };
for n in 0..global::proofsize() {
let mut nonce = 0;
for bit in 0..nonce_bits {
if bitvec.bit_at(n * nonce_bits + (bit as usize)) {
nonce |= 1 << bit;
}
}
nonces.push(nonce);
}
Ok(Proof::new(pow))
Ok(Proof {
cuckoo_sizeshift,
nonces,
})
}
}
impl Writeable for Proof {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), Error> {
for n in 0..self.proof_size() {
writer.write_u32(self.nonces[n])?;
if writer.serialization_mode() != ser::SerializationMode::Hash {
writer.write_u8(self.cuckoo_sizeshift)?;
}
let nonce_bits = self.cuckoo_sizeshift as usize - 1;
let mut bitvec = BitVec::new(nonce_bits * global::proofsize());
for (n, nonce) in self.nonces.iter().enumerate() {
for bit in 0..nonce_bits {
if nonce & (1 << bit) != 0 {
bitvec.set_bit_at(n * nonce_bits + (bit as usize))
}
}
}
writer.write_fixed_bytes(&bitvec.bits)?;
Ok(())
}
}
// TODO this could likely be optimized by writing whole bytes (or even words)
// in the `BitVec` at once, dealing with the truncation, instead of bits by bits
struct BitVec {
bits: Vec<u8>,
}
impl BitVec {
/// Number of bytes required to store the provided number of bits
fn bytes_len(bits_len: usize) -> usize {
(bits_len + 7) / 8
}
fn new(bits_len: usize) -> BitVec {
BitVec {
bits: vec![0; BitVec::bytes_len(bits_len)],
}
}
fn set_bit_at(&mut self, pos: usize) {
self.bits[pos / 8] |= 1 << (pos % 8) as u8;
}
fn bit_at(&self, pos: usize) -> bool {
self.bits[pos / 8] & (1 << (pos % 8) as u8) != 0
}
}
/// Common method for parsing an amount from human-readable, and converting
/// to internally-compatible u64
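
To make the exact-bit-width packing above concrete, here is a standalone round-trip sketch (not part of the commit) that mirrors the read/write logic of `Proof` and `BitVec` for a toy sizeshift of 6, i.e. 5-bit nonces; `pack` and `unpack` are illustrative helpers, not functions from the codebase:

fn pack(nonces: &[u64], nonce_bits: usize) -> Vec<u8> {
    // Bit by bit, little-endian within each nonce, padded to a byte boundary.
    let mut bytes = vec![0u8; (nonces.len() * nonce_bits + 7) / 8];
    for (n, nonce) in nonces.iter().enumerate() {
        for bit in 0..nonce_bits {
            if nonce & (1 << bit) != 0 {
                let pos = n * nonce_bits + bit;
                bytes[pos / 8] |= 1 << (pos % 8);
            }
        }
    }
    bytes
}

fn unpack(bytes: &[u8], count: usize, nonce_bits: usize) -> Vec<u64> {
    (0..count)
        .map(|n| {
            (0..nonce_bits).fold(0u64, |nonce, bit| {
                let pos = n * nonce_bits + bit;
                if bytes[pos / 8] & (1 << (pos % 8)) != 0 {
                    nonce | (1 << bit)
                } else {
                    nonce
                }
            })
        })
        .collect()
}

fn main() {
    let nonces = vec![3u64, 17, 30];   // all < 2^5, valid for a sizeshift of 6
    let packed = pack(&nonces, 5);     // 3 nonces * 5 bits = 15 bits -> 2 bytes
    assert_eq!(packed.len(), 2);
    assert_eq!(unpack(&packed, nonces.len(), 5), nonces);
}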

@ -25,6 +25,7 @@ use std::ops::{Add, Div, Mul, Sub};
use serde::{de, Deserialize, Deserializer, Serialize, Serializer};
use std::cmp::max;
use core::global;
use core::hash::Hash;
use ser::{self, Readable, Reader, Writeable, Writer};
@ -53,12 +54,16 @@ impl Difficulty {
}
/// Computes the difficulty from a hash. Divides the maximum target by the
/// provided hash.
pub fn from_hash(h: &Hash) -> Difficulty {
/// provided hash and applies the Cuckoo sizeshift adjustment factor (see
/// https://lists.launchpad.net/mimblewimble/msg00494.html).
pub fn from_hash_and_shift(h: &Hash, shift: u8) -> Difficulty {
let max_target = <u64>::max_value();
let num = h.to_u64();
// Adjust the difficulty based on a 2^(N-M)*(N-1) factor, with M being
// the minimum sizeshift and N the provided sizeshift
let adjust_factor = (1 << (shift - global::ref_sizeshift()) as u64) * (shift as u64 - 1);
Difficulty {
num: max_target / max(num, 1),
num: (max_target / max(num, adjust_factor)) * adjust_factor,
}
}
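
For intuition on the factor above, here is what it evaluates to for the first few graph sizes (a minimal sketch, assuming a reference sizeshift of 30; `adjust_factor` is an illustrative helper, not the codebase's function):

fn adjust_factor(shift: u64, ref_shift: u64) -> u64 {
    // 2^(N - M) * (N - 1), with M the reference sizeshift and N the proof's sizeshift
    (1 << (shift - ref_shift)) * (shift - 1)
}

fn main() {
    assert_eq!(adjust_factor(30, 30), 29);  // Cuckoo30: 2^0 * 29
    assert_eq!(adjust_factor(31, 30), 60);  // Cuckoo31: 2^1 * 30
    assert_eq!(adjust_factor(32, 30), 124); // Cuckoo32: 2^2 * 31
    // A Cuckoo31 proof is credited roughly 60/29 ≈ 2.07x the difficulty of a
    // Cuckoo30 proof with the same hash, reflecting the larger graph's cost.
}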

@ -17,8 +17,9 @@
//! should be used sparingly.
use consensus::TargetError;
use consensus::{BLOCK_TIME_SEC, COINBASE_MATURITY, CUT_THROUGH_HORIZON, DEFAULT_SIZESHIFT,
DIFFICULTY_ADJUST_WINDOW, INITIAL_DIFFICULTY, MEDIAN_TIME_WINDOW, PROOFSIZE};
use consensus::{BLOCK_TIME_SEC, COINBASE_MATURITY, CUT_THROUGH_HORIZON, DEFAULT_MIN_SIZESHIFT,
DIFFICULTY_ADJUST_WINDOW, INITIAL_DIFFICULTY, MEDIAN_TIME_WINDOW, PROOFSIZE,
REFERENCE_SIZESHIFT};
use core::target::Difficulty;
/// An enum collecting sets of parameters used throughout the
/// code wherever mining is needed. This should allow for
@ -30,13 +31,13 @@ use std::sync::RwLock;
/// by users
/// Automated testing sizeshift
pub const AUTOMATED_TESTING_SIZESHIFT: u8 = 10;
pub const AUTOMATED_TESTING_MIN_SIZESHIFT: u8 = 10;
/// Automated testing proof size
pub const AUTOMATED_TESTING_PROOF_SIZE: usize = 4;
/// User testing sizeshift
pub const USER_TESTING_SIZESHIFT: u8 = 16;
pub const USER_TESTING_MIN_SIZESHIFT: u8 = 16;
/// User testing proof size
pub const USER_TESTING_PROOF_SIZE: usize = 42;
@ -94,15 +95,29 @@ pub fn set_mining_mode(mode: ChainTypes) {
*param_ref = mode;
}
/// The sizeshift
pub fn sizeshift() -> u8 {
/// The minimum acceptable sizeshift
pub fn min_sizeshift() -> u8 {
let param_ref = CHAIN_TYPE.read().unwrap();
match *param_ref {
ChainTypes::AutomatedTesting => AUTOMATED_TESTING_SIZESHIFT,
ChainTypes::UserTesting => USER_TESTING_SIZESHIFT,
ChainTypes::Testnet1 => USER_TESTING_SIZESHIFT,
ChainTypes::Testnet2 => DEFAULT_SIZESHIFT,
ChainTypes::Mainnet => DEFAULT_SIZESHIFT,
ChainTypes::AutomatedTesting => AUTOMATED_TESTING_MIN_SIZESHIFT,
ChainTypes::UserTesting => USER_TESTING_MIN_SIZESHIFT,
ChainTypes::Testnet1 => USER_TESTING_MIN_SIZESHIFT,
ChainTypes::Testnet2 => DEFAULT_MIN_SIZESHIFT,
ChainTypes::Mainnet => DEFAULT_MIN_SIZESHIFT,
}
}
/// Reference sizeshift used to compute factor on higher Cuckoo graph sizes,
/// while the min_sizeshift can be changed on a soft fork, changing
/// ref_sizeshift is a hard fork.
pub fn ref_sizeshift() -> u8 {
let param_ref = CHAIN_TYPE.read().unwrap();
match *param_ref {
ChainTypes::AutomatedTesting => AUTOMATED_TESTING_MIN_SIZESHIFT,
ChainTypes::UserTesting => USER_TESTING_MIN_SIZESHIFT,
ChainTypes::Testnet1 => USER_TESTING_MIN_SIZESHIFT,
ChainTypes::Testnet2 => REFERENCE_SIZESHIFT,
ChainTypes::Mainnet => REFERENCE_SIZESHIFT,
}
}

@ -87,9 +87,9 @@ impl Cuckoo {
/// Assuming increasing nonces all smaller than easiness, verifies the
/// nonces form a cycle in a Cuckoo graph. Each nonce generates an edge, we
/// build the nodes on both side of that edge and count the connections.
pub fn verify(&self, proof: Proof, ease: u64) -> bool {
pub fn verify(&self, proof: &Proof, ease: u64) -> bool {
let easiness = ease * (self.size as u64) / 100;
let nonces = proof.to_u64s();
let nonces = &proof.nonces;
let mut us = vec![0; proof.proof_size()];
let mut vs = vec![0; proof.proof_size()];
for n in 0..proof.proof_size() {
@ -146,6 +146,7 @@ pub struct Miner {
proof_size: usize,
cuckoo: Cuckoo,
graph: Vec<u32>,
sizeshift: u8,
}
/// What type of cycle we have found?
@ -170,12 +171,7 @@ impl Miner {
let size = 1 << sizeshift;
let graph = vec![0; size + 1];
let easiness = (ease as u64) * (size as u64) / 100;
Miner {
easiness: easiness,
cuckoo: cuckoo,
graph: graph,
proof_size: proof_size,
}
Miner{easiness, cuckoo, graph, proof_size, sizeshift}
}
/// Searches for a solution
@ -196,7 +192,9 @@ impl Miner {
let sol = self.find_sol(nu, &us, nv, &vs);
match sol {
CycleSol::ValidProof(res) => {
return Ok(Proof::new(res.to_vec()));
let mut proof = Proof::new(map_vec!(res.to_vec(), |&n| n as u64));
proof.cuckoo_sizeshift = self.sizeshift;
return Ok(proof);
}
CycleSol::InvalidCycle(_) => continue,
CycleSol::NoCycle => {
@ -311,21 +309,21 @@ mod test {
use blake2;
use core::Proof;
static V1: [u32; 42] = [
static V1: [u64; 42] = [
0x3bbd, 0x4e96, 0x1013b, 0x1172b, 0x1371b, 0x13e6a, 0x1aaa6, 0x1b575, 0x1e237, 0x1ee88,
0x22f94, 0x24223, 0x25b4f, 0x2e9f3, 0x33b49, 0x34063, 0x3454a, 0x3c081, 0x3d08e, 0x3d863,
0x4285a, 0x42f22, 0x43122, 0x4b853, 0x4cd0c, 0x4f280, 0x557d5, 0x562cf, 0x58e59, 0x59a62,
0x5b568, 0x644b9, 0x657e9, 0x66337, 0x6821c, 0x7866f, 0x7e14b, 0x7ec7c, 0x7eed7, 0x80643,
0x8628c, 0x8949e,
];
static V2: [u32; 42] = [
static V2: [u64; 42] = [
0x5e3a, 0x8a8b, 0x103d8, 0x1374b, 0x14780, 0x16110, 0x1b571, 0x1c351, 0x1c826, 0x28228,
0x2909f, 0x29516, 0x2c1c4, 0x334eb, 0x34cdd, 0x38a2c, 0x3ad23, 0x45ac5, 0x46afe, 0x50f43,
0x51ed6, 0x52ddd, 0x54a82, 0x5a46b, 0x5dbdb, 0x60f6f, 0x60fcd, 0x61c78, 0x63899, 0x64dab,
0x6affc, 0x6b569, 0x72639, 0x73987, 0x78806, 0x7b98e, 0x7c7d7, 0x7ddd4, 0x7fa88, 0x8277c,
0x832d9, 0x8ba6f,
];
static V3: [u32; 42] = [
static V3: [u64; 42] = [
0x308b, 0x9004, 0x91fc, 0x983e, 0x9d67, 0xa293, 0xb4cb, 0xb6c8, 0xccc8, 0xdddc, 0xf04d,
0x1372f, 0x16ec9, 0x17b61, 0x17d03, 0x1e3bc, 0x1fb0f, 0x29e6e, 0x2a2ca, 0x2a719, 0x3a078,
0x3b7cc, 0x3c71d, 0x40daa, 0x43e17, 0x46adc, 0x4b359, 0x4c3aa, 0x4ce92, 0x4d06e, 0x51140,
@ -333,7 +331,7 @@ mod test {
0x7f400,
];
// cuckoo28 at 50% edges of letter 'u'
static V4: [u32; 42] = [
static V4: [u64; 42] = [
0xf7243, 0x11f130, 0x193812, 0x23b565, 0x279ac3, 0x69b270, 0xe0778f, 0xef51fc, 0x10bf6e8,
0x13ccf7d, 0x1551177, 0x1b6cfd2, 0x1f872c3, 0x2075681, 0x2e23ccc, 0x2e4c0aa, 0x2f607f1,
0x3007eeb, 0x3407e9a, 0x35423f9, 0x39e48bf, 0x45e3bf6, 0x46aa484, 0x47c0fe1, 0x4b1d5a6,
@ -371,15 +369,15 @@ mod test {
fn validate20_vectors() {
assert!(
Cuckoo::from_hash(blake2(&[49]).as_bytes(), 20)
.verify(Proof::new(V1.to_vec().clone()), 75)
.verify(&Proof::new(V1.to_vec().clone()), 75)
);
assert!(
Cuckoo::from_hash(blake2(&[50]).as_bytes(), 20)
.verify(Proof::new(V2.to_vec().clone()), 70)
.verify(&Proof::new(V2.to_vec().clone()), 70)
);
assert!(
Cuckoo::from_hash(blake2(&[51]).as_bytes(), 20)
.verify(Proof::new(V3.to_vec().clone()), 70)
.verify(&Proof::new(V3.to_vec().clone()), 70)
);
}
@ -390,22 +388,22 @@ mod test {
fn validate28_vectors() {
let mut test_header = [0; 32];
test_header[0] = 24;
assert!(Cuckoo::from_hash(&test_header, 28).verify(Proof::new(V4.to_vec().clone()), 50));
assert!(Cuckoo::from_hash(&test_header, 28).verify(&Proof::new(V4.to_vec().clone()), 50));
}
#[test]
fn validate_fail() {
// edge checks
assert!(!Cuckoo::from_hash(blake2(&[49]).as_bytes(), 20).verify(Proof::new(vec![0; 42]), 75));
assert!(!Cuckoo::from_hash(blake2(&[49]).as_bytes(), 20).verify(&Proof::new(vec![0; 42]), 75));
assert!(!Cuckoo::from_hash(blake2(&[49]).as_bytes(), 20)
.verify(Proof::new(vec![0xffff; 42]), 75));
.verify(&Proof::new(vec![0xffff; 42]), 75));
// wrong data for proof
assert!(!Cuckoo::from_hash(blake2(&[50]).as_bytes(), 20)
.verify(Proof::new(V1.to_vec().clone()), 75));
.verify(&Proof::new(V1.to_vec().clone()), 75));
let mut test_header = [0; 32];
test_header[0] = 24;
assert!(!Cuckoo::from_hash(blake2(&test_header).as_bytes(), 20)
.verify(Proof::new(V4.to_vec().clone()), 50));
.verify(&Proof::new(V4.to_vec().clone()), 50));
}
#[test]
@ -414,13 +412,13 @@ mod test {
for n in 1..5 {
let h = [n; 32];
let nonces = Miner::from_hash(&h, 75, 42, 20).mine().unwrap();
assert!(Cuckoo::from_hash(&h, 20).verify(nonces, 75));
assert!(Cuckoo::from_hash(&h, 20).verify(&nonces, 75));
}
// cuckoo18
for n in 1..5 {
let h = [n; 32];
let nonces = Miner::from_hash(&h, 75, 42, 18).mine().unwrap();
assert!(Cuckoo::from_hash(&h, 18).verify(nonces, 75));
assert!(Cuckoo::from_hash(&h, 18).verify(&nonces, 75));
}
}
}

@ -44,12 +44,13 @@ use core::{Block, BlockHeader};
use genesis;
use global;
use pow::cuckoo::{Cuckoo, Error};
use ser;
/// Validates the proof of work of a given header, and that the proof of work
/// satisfies the requirements of the header.
pub fn verify_size(bh: &BlockHeader, cuckoo_sz: u8) -> bool {
Cuckoo::from_hash(bh.pre_pow_hash().as_ref(), cuckoo_sz)
.verify(bh.pow.clone(), consensus::EASINESS as u64)
.verify(&bh.pow, consensus::EASINESS as u64)
}
/// Mines a genesis block using the internal miner
@ -63,7 +64,7 @@ pub fn mine_genesis_block() -> Result<Block, Error> {
// total_difficulty on the genesis header *is* the difficulty of that block
let genesis_difficulty = gen.header.total_difficulty.clone();
let sz = global::sizeshift();
let sz = global::min_sizeshift();
let proof_size = global::proofsize();
pow_size(&mut gen.header, genesis_difficulty, proof_size, sz).unwrap();
@ -125,10 +126,10 @@ mod test {
&mut b.header,
Difficulty::one(),
global::proofsize(),
global::sizeshift(),
global::min_sizeshift(),
).unwrap();
assert!(b.header.nonce != 310);
assert!(b.header.pow.to_difficulty() >= Difficulty::one());
assert!(verify_size(&b.header, global::sizeshift()));
assert!(verify_size(&b.header, global::min_sizeshift()));
}
}

@ -233,8 +233,8 @@ fn empty_block_serialized_size() {
let b = new_block(vec![], &keychain, &prev, &key_id);
let mut vec = Vec::new();
ser::serialize(&mut vec, &b).expect("serialization failed");
let target_len = 1_266;
assert_eq!(vec.len(), target_len,);
let target_len = 1_252;
assert_eq!(vec.len(), target_len);
}
#[test]
@ -246,7 +246,7 @@ fn block_single_tx_serialized_size() {
let b = new_block(vec![&tx1], &keychain, &prev, &key_id);
let mut vec = Vec::new();
ser::serialize(&mut vec, &b).expect("serialization failed");
let target_len = 2_848;
let target_len = 2_834;
assert_eq!(vec.len(), target_len);
}
@ -258,8 +258,8 @@ fn empty_compact_block_serialized_size() {
let b = new_block(vec![], &keychain, &prev, &key_id);
let mut vec = Vec::new();
ser::serialize(&mut vec, &b.as_compact_block()).expect("serialization failed");
let target_len = 1_274;
assert_eq!(vec.len(), target_len,);
let target_len = 1_260;
assert_eq!(vec.len(), target_len);
}
#[test]
@ -271,8 +271,8 @@ fn compact_block_single_tx_serialized_size() {
let b = new_block(vec![&tx1], &keychain, &prev, &key_id);
let mut vec = Vec::new();
ser::serialize(&mut vec, &b.as_compact_block()).expect("serialization failed");
let target_len = 1_280;
assert_eq!(vec.len(), target_len,);
let target_len = 1_266;
assert_eq!(vec.len(), target_len);
}
#[test]
@ -290,7 +290,7 @@ fn block_10_tx_serialized_size() {
let b = new_block(txs.iter().collect(), &keychain, &prev, &key_id);
let mut vec = Vec::new();
ser::serialize(&mut vec, &b).expect("serialization failed");
let target_len = 17_086;
let target_len = 17_072;
assert_eq!(vec.len(), target_len,);
}
@ -308,7 +308,7 @@ fn compact_block_10_tx_serialized_size() {
let b = new_block(txs.iter().collect(), &keychain, &prev, &key_id);
let mut vec = Vec::new();
ser::serialize(&mut vec, &b.as_compact_block()).expect("serialization failed");
let target_len = 1_334;
let target_len = 1_320;
assert_eq!(vec.len(), target_len,);
}

@ -266,7 +266,7 @@ impl Server {
/// Start a minimal "stratum" mining service on a separate thread
pub fn start_stratum_server(&self, config: StratumServerConfig) {
let cuckoo_size = global::sizeshift();
let cuckoo_size = global::min_sizeshift();
let proof_size = global::proofsize();
let currently_syncing = self.currently_syncing.clone();

@ -30,7 +30,7 @@ use common::adapters::PoolToChainAdapter;
use common::stats::{StratumStats, WorkerStats};
use common::types::StratumServerConfig;
use core::core::{Block, BlockHeader};
use core::{consensus, pow};
use core::{pow, global};
use keychain;
use mining::mine_block;
use pool;
@ -77,7 +77,7 @@ struct LoginParams {
struct SubmitParams {
height: u64,
nonce: u64,
pow: Vec<u32>,
pow: Vec<u64>,
}
#[derive(Serialize, Deserialize, Debug)]
@ -496,7 +496,7 @@ impl StratumServer {
} else {
// This is a low-difficulty share, not a full solution
// Do some validation but dont submit
if !pow::verify_size(&b.header, consensus::DEFAULT_SIZESHIFT) {
if !pow::verify_size(&b.header, global::min_sizeshift()) {
// Return error status
error!(
LOGGER,

@ -87,7 +87,7 @@ impl Miner {
LOGGER,
"(Server ID: {}) Mining Cuckoo{} for max {}s on {} @ {} [{}].",
self.debug_output_id,
global::sizeshift(),
global::min_sizeshift(),
attempt_time_per_block,
b.header.total_difficulty,
b.header.height,
@ -101,7 +101,7 @@ impl Miner {
&b.header,
consensus::EASINESS,
global::proofsize(),
global::sizeshift(),
global::min_sizeshift(),
).mine()
{
let proof_diff = proof.to_difficulty();

@ -40,8 +40,8 @@ pub mod tui;
use std::env::current_dir;
use std::process::exit;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::sync::atomic::{AtomicBool, Ordering};
use std::thread;
use std::time::Duration;

@ -13,8 +13,8 @@
// limitations under the License.
use core::core::{self, amount_to_hr_string};
use libwallet::types::{OutputData, WalletInfo};
use libwallet::Error;
use libwallet::types::{OutputData, WalletInfo};
use prettytable;
use std::io::prelude::Write;
use term;

@ -19,24 +19,26 @@ use std::path::{Path, MAIN_SEPARATOR};
use serde_json;
use tokio_core::reactor;
use tokio_retry::strategy::FibonacciBackoff;
use tokio_retry::Retry;
use tokio_retry::strategy::FibonacciBackoff;
use failure::ResultExt;
use keychain::{self, Identifier, Keychain};
use util::secp::pedersen;
use util::LOGGER;
use util::secp::pedersen;
use error::{Error, ErrorKind};
use client;
use libtx::slate::Slate;
use libwallet;
use libwallet::types::{
BlockFees, BlockIdentifier, CbData, OutputData, TxWrapper, WalletBackend,
WalletClient, WalletDetails, WalletOutputBatch,
};
use types::{WalletConfig, WalletSeed};
const DETAIL_FILE: &'static str = "wallet.det";

@ -106,13 +106,7 @@ where
let commit = build.keychain.commit(value, &key_id).unwrap();
trace!(LOGGER, "Builder - Pedersen Commit is: {:?}", commit,);
let rproof = proof::create(
build.keychain,
value,
&key_id,
commit,
None,
).unwrap();
let rproof = proof::create(build.keychain, value, &key_id, commit, None).unwrap();
(
tx.with_output(Output {

@ -38,13 +38,7 @@ where
trace!(LOGGER, "Block reward - Pedersen Commit is: {:?}", commit,);
let rproof = proof::create(
keychain,
value,
key_id,
commit,
None,
)?;
let rproof = proof::create(keychain, value, key_id, commit, None)?;
let output = Output {
features: OutputFeatures::COINBASE_OUTPUT,

@ -22,11 +22,10 @@ use std::marker::PhantomData;
use core::ser;
use keychain::Keychain;
use libtx::slate::Slate;
use libwallet::internal::{tx, updater};
use libwallet::types::{
BlockFees, CbData, OutputData, TxWrapper, WalletBackend, WalletClient, WalletInfo,
};
use libwallet::Error;
use libwallet::internal::{tx, updater};
use libwallet::types::{BlockFees, CbData, OutputData, TxWrapper, WalletBackend, WalletClient,
WalletInfo};
use util::{self, LOGGER};
/// Wrapper around internal API functions, containing a reference to

@ -31,9 +31,8 @@ use failure::Fail;
use keychain::Keychain;
use libtx::slate::Slate;
use libwallet::api::{APIForeign, APIOwner};
use libwallet::types::{
BlockFees, CbData, OutputData, SendTXArgs, WalletBackend, WalletClient, WalletInfo,
};
use libwallet::types::{BlockFees, CbData, OutputData, SendTXArgs, WalletBackend, WalletClient,
WalletInfo};
use libwallet::{Error, ErrorKind};
use util::LOGGER;

@ -48,11 +48,8 @@ pub enum ErrorKind {
},
/// Fee Exceeds amount
#[fail(
display = "Fee exceeds amount: sender amount {}, recipient fee {}",
sender_amount,
recipient_fee
)]
#[fail(display = "Fee exceeds amount: sender amount {}, recipient fee {}", sender_amount,
recipient_fee)]
FeeExceedsAmount {
/// sender amount
sender_amount: u64,

@ -16,9 +16,9 @@
use core::global;
use keychain::{Identifier, Keychain};
use libtx::proof;
use libwallet::types::*;
use libwallet::Error;
use util::secp::{key::SecretKey, pedersen};
use libwallet::types::*;
use util::secp::{pedersen, key::SecretKey};
use util::{self, LOGGER};
/// Utility struct for return values from below

@ -15,7 +15,7 @@
//! Selection of inputs for building transactions
use keychain::{Identifier, Keychain};
use libtx::{build, slate::Slate, tx_fee};
use libtx::{build, tx_fee, slate::Slate};
use libwallet::error::{Error, ErrorKind};
use libwallet::internal::{keys, sigcontext};
use libwallet::types::*;

@ -26,9 +26,8 @@ use libtx::reward;
use libwallet;
use libwallet::error::{Error, ErrorKind};
use libwallet::internal::keys;
use libwallet::types::{
BlockFees, CbData, OutputData, OutputStatus, WalletBackend, WalletClient, WalletInfo,
};
use libwallet::types::{BlockFees, CbData, OutputData, OutputStatus, WalletBackend, WalletClient,
WalletInfo};
use util::secp::pedersen;
use util::{self, LOGGER};

@ -13,8 +13,8 @@
// limitations under the License.
use std::cell::RefCell;
use std::collections::hash_map::Values;
use std::collections::HashMap;
use std::collections::hash_map::Values;
use std::ops::Deref;
use std::sync::Arc;
use std::{fs, path};

@ -147,7 +147,7 @@ pub fn add_block_with_reward(chain: &Chain, txs: Vec<&Transaction>, reward: (Out
&mut b.header,
difficulty,
global::proofsize(),
global::sizeshift(),
global::min_sizeshift(),
).unwrap();
chain.process_block(b, chain::Options::MINE).unwrap();
chain.validate(false).unwrap();