Move nonce/pow to end of block header, change post-pow hash (#794)

* move nonce/pow to end of block header, update call to cuckoo-miner
* change header hash to be pow only
* fix genesis block miner and tests
Authored by Yeastplume on 2018-03-16 19:04:31 +00:00, committed by Ignotus Peverell
parent 1bad7188b7
commit b53b84b0c6
6 changed files with 103 additions and 94 deletions
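
Taken together, the changes below move the proof-of-work fields to the end of the header, make the block hash commit to the proof of work alone, and give miners a dedicated pre-pow hash over everything up to and including the nonce. A minimal sketch of the resulting scheme, assuming the core::core paths and global::proofsize() used in the diffs below:

use core::core::{BlockHeader, Proof};
use core::core::hash::{Hash, Hashed};
use core::global;

fn hashing_scheme_sketch() {
    let proof_size = global::proofsize();
    let mut header = BlockHeader::default();

    // Miners search against the pre-pow hash: every header field up to and
    // including the nonce, but not the proof itself.
    let mining_input: Hash = header.pre_pow_hash();

    // Once a cycle is found it is written into the header (Proof::random
    // stands in for a real Cuckoo solution here)...
    header.pow = Proof::random(proof_size);

    // ...and the block hash is derived from the proof of work only, via the
    // Hash serialization mode.
    let block_hash: Hash = header.hash();

    assert_ne!(mining_input, block_hash);
}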

View file

@@ -421,6 +421,7 @@ fn prepare_block_nosum(
diff: u64,
txs: Vec<&Transaction>,
) -> Block {
let proof_size = global::proofsize();
let key_id = kc.derive_key_id(diff as u32).unwrap();
let mut b = match core::core::Block::new(prev, txs, kc, &key_id, Difficulty::from_num(diff)) {
@@ -429,5 +430,6 @@ fn prepare_block_nosum(
};
b.header.timestamp = prev.timestamp + time::Duration::seconds(60);
b.header.total_difficulty = prev.total_difficulty.clone() + Difficulty::from_num(diff);
b.header.pow = core::core::Proof::random(proof_size);
b
}

View file

@@ -22,7 +22,7 @@ use core::{Committed, Input, KernelFeatures, Output, OutputFeatures, Proof, Proo
ShortId, SwitchCommitHash, Transaction, TxKernel};
use consensus;
use consensus::{exceeds_weight, reward, VerifySortOrder, REWARD};
use core::hash::{Hash, Hashed, ZERO_HASH};
use core::hash::{Hash, HashWriter, Hashed, ZERO_HASH};
use core::id::ShortIdentifiable;
use core::target::Difficulty;
use core::transaction;
@@ -105,22 +105,22 @@ pub struct BlockHeader {
pub previous: Hash,
/// Timestamp at which the block was built.
pub timestamp: time::Tm,
/// Total accumulated difficulty since genesis block
pub total_difficulty: Difficulty,
/// Merklish root of all the commitments in the TxHashSet
pub output_root: Hash,
/// Merklish root of all range proofs in the TxHashSet
pub range_proof_root: Hash,
/// Merklish root of all transaction kernels in the TxHashSet
pub kernel_root: Hash,
/// Nonce increment used to mine this block.
pub nonce: u64,
/// Proof of work data.
pub pow: Proof,
/// Total accumulated difficulty since genesis block
pub total_difficulty: Difficulty,
/// Total accumulated sum of kernel offsets since genesis block.
/// We can derive the kernel offset sum for *this* block from
/// the total kernel offset of the previous block header.
pub total_kernel_offset: BlindingFactor,
/// Nonce increment used to mine this block.
pub nonce: u64,
/// Proof of work data.
pub pow: Proof,
}
impl Default for BlockHeader {
@@ -135,9 +135,9 @@ impl Default for BlockHeader {
output_root: ZERO_HASH,
range_proof_root: ZERO_HASH,
kernel_root: ZERO_HASH,
total_kernel_offset: BlindingFactor::zero(),
nonce: 0,
pow: Proof::zero(proof_size),
total_kernel_offset: BlindingFactor::zero(),
}
}
}
@@ -145,23 +145,11 @@ impl Default for BlockHeader {
/// Serialization of a block header
impl Writeable for BlockHeader {
fn write<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
ser_multiwrite!(
writer,
[write_u16, self.version],
[write_u64, self.height],
[write_fixed_bytes, &self.previous],
[write_i64, self.timestamp.to_timespec().sec],
[write_fixed_bytes, &self.output_root],
[write_fixed_bytes, &self.range_proof_root],
[write_fixed_bytes, &self.kernel_root]
);
try!(writer.write_u64(self.nonce));
try!(self.total_difficulty.write(writer));
try!(self.total_kernel_offset.write(writer));
if writer.serialization_mode() != ser::SerializationMode::Hash {
if writer.serialization_mode() == ser::SerializationMode::Hash {
try!(self.pow.write(writer));
} else {
self.write_pre_pow(writer)?;
self.pow.write(writer)?;
}
Ok(())
}
@@ -173,12 +161,12 @@ impl Readable for BlockHeader {
let (version, height) = ser_multiread!(reader, read_u16, read_u64);
let previous = Hash::read(reader)?;
let timestamp = reader.read_i64()?;
let total_difficulty = Difficulty::read(reader)?;
let output_root = Hash::read(reader)?;
let rproof_root = Hash::read(reader)?;
let kernel_root = Hash::read(reader)?;
let nonce = reader.read_u64()?;
let total_difficulty = Difficulty::read(reader)?;
let total_kernel_offset = BlindingFactor::read(reader)?;
let nonce = reader.read_u64()?;
let pow = Proof::read(reader)?;
if timestamp > (1 << 60) {
@@ -193,17 +181,47 @@ impl Readable for BlockHeader {
sec: timestamp,
nsec: 0,
}),
total_difficulty: total_difficulty,
output_root: output_root,
range_proof_root: rproof_root,
kernel_root: kernel_root,
pow: pow,
nonce: nonce,
total_difficulty: total_difficulty,
total_kernel_offset: total_kernel_offset,
nonce: nonce,
pow: pow,
})
}
}
impl BlockHeader {
/// Write the pre-pow portion of the header: everything up to and including the nonce
pub fn write_pre_pow<W: Writer>(&self, writer: &mut W) -> Result<(), ser::Error> {
ser_multiwrite!(
writer,
[write_u16, self.version],
[write_u64, self.height],
[write_fixed_bytes, &self.previous],
[write_i64, self.timestamp.to_timespec().sec],
[write_u64, self.total_difficulty.into_num()],
[write_fixed_bytes, &self.output_root],
[write_fixed_bytes, &self.range_proof_root],
[write_fixed_bytes, &self.kernel_root],
[write_fixed_bytes, &self.total_kernel_offset],
[write_u64, self.nonce]
);
Ok(())
}
/// Returns the pre-pow hash, as the post-pow hash
/// should just be the hash of the POW
pub fn pre_pow_hash(&self) -> Hash {
let mut hasher = HashWriter::default();
self.write_pre_pow(&mut hasher).unwrap();
let mut ret = [0; 32];
hasher.finalize(&mut ret);
Hash(ret)
}
}
/// Compact representation of a full block.
/// Each input/output/kernel is represented as a short_id.
/// A node is reasonably likely to have already seen all tx data (tx broadcast before block)
@@ -566,7 +584,7 @@ impl Block {
}.cut_through())
}
/// Blockhash, computed using only the header
/// Blockhash, computed using only the POW
pub fn hash(&self) -> Hash {
self.header.hash()
}
@@ -1124,7 +1142,7 @@ mod test {
let cb2 = b.as_compact_block();
// random nonce will not affect the hash of the compact block itself
// hash is based on header only
// hash is based on header POW only
assert!(cb1.nonce != cb2.nonce);
assert_eq!(b.hash(), cb1.hash());
assert_eq!(cb1.hash(), cb2.hash());
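
With the Writeable change above, SerializationMode::Hash writes nothing but the proof, so the block hash can be reproduced by hashing the pow field on its own. A sketch of that equivalence, assuming the blanket Hashed impl hashes through a HashWriter just as pre_pow_hash() does:

use core::core::BlockHeader;
use core::core::hash::{Hash, HashWriter, Hashed};
use core::ser::Writeable;

fn hash_is_pow_only(header: &BlockHeader) -> bool {
    // Hash the proof of work by itself, mirroring pre_pow_hash() but over
    // the pow field instead of the pre-pow fields.
    let mut hasher = HashWriter::default();
    header.pow.write(&mut hasher).unwrap();
    let mut ret = [0; 32];
    hasher.finalize(&mut ret);

    // header.hash() serializes in Hash mode, which now emits only the pow,
    // so the two digests should match.
    Hash(ret) == header.hash()
}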

View file

@@ -24,7 +24,8 @@ pub mod transaction;
// pub mod txoset;
#[allow(dead_code)]
use std::fmt;
use rand::{thread_rng, Rng};
use std::{fmt, iter};
use std::cmp::Ordering;
use std::num::ParseFloatError;
use consensus::GRIN_BASE;
@@ -152,6 +153,21 @@ impl Proof {
}
}
/// Builds a proof with random POW data,
/// needed so that tests that ignore POW
/// don't fail due to duplicate hashes
pub fn random(proof_size: usize) -> Proof {
let mut rng = thread_rng();
let v: Vec<u32> = iter::repeat(())
.map(|()| rng.gen())
.take(proof_size)
.collect();
Proof {
proof_size: proof_size,
nonces: v,
}
}
/// Converts the proof to a vector of u64s
pub fn to_u64s(&self) -> Vec<u64> {
let mut out_nonces = Vec::with_capacity(self.proof_size);
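
Because the block hash is now derived from the proof alone, headers that skip real mining would all hash identically; tests that store several such headers therefore seed each one with Proof::random. A small illustration along the lines of the pool test changes further down, assuming the same core::core paths:

use core::core::{BlockHeader, Proof};
use core::core::hash::Hashed;
use core::global;

fn distinct_test_headers() -> (BlockHeader, BlockHeader) {
    let proof_size = global::proofsize();

    // Without random proofs these two headers would collide on hash(),
    // since only the pow field contributes to the block hash.
    let h1 = BlockHeader {
        height: 1,
        pow: Proof::random(proof_size),
        ..BlockHeader::default()
    };
    let h2 = BlockHeader {
        height: 2,
        pow: Proof::random(proof_size),
        ..BlockHeader::default()
    };
    assert!(h1.hash() != h2.hash());
    (h1, h2)
}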

View file

@@ -51,64 +51,41 @@ use itertools::Itertools;
// Max number of transactions this miner will assemble in a block
const MAX_TX: u32 = 5000;
const PRE_NONCE_SIZE: usize = 146;
/// Serializer that outputs pre and post nonce portions of a block header
/// which can then be sent off to miner to mutate at will
pub struct HeaderPartWriter {
//
pub pre_nonce: Vec<u8>,
// Post nonce is currently variable length
// because of difficulty
pub post_nonce: Vec<u8>,
// which difficulty field we're on
bytes_written: usize,
writing_pre: bool,
/// Serializer that outputs the pre-pow part of the header,
/// including the nonce (last 8 bytes) that can be sent off
/// to the miner to mutate at will
pub struct HeaderPrePowWriter {
pub pre_pow: Vec<u8>,
}
impl Default for HeaderPartWriter {
fn default() -> HeaderPartWriter {
HeaderPartWriter {
bytes_written: 0,
writing_pre: true,
pre_nonce: Vec::new(),
post_nonce: Vec::new(),
impl Default for HeaderPrePowWriter {
fn default() -> HeaderPrePowWriter {
HeaderPrePowWriter {
pre_pow: Vec::new(),
}
}
}
impl HeaderPartWriter {
pub fn parts_as_hex_strings(&self) -> (String, String) {
(
String::from(format!("{:02x}", self.pre_nonce.iter().format(""))),
String::from(format!("{:02x}", self.post_nonce.iter().format(""))),
)
impl HeaderPrePowWriter {
pub fn as_hex_string(&self, include_nonce: bool) -> String {
let mut result = String::from(format!("{:02x}", self.pre_pow.iter().format("")));
if !include_nonce {
let l = result.len() - 16;
result.truncate(l);
}
result
}
}
impl ser::Writer for HeaderPartWriter {
impl ser::Writer for HeaderPrePowWriter {
fn serialization_mode(&self) -> ser::SerializationMode {
ser::SerializationMode::Hash
ser::SerializationMode::Full
}
fn write_fixed_bytes<T: AsFixedBytes>(&mut self, bytes_in: &T) -> Result<(), ser::Error> {
if self.writing_pre {
for i in 0..bytes_in.len() {
self.pre_nonce.push(bytes_in.as_ref()[i])
self.pre_pow.push(bytes_in.as_ref()[i])
}
} else if self.bytes_written != 0 {
for i in 0..bytes_in.len() {
self.post_nonce.push(bytes_in.as_ref()[i])
}
}
self.bytes_written += bytes_in.len();
if self.bytes_written == PRE_NONCE_SIZE && self.writing_pre {
self.writing_pre = false;
self.bytes_written = 0;
}
Ok(())
}
}
@@ -181,20 +158,13 @@ impl Miner {
let mut next_stat_output = time::get_time().sec + stat_output_interval;
// Get parts of the header
let mut header_parts = HeaderPartWriter::default();
ser::Writeable::write(&b.header, &mut header_parts).unwrap();
let (pre, post) = header_parts.parts_as_hex_strings();
//Just test output to mine a genesis block when needed
/*let mut header_parts = HeaderPartWriter::default();
let gen = genesis::genesis();
ser::Writeable::write(&gen.header, &mut header_parts).unwrap();
let (pre, post) = header_parts.parts_as_hex_strings();
println!("pre, post: {}, {}", pre, post);*/
let mut pre_pow_writer = HeaderPrePowWriter::default();
b.header.write_pre_pow(&mut pre_pow_writer).unwrap();
let pre_pow = pre_pow_writer.as_hex_string(false);
// Start the miner working
let miner = plugin_miner.get_consumable();
let job_handle = miner.notify(1, &pre, &post, 0).unwrap();
let job_handle = miner.notify(1, &pre_pow, "", 0).unwrap();
let mut sol = None;
@@ -320,7 +290,7 @@ impl Miner {
let mut sol = None;
while head.hash() == *latest_hash && time::get_time().sec < deadline {
let pow_hash = b.hash();
let pow_hash = b.header.pre_pow_hash();
if let Ok(proof) = plugin_miner.mine(&pow_hash[..]) {
let proof_diff = proof.clone().to_difficulty();
trace!(
@@ -437,7 +407,7 @@ impl Miner {
let mut sol = None;
while head.hash() == *latest_hash && time::get_time().sec < deadline {
let pow_hash = b.hash();
let pow_hash = b.header.pre_pow_hash();
if let Ok(proof) = miner.mine(&pow_hash[..]) {
let proof_diff = proof.clone().to_difficulty();
if proof_diff > (b.header.total_difficulty.clone() - head.total_difficulty.clone())
@@ -588,13 +558,13 @@ impl Miner {
// we found a solution, push our block through the chain processing pipeline
if let Some(proof) = sol {
b.header.pow = proof;
info!(
LOGGER,
"(Server ID: {}) Found valid proof of work, adding block {}.",
self.debug_output_id,
b.hash()
);
b.header.pow = proof;
let res = self.chain.process_block(b, chain::Options::MINE);
if let Err(e) = res {
error!(
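
The cuckoo-miner plugin now receives a single pre-pow blob whose final 8 bytes are the nonce it mutates; as_hex_string(false) drops those 16 hex characters so the plugin can supply its own, and since nothing follows the nonce any more the old post-nonce string is simply empty. A sketch of the hand-off, using a hypothetical build_job_strings helper that would sit alongside the mining loop above:

use core::core::BlockHeader;

// Hypothetical helper mirroring the loop above: build the strings handed to
// cuckoo-miner's notify(job_id, pre, post, difficulty) for a given header.
fn build_job_strings(header: &BlockHeader) -> (String, String) {
    let mut pre_pow_writer = HeaderPrePowWriter::default();
    header.write_pre_pow(&mut pre_pow_writer).unwrap();

    // Pre-pow serialization minus the trailing nonce bytes.
    let pre = pre_pow_writer.as_hex_string(false);
    // The nonce is the last header field before the pow, so there is no
    // post-nonce data left to send.
    let post = String::new();
    (pre, post)
}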

View file

@@ -636,7 +636,7 @@ mod tests {
use std::sync::{Arc, RwLock};
use blake2;
use core::global::ChainTypes;
use core::core::SwitchCommitHash;
use core::core::{Proof, SwitchCommitHash};
use core::core::hash::{Hash, Hashed};
use core::core::pmmr::MerkleProof;
use core::core::target::Difficulty;
@@ -833,6 +833,7 @@ mod tests {
fn test_immature_coinbase() {
global::set_mining_mode(ChainTypes::AutomatedTesting);
let mut dummy_chain = DummyChainImpl::new();
let proof_size = global::proofsize();
let lock_height = 1 + global::coinbase_maturity();
assert_eq!(lock_height, 4);
@@ -848,12 +849,14 @@ mod tests {
let coinbase_header = block::BlockHeader {
height: 1,
pow: Proof::random(proof_size),
..block::BlockHeader::default()
};
chain_ref.store_head_header(&coinbase_header);
let head_header = block::BlockHeader {
height: 2,
pow: Proof::random(proof_size),
..block::BlockHeader::default()
};
chain_ref.store_head_header(&head_header);

View file

@@ -53,7 +53,6 @@ pub mod types;
use core::consensus;
use core::core::BlockHeader;
use core::core::hash::Hashed;
use core::core::Proof;
use core::core::target::Difficulty;
use core::global;
@@ -77,7 +76,8 @@ pub trait MiningWorker {
/// Validates the proof of work of a given header, and that the proof of work
/// satisfies the requirements of the header.
pub fn verify_size(bh: &BlockHeader, cuckoo_sz: u32) -> bool {
Cuckoo::new(&bh.hash()[..], cuckoo_sz).verify(bh.pow.clone(), consensus::EASINESS as u64)
Cuckoo::new(&bh.pre_pow_hash()[..], cuckoo_sz)
.verify(bh.pow.clone(), consensus::EASINESS as u64)
}
/// Mines a genesis block, using the config specified miner if specified.
@@ -127,7 +127,7 @@ pub fn pow_size<T: MiningWorker + ?Sized>(
loop {
// can be trivially optimized by avoiding re-serialization every time but this
// is not meant as a fast miner implementation
let pow_hash = bh.hash();
let pow_hash = bh.pre_pow_hash();
// if we found a cycle (not guaranteed) and the proof hash is higher than the
// diff, we're all good
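
Putting the verification pieces together: a mined header is acceptable when its cycle verifies against the pre-pow hash at the expected cuckoo size and the proof's own difficulty meets the target. A sketch of such a check, which would sit alongside verify_size above, assuming Proof::to_difficulty() and Difficulty comparisons behave as used in the mining loops:

use core::core::BlockHeader;
use core::core::target::Difficulty;

// Hypothetical acceptance check combining verify_size with a difficulty test.
fn accept_header(bh: &BlockHeader, cuckoo_sz: u32, target: Difficulty) -> bool {
    // The cycle must verify against the pre-pow hash of the header...
    let size_ok = verify_size(bh, cuckoo_sz);
    // ...and the proof itself must hash to at least the target difficulty.
    let diff_ok = bh.pow.clone().to_difficulty() >= target;
    size_ok && diff_ok
}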