Hard fork tryout: better PoW encapsulation in block header (#1478)

* Improve encapsulation with ProofOfWork struct
* Add dual pow scaling factor, fix test
* Fix pre_pow serialization, chain tests
* Adjust header serialized size calc
* Hard fork handling, version-based serialization
Ignotus Peverell 2018-09-10 15:36:57 -07:00 committed by GitHub
parent 48857b7e16
commit ecf20602d5
17 changed files with 262 additions and 128 deletions


@@ -521,10 +521,10 @@ impl BlockHeaderPrintable {
 			output_root: util::to_hex(h.output_root.to_vec()),
 			range_proof_root: util::to_hex(h.range_proof_root.to_vec()),
 			kernel_root: util::to_hex(h.kernel_root.to_vec()),
-			nonce: h.nonce,
-			cuckoo_size: h.pow.cuckoo_sizeshift,
-			cuckoo_solution: h.pow.nonces.clone(),
-			total_difficulty: h.total_difficulty.to_num(),
+			nonce: h.pow.nonce,
+			cuckoo_size: h.pow.cuckoo_sizeshift(),
+			cuckoo_solution: h.pow.proof.nonces.clone(),
+			total_difficulty: h.pow.total_difficulty.to_num(),
 			total_kernel_offset: h.total_kernel_offset.to_hex(),
 		}
 	}


@@ -392,14 +392,14 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), E
 	}
 	if !ctx.opts.contains(Options::SKIP_POW) {
-		if global::min_sizeshift() > header.pow.cuckoo_sizeshift {
+		if global::min_sizeshift() > header.pow.cuckoo_sizeshift() {
 			return Err(ErrorKind::LowSizeshift.into());
 		}
-		if !(ctx.pow_verifier)(header, header.pow.cuckoo_sizeshift) {
+		if !(ctx.pow_verifier)(header, header.pow.cuckoo_sizeshift()) {
 			error!(
 				LOGGER,
 				"pipe: validate_header failed for cuckoo shift size {}",
-				header.pow.cuckoo_sizeshift,
+				header.pow.cuckoo_sizeshift()
 			);
 			return Err(ErrorKind::InvalidPow.into());
 		}

@@ -436,11 +436,11 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), E
 	// check the pow hash shows a difficulty at least as large
 	// as the target difficulty
 	if !ctx.opts.contains(Options::SKIP_POW) {
-		if header.total_difficulty.clone() <= prev.total_difficulty.clone() {
+		if header.total_difficulty() <= prev.total_difficulty() {
 			return Err(ErrorKind::DifficultyTooLow.into());
 		}
-		let target_difficulty = header.total_difficulty.clone() - prev.total_difficulty.clone();
+		let target_difficulty = header.total_difficulty() - prev.total_difficulty();
 		if header.pow.to_difficulty() < target_difficulty {
 			return Err(ErrorKind::DifficultyTooLow.into());
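Note on the hunk above (illustrative, not part of the diff): the two checks are distinct. The first rejects any header whose claimed total difficulty does not strictly exceed its parent's; the second rejects headers whose actual proof is weaker than the difficulty the block claims to have added. A minimal standalone sketch, with the Difficulty type reduced to a plain u64:

    // Sketch of the two difficulty checks in validate_header above.
    fn check_difficulty(header_total: u64, prev_total: u64, proof_difficulty: u64) -> Result<(), &'static str> {
        if header_total <= prev_total {
            return Err("DifficultyTooLow: total difficulty must strictly increase");
        }
        let target = header_total - prev_total; // difficulty this block claims to add
        if proof_difficulty < target {
            return Err("DifficultyTooLow: proof does not meet the claimed target");
        }
        Ok(())
    }

    fn main() {
        assert!(check_difficulty(110, 100, 10).is_ok());
        assert!(check_difficulty(110, 100, 9).is_err());  // proof too weak
        assert!(check_difficulty(100, 100, 50).is_err()); // no difficulty added
    }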


@@ -446,8 +446,8 @@ impl Iterator for DifficultyIter {
 			let prev_difficulty = self
 				.prev_header
 				.clone()
-				.map_or(Difficulty::zero(), |x| x.total_difficulty);
-			let difficulty = header.total_difficulty - prev_difficulty;
+				.map_or(Difficulty::zero(), |x| x.total_difficulty());
+			let difficulty = header.total_difficulty() - prev_difficulty;
 			Some(Ok((header.timestamp.timestamp() as u64, difficulty)))
 		} else {


@@ -78,7 +78,7 @@ impl Tip {
 			height: bh.height,
 			last_block_h: bh.hash(),
 			prev_block_h: bh.previous,
-			total_difficulty: bh.total_difficulty.clone(),
+			total_difficulty: bh.total_difficulty(),
 		}
 	}
 }


@@ -169,6 +169,6 @@ fn _prepare_block_nosum(
 		Ok(b) => b,
 	};
 	b.header.timestamp = prev.timestamp + Duration::seconds(60);
-	b.header.total_difficulty = Difficulty::from_num(diff);
+	b.header.pow.total_difficulty = Difficulty::from_num(diff);
 	b
 }


@@ -77,9 +77,9 @@ fn mine_empty_chain() {
 		} else {
 			global::min_sizeshift()
 		};
-		b.header.pow.cuckoo_sizeshift = sizeshift;
+		b.header.pow.proof.cuckoo_sizeshift = sizeshift;
 		pow::pow_size(&mut b.header, difficulty, global::proofsize(), sizeshift).unwrap();
-		b.header.pow.cuckoo_sizeshift = sizeshift;
+		b.header.pow.proof.cuckoo_sizeshift = sizeshift;
 		let bhash = b.hash();
 		chain.process_block(b, chain::Options::MINE).unwrap();

@@ -390,9 +390,9 @@ fn output_header_mappings() {
 		} else {
 			global::min_sizeshift()
 		};
-		b.header.pow.cuckoo_sizeshift = sizeshift;
+		b.header.pow.proof.cuckoo_sizeshift = sizeshift;
 		pow::pow_size(&mut b.header, difficulty, global::proofsize(), sizeshift).unwrap();
-		b.header.pow.cuckoo_sizeshift = sizeshift;
+		b.header.pow.proof.cuckoo_sizeshift = sizeshift;
 		chain.process_block(b, chain::Options::MINE).unwrap();

@@ -479,8 +479,8 @@ where
 		Ok(b) => b,
 	};
 	b.header.timestamp = prev.timestamp + Duration::seconds(60);
-	b.header.total_difficulty = prev.total_difficulty.clone() + Difficulty::from_num(diff);
-	b.header.pow = core::core::Proof::random(proof_size);
+	b.header.pow.total_difficulty = prev.total_difficulty() + Difficulty::from_num(diff);
+	b.header.pow.proof = core::core::Proof::random(proof_size);
 	b
 }


@@ -105,16 +105,16 @@ pub const HARD_FORK_INTERVAL: u64 = 250_000;
 /// 6 months interval scheduled hard forks for the first 2 years.
 pub fn valid_header_version(height: u64, version: u16) -> bool {
 	// uncomment below as we go from hard fork to hard fork
-	if height <= HARD_FORK_INTERVAL && version == 1 {
-		true
-	/* } else if height <= 2 * HARD_FORK_INTERVAL && version == 2 {
-		true */
-	/* } else if height <= 3 * HARD_FORK_INTERVAL && version == 3 {
-		true */
-	/* } else if height <= 4 * HARD_FORK_INTERVAL && version == 4 {
-		true */
-	/* } else if height > 4 * HARD_FORK_INTERVAL && version > 4 {
-		true */
+	if height < HEADER_V2_HARD_FORK {
+		version == 1
+	} else if height < HARD_FORK_INTERVAL {
+		version == 2
+	} else if height < 2 * HARD_FORK_INTERVAL {
+		version == 3
+	/* } else if height < 3 * HARD_FORK_INTERVAL {
+		version == 4 */
+	/* } else if height >= 4 * HARD_FORK_INTERVAL {
+		version > 4 */
 	} else {
 		false
 	}

@@ -249,3 +249,5 @@ pub trait VerifySortOrder<T> {
 	/// Verify a collection of items is sorted as required.
 	fn verify_sort_order(&self) -> Result<(), Error>;
 }
+
+pub const HEADER_V2_HARD_FORK: u64 = 95_000;
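For reference (illustrative, not part of the commit): the schedule above now accepts exactly one version per height band, with the first band ending early at the new HEADER_V2_HARD_FORK constant rather than at HARD_FORK_INTERVAL. A sketch against the public consensus API; the expected_version helper is hypothetical, while the constants and valid_header_version are the items added or changed in this diff:

    use grin_core::consensus::{valid_header_version, HARD_FORK_INTERVAL, HEADER_V2_HARD_FORK};

    // Hypothetical helper mirroring the branch structure of valid_header_version.
    fn expected_version(height: u64) -> u16 {
        if height < HEADER_V2_HARD_FORK {
            1
        } else if height < HARD_FORK_INTERVAL {
            2
        } else {
            3 // accepted up to 2 * HARD_FORK_INTERVAL under the code above
        }
    }

    fn main() {
        assert!(valid_header_version(HEADER_V2_HARD_FORK - 1, 1)); // last v1 height
        assert!(valid_header_version(HEADER_V2_HARD_FORK, 2));     // first v2 height
        assert!(!valid_header_version(HEADER_V2_HARD_FORK, 1));    // v1 rejected from the fork on
        assert_eq!(expected_version(250_000), 3);                  // v3 from 250_000 onwards
    }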


@@ -109,6 +109,79 @@ impl fmt::Display for Error {
 	}
 }
 
+/// Block header information pertaining to the proof of work
+#[derive(Clone, Debug, PartialEq)]
+pub struct ProofOfWork {
+	/// Total accumulated difficulty since genesis block
+	pub total_difficulty: Difficulty,
+	/// Difficulty scaling factor between the different proofs of work
+	pub scaling_difficulty: u64,
+	/// Nonce increment used to mine this block.
+	pub nonce: u64,
+	/// Proof of work data.
+	pub proof: Proof,
+}
+
+impl Default for ProofOfWork {
+	fn default() -> ProofOfWork {
+		let proof_size = global::proofsize();
+		ProofOfWork {
+			total_difficulty: Difficulty::one(),
+			scaling_difficulty: 1,
+			nonce: 0,
+			proof: Proof::zero(proof_size),
+		}
+	}
+}
+
+impl ProofOfWork {
+	/// Read implementation, can't define as trait impl as we need a version
+	fn read(ver: u16, reader: &mut Reader) -> Result<ProofOfWork, ser::Error> {
+		let (total_difficulty, scaling_difficulty) = if ver == 1 {
+			// read earlier in the header on older versions
+			(Difficulty::one(), 1)
+		} else {
+			(Difficulty::read(reader)?, reader.read_u64()?)
+		};
+		let nonce = reader.read_u64()?;
+		let proof = Proof::read(reader)?;
+		Ok(ProofOfWork { total_difficulty, scaling_difficulty, nonce, proof })
+	}
+
+	/// Write implementation, can't define as trait impl as we need a version
+	fn write<W: Writer>(&self, ver: u16, writer: &mut W) -> Result<(), ser::Error> {
+		if writer.serialization_mode() != ser::SerializationMode::Hash {
+			self.write_pre_pow(ver, writer)?;
+		}
+		self.proof.write(writer)?;
+		Ok(())
+	}
+
+	/// Write the pre-hash portion of the header
+	pub fn write_pre_pow<W: Writer>(&self, ver: u16, writer: &mut W) -> Result<(), ser::Error> {
+		if ver > 1 {
+			ser_multiwrite!(
+				writer,
+				[write_u64, self.total_difficulty.to_num()],
+				[write_u64, self.scaling_difficulty]
+			);
+		}
+		writer.write_u64(self.nonce)?;
+		Ok(())
+	}
+
+	/// Maximum difficulty this proof of work can achieve
+	pub fn to_difficulty(&self) -> Difficulty {
+		self.proof.to_difficulty()
+	}
+
+	/// The shift used for the cuckoo cycle size on this proof
+	pub fn cuckoo_sizeshift(&self) -> u8 {
+		self.proof.cuckoo_sizeshift
+	}
+}
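The version gate in ProofOfWork::read and write_pre_pow is the core of the serialization change: v1 headers carry the difficulty in the header body, while v2 headers carry total_difficulty plus the new scaling_difficulty inside the pow section. A standalone sketch of the resulting byte layout, assuming write_u64 emits 8 bytes big-endian as in the crate's serializer (the bit-packed cuckoo proof that follows is omitted):

    // Sketch only: field widths follow write_u64; not the crate's actual Writer API.
    fn pow_pre_pow_bytes(ver: u16, total_difficulty: u64, scaling_difficulty: u64, nonce: u64) -> Vec<u8> {
        let mut out = Vec::new();
        if ver > 1 {
            // v2+: difficulty and scaling factor move inside the pow section
            out.extend_from_slice(&total_difficulty.to_be_bytes());
            out.extend_from_slice(&scaling_difficulty.to_be_bytes());
        }
        // the nonce is written for every version
        out.extend_from_slice(&nonce.to_be_bytes());
        out
    }

    fn main() {
        assert_eq!(pow_pre_pow_bytes(1, 1, 1, 42).len(), 8);  // nonce only
        assert_eq!(pow_pre_pow_bytes(2, 1, 1, 42).len(), 24); // + difficulty + scaling factor
    }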
 /// Block header, fairly standard compared to other blockchains.
 #[derive(Clone, Debug, PartialEq)]
 pub struct BlockHeader {

@@ -120,8 +193,6 @@ pub struct BlockHeader {
 	pub previous: Hash,
 	/// Timestamp at which the block was built.
 	pub timestamp: DateTime<Utc>,
-	/// Total accumulated difficulty since genesis block
-	pub total_difficulty: Difficulty,
 	/// Merklish root of all the commitments in the TxHashSet
 	pub output_root: Hash,
 	/// Merklish root of all range proofs in the TxHashSet

@@ -139,20 +210,17 @@ pub struct BlockHeader {
 	pub output_mmr_size: u64,
 	/// Total size of the kernel MMR after applying this block
 	pub kernel_mmr_size: u64,
-	/// Nonce increment used to mine this block.
-	pub nonce: u64,
-	/// Proof of work data.
-	pub pow: Proof,
+	/// Proof of work and related
+	pub pow: ProofOfWork,
 }
 
 /// Serialized size of fixed part of a BlockHeader, i.e. without pow
-fn fixed_size_of_serialized_header() -> usize {
+fn fixed_size_of_serialized_header(version: u16) -> usize {
 	let mut size: usize = 0;
 	size += mem::size_of::<u16>(); // version
 	size += mem::size_of::<u64>(); // height
 	size += mem::size_of::<Hash>(); // previous
 	size += mem::size_of::<u64>(); // timestamp
-	size += mem::size_of::<Difficulty>(); // total_difficulty
 	size += mem::size_of::<Hash>(); // output_root
 	size += mem::size_of::<Hash>(); // range_proof_root
 	size += mem::size_of::<Hash>(); // kernel_root

@@ -160,13 +228,17 @@ fn fixed_size_of_serialized_header() -> usize {
 	size += mem::size_of::<Commitment>(); // total_kernel_sum
 	size += mem::size_of::<u64>(); // output_mmr_size
 	size += mem::size_of::<u64>(); // kernel_mmr_size
+	size += mem::size_of::<Difficulty>(); // total_difficulty
+	if version >= 2 {
+		size += mem::size_of::<u64>(); // scaling_difficulty
+	}
 	size += mem::size_of::<u64>(); // nonce
 	size
 }
 
 /// Serialized size of a BlockHeader
-pub fn serialized_size_of_header(cuckoo_sizeshift: u8) -> usize {
-	let mut size = fixed_size_of_serialized_header();
+pub fn serialized_size_of_header(version: u16, cuckoo_sizeshift: u8) -> usize {
+	let mut size = fixed_size_of_serialized_header(version);
 	size += mem::size_of::<u8>(); // pow.cuckoo_sizeshift
 	let nonce_bits = cuckoo_sizeshift as usize - 1;

@@ -180,13 +252,11 @@ pub fn serialized_size_of_header(cuckoo_sizeshift: u8) -> usize {
 impl Default for BlockHeader {
 	fn default() -> BlockHeader {
-		let proof_size = global::proofsize();
 		BlockHeader {
 			version: 1,
 			height: 0,
 			previous: ZERO_HASH,
 			timestamp: DateTime::<Utc>::from_utc(NaiveDateTime::from_timestamp(0, 0), Utc),
-			total_difficulty: Difficulty::one(),
 			output_root: ZERO_HASH,
 			range_proof_root: ZERO_HASH,
 			kernel_root: ZERO_HASH,

@@ -194,8 +264,7 @@ impl Default for BlockHeader {
 			total_kernel_sum: Commitment::from_vec(vec![0; 33]),
 			output_mmr_size: 0,
 			kernel_mmr_size: 0,
-			nonce: 0,
-			pow: Proof::zero(proof_size),
+			pow: ProofOfWork::default(),
 		}
 	}
 }

@@ -206,8 +275,7 @@ impl Writeable for BlockHeader {
 		if writer.serialization_mode() != ser::SerializationMode::Hash {
 			self.write_pre_pow(writer)?;
 		}
-		self.pow.write(writer)?;
+		self.pow.write(self.version, writer)?;
 		Ok(())
 	}
 }

@@ -218,15 +286,20 @@ impl Readable for BlockHeader {
 		let (version, height) = ser_multiread!(reader, read_u16, read_u64);
 		let previous = Hash::read(reader)?;
 		let timestamp = reader.read_i64()?;
-		let total_difficulty = Difficulty::read(reader)?;
+		let mut total_difficulty = None;
+		if version == 1 {
+			total_difficulty = Some(Difficulty::read(reader)?);
+		}
 		let output_root = Hash::read(reader)?;
 		let range_proof_root = Hash::read(reader)?;
 		let kernel_root = Hash::read(reader)?;
 		let total_kernel_offset = BlindingFactor::read(reader)?;
 		let total_kernel_sum = Commitment::read(reader)?;
-		let (output_mmr_size, kernel_mmr_size, nonce) =
-			ser_multiread!(reader, read_u64, read_u64, read_u64);
-		let pow = Proof::read(reader)?;
+		let (output_mmr_size, kernel_mmr_size) = ser_multiread!(reader, read_u64, read_u64);
+		let mut pow = ProofOfWork::read(version, reader)?;
+		if version == 1 {
+			pow.total_difficulty = total_difficulty.unwrap();
+		}
 
 		if timestamp > MAX_DATE.and_hms(0, 0, 0).timestamp()
 			|| timestamp < MIN_DATE.and_hms(0, 0, 0).timestamp()

@@ -239,7 +312,6 @@ impl Readable for BlockHeader {
 			height,
 			previous,
 			timestamp: DateTime::<Utc>::from_utc(NaiveDateTime::from_timestamp(timestamp, 0), Utc),
-			total_difficulty,
 			output_root,
 			range_proof_root,
 			kernel_root,

@@ -247,7 +319,6 @@ impl Readable for BlockHeader {
 			total_kernel_sum,
 			output_mmr_size,
 			kernel_mmr_size,
-			nonce,
 			pow,
 		})
 	}

@@ -261,30 +332,41 @@ impl BlockHeader {
 			[write_u16, self.version],
 			[write_u64, self.height],
 			[write_fixed_bytes, &self.previous],
-			[write_i64, self.timestamp.timestamp()],
-			[write_u64, self.total_difficulty.to_num()],
+			[write_i64, self.timestamp.timestamp()]
+		);
+		if self.version == 1 {
+			// written as part of the ProofOfWork in later versions
+			writer.write_u64(self.pow.total_difficulty.to_num())?;
+		}
+		ser_multiwrite!(
+			writer,
 			[write_fixed_bytes, &self.output_root],
 			[write_fixed_bytes, &self.range_proof_root],
 			[write_fixed_bytes, &self.kernel_root],
 			[write_fixed_bytes, &self.total_kernel_offset],
 			[write_fixed_bytes, &self.total_kernel_sum],
 			[write_u64, self.output_mmr_size],
-			[write_u64, self.kernel_mmr_size],
-			[write_u64, self.nonce]
+			[write_u64, self.kernel_mmr_size]
 		);
 		Ok(())
 	}
 
-	///
 	/// Returns the pre-pow hash, as the post-pow hash
 	/// should just be the hash of the POW
 	pub fn pre_pow_hash(&self) -> Hash {
 		let mut hasher = HashWriter::default();
 		self.write_pre_pow(&mut hasher).unwrap();
+		self.pow.write_pre_pow(self.version, &mut hasher).unwrap();
 		let mut ret = [0; 32];
 		hasher.finalize(&mut ret);
 		Hash(ret)
 	}
 
+	/// Total difficulty accumulated by the proof of work on this header
+	pub fn total_difficulty(&self) -> Difficulty {
+		self.pow.total_difficulty.clone()
+	}
+
 	/// The "overage" to use when verifying the kernel sums.
 	/// For a block header the overage is 0 - reward.
 	pub fn overage(&self) -> i64 {

@@ -304,10 +386,10 @@ impl BlockHeader {
 	/// Serialized size of this header
 	pub fn serialized_size(&self) -> usize {
-		let mut size = fixed_size_of_serialized_header();
+		let mut size = fixed_size_of_serialized_header(self.version);
 		size += mem::size_of::<u8>(); // pow.cuckoo_sizeshift
-		let nonce_bits = self.pow.cuckoo_sizeshift as usize - 1;
+		let nonce_bits = self.pow.cuckoo_sizeshift() as usize - 1;
 		let bitvec_len = global::proofsize() * nonce_bits;
 		size += bitvec_len / 8; // pow.nonces
 		if bitvec_len % 8 != 0 {

@@ -418,7 +500,7 @@ impl Block {
 		// Now set the pow on the header so block hashing works as expected.
 		{
 			let proof_size = global::proofsize();
-			block.header.pow = Proof::random(proof_size);
+			block.header.pow.proof = Proof::random(proof_size);
 		}
 
 		Ok(block)

@@ -515,17 +597,27 @@ impl Block {
 		let now = Utc::now().timestamp();
 		let timestamp = DateTime::<Utc>::from_utc(NaiveDateTime::from_timestamp(now, 0), Utc);
 
+		let version = if prev.height + 1 < consensus::HEADER_V2_HARD_FORK {
+			1
+		} else {
+			2
+		};
+
 		// Now build the block with all the above information.
 		// Note: We have not validated the block here.
 		// Caller must validate the block as necessary.
 		Block {
 			header: BlockHeader {
+				version,
 				height: prev.height + 1,
 				timestamp,
 				previous: prev.hash(),
-				total_difficulty: difficulty + prev.total_difficulty,
 				total_kernel_offset,
 				total_kernel_sum,
+				pow: ProofOfWork {
+					total_difficulty: difficulty + prev.pow.total_difficulty,
+					..Default::default()
+				},
 				..Default::default()
 			},
 			body: agg_tx.into(),


@@ -29,7 +29,10 @@ pub fn genesis_dev() -> core::Block {
 		height: 0,
 		previous: core::hash::Hash([0xff; 32]),
 		timestamp: Utc.ymd(1997, 8, 4).and_hms(0, 0, 0),
-		nonce: global::get_genesis_nonce(),
+		pow: core::ProofOfWork {
+			nonce: global::get_genesis_nonce(),
+			..Default::default()
+		},
 		..Default::default()
 	})
 }

@@ -39,15 +42,19 @@ pub fn genesis_dev() -> core::Block {
 pub fn genesis_testnet1() -> core::Block {
 	core::Block::with_header(core::BlockHeader {
 		height: 0,
-		previous: core::hash::Hash([0xff; 32]),
 		timestamp: Utc.ymd(2017, 11, 16).and_hms(20, 0, 0),
-		nonce: 28205,
-		pow: core::Proof::new(vec![
-			0x21e, 0x7a2, 0xeae, 0x144e, 0x1b1c, 0x1fbd, 0x203a, 0x214b, 0x293b, 0x2b74, 0x2bfa,
-			0x2c26, 0x32bb, 0x346a, 0x34c7, 0x37c5, 0x4164, 0x42cc, 0x4cc3, 0x55af, 0x5a70, 0x5b14,
-			0x5e1c, 0x5f76, 0x6061, 0x60f9, 0x61d7, 0x6318, 0x63a1, 0x63fb, 0x649b, 0x64e5, 0x65a1,
-			0x6b69, 0x70f8, 0x71c7, 0x71cd, 0x7492, 0x7b11, 0x7db8, 0x7f29, 0x7ff8,
-		]),
+		pow: core::ProofOfWork {
+			total_difficulty: Difficulty::one(),
+			scaling_difficulty: 1,
+			nonce: 28205,
+			proof: core::Proof::new(vec![
+				0x21e, 0x7a2, 0xeae, 0x144e, 0x1b1c, 0x1fbd, 0x203a, 0x214b, 0x293b, 0x2b74,
+				0x2bfa, 0x2c26, 0x32bb, 0x346a, 0x34c7, 0x37c5, 0x4164, 0x42cc, 0x4cc3, 0x55af,
+				0x5a70, 0x5b14, 0x5e1c, 0x5f76, 0x6061, 0x60f9, 0x61d7, 0x6318, 0x63a1, 0x63fb,
+				0x649b, 0x64e5, 0x65a1, 0x6b69, 0x70f8, 0x71c7, 0x71cd, 0x7492, 0x7b11, 0x7db8,
+				0x7f29, 0x7ff8,
+			]),
+		},
 		..Default::default()
 	})
 }

@@ -58,16 +65,19 @@ pub fn genesis_testnet2() -> core::Block {
 		height: 0,
 		previous: core::hash::Hash([0xff; 32]),
 		timestamp: Utc.ymd(2018, 3, 26).and_hms(16, 0, 0),
-		total_difficulty: Difficulty::from_num(global::initial_block_difficulty()),
-		nonce: 1060,
-		pow: core::Proof::new(vec![
-			0x1940730, 0x333b9d0, 0x4739d6f, 0x4c6cfb1, 0x6e3d6c3, 0x74408a3, 0x7ba2bd2, 0x83e2024,
-			0x8ca22b5, 0x9d39ab8, 0xb6646dd, 0xc6698b6, 0xc6f78fe, 0xc99b662, 0xcf2ae8c, 0xcf41eed,
-			0xdd073e6, 0xded6af8, 0xf08d1a5, 0x1156a144, 0x11d1160a, 0x131bb0a5, 0x137ad703,
-			0x13b0831f, 0x1421683f, 0x147e3c1f, 0x1496fda0, 0x150ba22b, 0x15cc5bc6, 0x16edf697,
-			0x17ced40c, 0x17d84f9e, 0x18a515c1, 0x19320d9c, 0x19da4f6d, 0x1b50bcb1, 0x1b8bc72f,
-			0x1c7b6964, 0x1d07b3a9, 0x1d189d4d, 0x1d1f9a15, 0x1dafcd41,
-		]),
+		pow: core::ProofOfWork {
+			total_difficulty: Difficulty::from_num(global::initial_block_difficulty()),
+			scaling_difficulty: 1,
+			nonce: 1060,
+			proof: core::Proof::new(vec![
+				0x1940730, 0x333b9d0, 0x4739d6f, 0x4c6cfb1, 0x6e3d6c3, 0x74408a3, 0x7ba2bd2,
+				0x83e2024, 0x8ca22b5, 0x9d39ab8, 0xb6646dd, 0xc6698b6, 0xc6f78fe, 0xc99b662,
+				0xcf2ae8c, 0xcf41eed, 0xdd073e6, 0xded6af8, 0xf08d1a5, 0x1156a144, 0x11d1160a,
+				0x131bb0a5, 0x137ad703, 0x13b0831f, 0x1421683f, 0x147e3c1f, 0x1496fda0, 0x150ba22b,
+				0x15cc5bc6, 0x16edf697, 0x17ced40c, 0x17d84f9e, 0x18a515c1, 0x19320d9c, 0x19da4f6d,
+				0x1b50bcb1, 0x1b8bc72f, 0x1c7b6964, 0x1d07b3a9, 0x1d189d4d, 0x1d1f9a15, 0x1dafcd41,
+			]),
+		},
 		..Default::default()
 	})
 }

@@ -78,16 +88,19 @@ pub fn genesis_testnet3() -> core::Block {
 		height: 0,
 		previous: core::hash::Hash([0xff; 32]),
 		timestamp: Utc.ymd(2018, 7, 8).and_hms(18, 0, 0),
-		total_difficulty: Difficulty::from_num(global::initial_block_difficulty()),
-		nonce: 4956988373127691,
-		pow: core::Proof::new(vec![
-			0xa420dc, 0xc8ffee, 0x10e433e, 0x1de9428, 0x2ed4cea, 0x52d907b, 0x5af0e3f, 0x6b8fcae,
-			0x8319b53, 0x845ca8c, 0x8d2a13e, 0x8d6e4cc, 0x9349e8d, 0xa7a33c5, 0xaeac3cb, 0xb193e23,
-			0xb502e19, 0xb5d9804, 0xc9ac184, 0xd4f4de3, 0xd7a23b8, 0xf1d8660, 0xf443756,
-			0x10b833d2, 0x11418fc5, 0x11b8aeaf, 0x131836ec, 0x132ab818, 0x13a46a55, 0x13df89fe,
-			0x145d65b5, 0x166f9c3a, 0x166fe0ef, 0x178cb36f, 0x185baf68, 0x1bbfe563, 0x1bd637b4,
-			0x1cfc8382, 0x1d1ed012, 0x1e391ca5, 0x1e999b4c, 0x1f7c6d21,
-		]),
+		pow: core::ProofOfWork {
+			total_difficulty: Difficulty::from_num(global::initial_block_difficulty()),
+			scaling_difficulty: 1,
+			nonce: 4956988373127691,
+			proof: core::Proof::new(vec![
+				0xa420dc, 0xc8ffee, 0x10e433e, 0x1de9428, 0x2ed4cea, 0x52d907b, 0x5af0e3f,
+				0x6b8fcae, 0x8319b53, 0x845ca8c, 0x8d2a13e, 0x8d6e4cc, 0x9349e8d, 0xa7a33c5,
+				0xaeac3cb, 0xb193e23, 0xb502e19, 0xb5d9804, 0xc9ac184, 0xd4f4de3, 0xd7a23b8,
+				0xf1d8660, 0xf443756, 0x10b833d2, 0x11418fc5, 0x11b8aeaf, 0x131836ec, 0x132ab818,
+				0x13a46a55, 0x13df89fe, 0x145d65b5, 0x166f9c3a, 0x166fe0ef, 0x178cb36f, 0x185baf68,
+				0x1bbfe563, 0x1bd637b4, 0x1cfc8382, 0x1d1ed012, 0x1e391ca5, 0x1e999b4c, 0x1f7c6d21,
+			]),
+		},
 		..Default::default()
 	})
 }

@@ -99,9 +112,12 @@ pub fn genesis_main() -> core::Block {
 		height: 0,
 		previous: core::hash::Hash([0xff; 32]),
 		timestamp: Utc.ymd(2018, 8, 14).and_hms(0, 0, 0),
-		total_difficulty: Difficulty::from_num(global::initial_block_difficulty()),
-		nonce: global::get_genesis_nonce(),
-		pow: core::Proof::zero(consensus::PROOFSIZE),
+		pow: core::ProofOfWork {
+			total_difficulty: Difficulty::from_num(global::initial_block_difficulty()),
+			scaling_difficulty: 1,
+			nonce: global::get_genesis_nonce(),
+			proof: core::Proof::zero(consensus::PROOFSIZE),
+		},
 		..Default::default()
 	})
 }


@@ -50,7 +50,7 @@ use pow::cuckoo::{Cuckoo, Error};
 /// satisfies the requirements of the header.
 pub fn verify_size(bh: &BlockHeader, cuckoo_sz: u8) -> bool {
 	Cuckoo::from_hash(bh.pre_pow_hash().as_ref(), cuckoo_sz)
-		.verify(&bh.pow, consensus::EASINESS as u64)
+		.verify(&bh.pow.proof, consensus::EASINESS as u64)
 }
 
 /// Mines a genesis block using the internal miner

@@ -62,7 +62,7 @@ pub fn mine_genesis_block() -> Result<Block, Error> {
 	}
 
 	// total_difficulty on the genesis header *is* the difficulty of that block
-	let genesis_difficulty = gen.header.total_difficulty.clone();
+	let genesis_difficulty = gen.header.pow.total_difficulty.clone();
 
 	let sz = global::min_sizeshift();
 	let proof_size = global::proofsize();

@@ -80,11 +80,11 @@ pub fn pow_size(
 	proof_size: usize,
 	sz: u8,
 ) -> Result<(), Error> {
-	let start_nonce = bh.nonce;
+	let start_nonce = bh.pow.nonce;
 
 	// set the nonce for faster solution finding in user testing
 	if bh.height == 0 && global::is_user_testing_mode() {
-		bh.nonce = global::get_genesis_nonce();
+		bh.pow.nonce = global::get_genesis_nonce();
 	}
 
 	// try to find a cuckoo cycle on that header hash

@@ -93,18 +93,18 @@ pub fn pow_size(
 		// diff, we're all good
 		if let Ok(proof) = cuckoo::Miner::new(bh, consensus::EASINESS, proof_size, sz).mine() {
 			if proof.to_difficulty() >= diff {
-				bh.pow = proof.clone();
+				bh.pow.proof = proof.clone();
 				return Ok(());
 			}
 		}
 
 		// otherwise increment the nonce
-		let (res, _) = bh.nonce.overflowing_add(1);
-		bh.nonce = res;
+		let (res, _) = bh.pow.nonce.overflowing_add(1);
+		bh.pow.nonce = res;
 
 		// and if we're back where we started, update the time (changes the hash as
 		// well)
-		if bh.nonce == start_nonce {
+		if bh.pow.nonce == start_nonce {
 			bh.timestamp = DateTime::<Utc>::from_utc(NaiveDateTime::from_timestamp(0, 0), Utc);
 		}
 	}

@@ -122,15 +122,15 @@ mod test {
 	#[test]
 	fn genesis_pow() {
 		let mut b = genesis::genesis_dev();
-		b.header.nonce = 485;
+		b.header.pow.nonce = 485;
 		pow_size(
 			&mut b.header,
 			Difficulty::one(),
 			global::proofsize(),
 			global::min_sizeshift(),
 		).unwrap();
-		assert!(b.header.nonce != 310);
-		assert!(b.header.pow.to_difficulty() >= Difficulty::one());
+		assert!(b.header.pow.nonce != 310);
+		assert!(b.header.pow.proof.to_difficulty() >= Difficulty::one());
 		assert!(verify_size(&b.header, global::min_sizeshift()));
 	}
 }
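Aside (illustrative, not part of the diff): pow_size searches by bumping the nonce with overflowing_add, and only once the nonce wraps back to its starting value does it perturb the timestamp so the pre-pow hash keeps changing. A standalone sketch of that wrap-around pattern:

    // Sketch of the nonce-search step in pow_size above, with the timestamp
    // reset abstracted into a callback.
    fn next_nonce(nonce: u64, start: u64, mut on_wrap: impl FnMut()) -> u64 {
        let (next, _) = nonce.overflowing_add(1);
        if next == start {
            on_wrap(); // the real code resets bh.timestamp here, changing the pre-pow hash
        }
        next
    }

    fn main() {
        let mut wrapped = false;
        let n = next_nonce(u64::max_value(), 0, || wrapped = true);
        assert_eq!(n, 0);
        assert!(wrapped);
    }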


@@ -25,7 +25,7 @@ pub mod common;
 use chrono::Duration;
 use common::{new_block, tx1i2o, tx2i1o, txspend1i1o};
-use grin_core::consensus::{BLOCK_OUTPUT_WEIGHT, MAX_BLOCK_WEIGHT};
+use grin_core::consensus::{self, BLOCK_OUTPUT_WEIGHT, MAX_BLOCK_WEIGHT};
 use grin_core::core::block::Error;
 use grin_core::core::hash::Hashed;
 use grin_core::core::id::ShortIdentifiable;

@@ -436,3 +436,26 @@ fn serialize_deserialize_compact_block() {
 	assert_eq!(cb1.header, cb2.header);
 	assert_eq!(cb1.kern_ids(), cb2.kern_ids());
 }
+
+#[test]
+fn empty_block_v2_switch() {
+	let keychain = ExtKeychain::from_random_seed().unwrap();
+	let mut prev = BlockHeader::default();
+	prev.height = consensus::HEADER_V2_HARD_FORK - 1;
+	let key_id = keychain.derive_key_id(1).unwrap();
+	let b = new_block(vec![], &keychain, &prev, &key_id);
+	let mut vec = Vec::new();
+	ser::serialize(&mut vec, &b).expect("serialization failed");
+	let target_len = 1_260;
+	assert_eq!(b.header.version, 2);
+	assert_eq!(vec.len(), target_len);
+
+	// another try right before v2
+	prev.height = consensus::HEADER_V2_HARD_FORK - 2;
+	let b = new_block(vec![], &keychain, &prev, &key_id);
+	let mut vec = Vec::new();
+	ser::serialize(&mut vec, &b).expect("serialization failed");
+	let target_len = 1_252;
+	assert_eq!(b.header.version, 1);
+	assert_eq!(vec.len(), target_len);
+}
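Sanity check on the two target lengths asserted above (assumption: the two serialized blocks are otherwise identical): the only difference between the v1 and the v2 encoding of this empty block is the 8-byte scaling_difficulty field added by the new ProofOfWork struct, so the lengths differ by exactly size_of::<u64>():

    fn main() {
        let v1_block_len = 1_252usize; // asserted for the block built right before the fork
        let scaling_difficulty_len = std::mem::size_of::<u64>(); // 8 bytes, written for v2 only
        assert_eq!(v1_block_len + scaling_difficulty_len, 1_260); // the v2 length
    }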


@@ -498,12 +498,14 @@ fn next_target_adjustment() {
 }
 
 #[test]
-fn hard_fork_1() {
+fn hard_forks() {
 	assert!(valid_header_version(0, 1));
 	assert!(valid_header_version(10, 1));
 	assert!(!valid_header_version(10, 2));
-	assert!(valid_header_version(250_000, 1));
-	assert!(!valid_header_version(250_001, 1));
+	assert!(valid_header_version(100_000, 2));
+	assert!(valid_header_version(249_999, 2));
+	assert!(valid_header_version(250_000, 3));
+	assert!(!valid_header_version(250_000, 1));
 	assert!(!valid_header_version(500_000, 1));
 	assert!(!valid_header_version(250_001, 2));
 }


@@ -313,7 +313,7 @@ impl Peers {
 			debug!(
 				LOGGER,
 				"broadcast_block: {} @ {} [{}] was sent to {} peers.",
-				b.header.total_difficulty,
+				b.header.pow.total_difficulty,
 				b.header.height,
 				b.hash(),
 				count,

@@ -331,7 +331,7 @@ impl Peers {
 				LOGGER,
 				"broadcast_compact_block: {}, {} at {}, to {} peers, done.",
 				b.hash(),
-				b.header.total_difficulty,
+				b.header.pow.total_difficulty,
 				b.header.height,
 				count,
 			);

@@ -348,7 +348,7 @@ impl Peers {
 				LOGGER,
 				"broadcast_header: {}, {} at {}, to {} peers, done.",
 				bh.hash(),
-				bh.total_difficulty,
+				bh.pow.total_difficulty,
 				bh.height,
 				count,
 			);


@@ -329,9 +329,10 @@ fn headers_header_size(conn: &mut TcpStream, msg_len: u64) -> Result<u64, Error>
 	}
 
 	let average_header_size = (msg_len - 2) / total_headers;
 
-	// support size of Cuckoo: from Cuckoo 30 to Cuckoo 36
-	let minimum_size = core::serialized_size_of_header(global::min_sizeshift());
-	let maximum_size = core::serialized_size_of_header(global::min_sizeshift() + 6);
+	// support size of Cuckoo: from Cuckoo 30 to Cuckoo 36, with version 2
+	// having slightly larger headers
+	let minimum_size = core::serialized_size_of_header(1, global::min_sizeshift());
+	let maximum_size = core::serialized_size_of_header(2, global::min_sizeshift() + 6);
 	if average_header_size < minimum_size as u64 || average_header_size > maximum_size as u64 {
 		debug!(
 			LOGGER,


@@ -179,17 +179,17 @@ fn build_block(
 	)?;
 
 	let mut rng = rand::OsRng::new().unwrap();
-	b.header.nonce = rng.gen();
+	b.header.pow.nonce = rng.gen();
 	b.header.timestamp = DateTime::<Utc>::from_utc(NaiveDateTime::from_timestamp(now_sec, 0), Utc);
 
-	let b_difficulty = (b.header.total_difficulty.clone() - head.total_difficulty.clone()).to_num();
+	let b_difficulty = (b.header.total_difficulty() - head.total_difficulty()).to_num();
 	debug!(
 		LOGGER,
 		"Built new block with {} inputs and {} outputs, network difficulty: {}, cumulative difficulty {}",
 		b.inputs().len(),
 		b.outputs().len(),
 		b_difficulty,
-		b.header.clone().total_difficulty.to_num(),
+		b.header.total_difficulty().to_num(),
 	);
 
 	// Now set txhashset roots and sizes on the header of the block being built.


@@ -476,8 +476,8 @@ impl StratumServer {
 				}
 				let mut b: Block = b.unwrap().clone();
 				// Reconstruct the block header with this nonce and pow added
-				b.header.nonce = params.nonce;
-				b.header.pow.nonces = params.pow;
+				b.header.pow.nonce = params.nonce;
+				b.header.pow.proof.nonces = params.pow;
 				// Get share difficulty
 				share_difficulty = b.header.pow.to_difficulty().to_num();
 				// If the difficulty is too low its an error

@@ -532,7 +532,7 @@ impl StratumServer {
 					"(Server ID: {}) Failed to validate share at height {} with nonce {} using job_id {}",
 					self.id,
 					params.height,
-					b.header.nonce,
+					b.header.pow.nonce,
 					params.job_id,
 				);
 				worker_stats.num_rejected += 1;

@@ -554,7 +554,7 @@ impl StratumServer {
 				self.id,
 				b.hash(),
 				b.header.height,
-				b.header.nonce,
+				b.header.pow.nonce,
 				share_difficulty,
 				self.current_difficulty,
 				submitted_by,

@@ -733,9 +733,8 @@ impl StratumServer {
 				self.current_key_id.clone(),
 				wallet_listener_url,
 			);
-			self.current_difficulty = (new_block.header.total_difficulty.clone()
-				- head.total_difficulty.clone())
-				.to_num();
+			self.current_difficulty =
+				(new_block.header.total_difficulty() - head.total_difficulty).to_num();
 			self.current_key_id = block_fees.key_id();
 			current_hash = latest_hash;
 			// set the minimum acceptable share difficulty for this block


@@ -89,7 +89,7 @@ impl Miner {
 			self.debug_output_id,
 			global::min_sizeshift(),
 			attempt_time_per_block,
-			b.header.total_difficulty,
+			b.header.total_difficulty(),
 			b.header.height,
 			latest_hash
 		);

@@ -105,14 +105,13 @@ impl Miner {
 			).mine()
 			{
 				let proof_diff = proof.to_difficulty();
-				if proof_diff >= (b.header.total_difficulty.clone() - head.total_difficulty.clone())
-				{
+				if proof_diff >= (b.header.total_difficulty() - head.total_difficulty()) {
 					sol = Some(proof);
 					break;
 				}
 			}
 
-			b.header.nonce += 1;
+			b.header.pow.nonce += 1;
 			*latest_hash = self.chain.head().unwrap().last_block_h;
 			iter_count += 1;
 		}

@@ -165,7 +164,7 @@ impl Miner {
 		// we found a solution, push our block through the chain processing pipeline
 		if let Some(proof) = sol {
-			b.header.pow = proof;
+			b.header.pow.proof = proof;
 			info!(
 				LOGGER,
 				"(Server ID: {}) Found valid proof of work, adding block {}.",