Avoid repeated truncation in difficulty adjust + minor refactoring (#915)

* avoid repeated truncation in difficulty adjust + minor refactoring
* reinstate needed low-difficulty exception on dampening
* leave the consensus-breaking change for a later one-line change; mark the lower testnet1 max target obsolete
This commit is contained in:
John Tromp 2018-03-30 20:21:06 +02:00 committed by Ignotus Peverell
parent 3d2796bdd6
commit c3939107a5
4 changed files with 33 additions and 52 deletions

View file

@ -191,22 +191,17 @@ where
// adjustment if there isn't enough window data
// length will be DIFFICULTY_ADJUST_WINDOW+MEDIAN_TIME_WINDOW
let diff_data = global::difficulty_data_to_vector(cursor);
// Get the difficulty sum for averaging later
// Which in this case is the sum of the last
// DIFFICULTY_ADJUST_WINDOW elements
let diff_sum = diff_data
.iter()
.skip(MEDIAN_TIME_WINDOW as usize)
.take(DIFFICULTY_ADJUST_WINDOW as usize)
.fold(Difficulty::zero(), |sum, d| sum + d.clone().unwrap().1);
// Obtain the median window for the earlier time period
// which is just the first MEDIAN_TIME_WINDOW elements
// the first MEDIAN_TIME_WINDOW elements
let mut window_earliest: Vec<u64> = diff_data
.iter()
.take(MEDIAN_TIME_WINDOW as usize)
.map(|n| n.clone().unwrap().0)
.collect();
// pick median
window_earliest.sort();
let earliest_ts = window_earliest[MEDIAN_TIME_INDEX as usize];
// Obtain the median window for the latest time period
// i.e. the last MEDIAN_TIME_WINDOW elements
@ -215,23 +210,24 @@ where
.skip(DIFFICULTY_ADJUST_WINDOW as usize)
.map(|n| n.clone().unwrap().0)
.collect();
// And obtain our median values
window_earliest.sort();
// pick median
window_latest.sort();
let latest_ts = window_latest[MEDIAN_TIME_INDEX as usize];
let earliest_ts = window_earliest[MEDIAN_TIME_INDEX as usize];
// Calculate the average difficulty
let diff_avg = diff_sum.into_num() / Difficulty::from_num(DIFFICULTY_ADJUST_WINDOW).into_num();
// Actual undampened time delta
// median time delta
let ts_delta = latest_ts - earliest_ts;
// Apply dampening
let ts_damp = match diff_avg {
n if n >= DAMP_FACTOR => ((DAMP_FACTOR - 1) * BLOCK_TIME_WINDOW + ts_delta) / DAMP_FACTOR,
_ => ts_delta,
// Get the difficulty sum of the last DIFFICULTY_ADJUST_WINDOW elements
let diff_sum = diff_data
.iter()
.skip(MEDIAN_TIME_WINDOW as usize)
.fold(0, |sum, d| sum + d.clone().unwrap().1.into_num());
// Apply dampening except when difficulty is near 1
let ts_damp = if diff_sum < DAMP_FACTOR * DIFFICULTY_ADJUST_WINDOW {
ts_delta
} else {
(1 * ts_delta + (DAMP_FACTOR-1) * BLOCK_TIME_WINDOW) / DAMP_FACTOR
};
// Apply time bounds
@ -243,10 +239,12 @@ where
ts_damp
};
let difficulty = diff_avg * Difficulty::from_num(BLOCK_TIME_WINDOW).into_num()
/ Difficulty::from_num(adj_ts).into_num();
// AVOID BREAKING CONSENSUS FOR NOW WITH OLD DOUBLE TRUNCATION CALC
let difficulty = (diff_sum / DIFFICULTY_ADJUST_WINDOW) * BLOCK_TIME_WINDOW / adj_ts;
// EVENTUALLY BREAK CONSENSUS WITH THIS IMPROVED SINGLE TRUNCATION DIFF CALC
// let difficulty = diff_sum * BLOCK_TIME_SEC / adj_ts;
Ok(max(Difficulty::from_num(difficulty), Difficulty::one()))
Ok(Difficulty::from_num(max(difficulty, 1)))
}
/// Consensus rule that collections of items are sorted lexicographically.

View file

@ -21,6 +21,7 @@ use std::cmp::min;
use std::{fmt, ops};
use std::convert::AsRef;
use std::ops::Add;
use byteorder::{BigEndian, ByteOrder};
use blake2::blake2b::Blake2b;
@ -77,6 +78,11 @@ impl Hash {
let bytes = util::from_hex(hex.to_string()).unwrap();
Ok(Hash::from_vec(bytes))
}
/// Most significant 64 bits of the hash, read big-endian from its
/// leading bytes. Introduced so `Difficulty::from_hash` can use the
/// top 64 bits directly instead of copying and truncating the hash.
pub fn to_u64(&self) -> u64 {
BigEndian::read_u64(&self.0)
}
}
impl ops::Index<usize> for Hash {

View file

@ -23,11 +23,10 @@ use std::fmt;
use std::ops::{Add, Div, Mul, Sub};
use serde::{de, Deserialize, Deserializer, Serialize, Serializer};
use byteorder::{BigEndian, ByteOrder};
use std::cmp::max;
use core::hash::Hash;
use ser::{self, Readable, Reader, Writeable, Writer};
use core::global;
/// The difficulty is defined as the maximum target divided by the block hash.
#[derive(Debug, Clone, PartialEq, PartialOrd, Eq, Ord)]
@ -56,13 +55,10 @@ impl Difficulty {
/// Computes the difficulty from a hash. Divides the maximum target by the
/// provided hash.
pub fn from_hash(h: &Hash) -> Difficulty {
let max_target = BigEndian::read_u64(&global::max_proof_target());
// Use the first 64 bits of the given hash
let mut in_vec = h.to_vec();
in_vec.truncate(8);
let num = BigEndian::read_u64(&in_vec);
let max_target = <u64>::max_value();
let num = h.to_u64();
Difficulty {
num: max_target / num,
num: max_target / max(num, 1),
}
}

View file

@ -60,13 +60,6 @@ pub const TESTING_INITIAL_DIFFICULTY: u64 = 1;
/// Testnet 2 initial block difficulty, high to see how it goes
pub const TESTNET2_INITIAL_DIFFICULTY: u64 = 1000;
/// The target is the 32-bytes hash block hashes must be lower than.
pub const MAX_PROOF_TARGET: [u8; 8] = [0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff];
/// We want to slow this right down for user testing at cuckoo 16, so pick a
/// smaller max
pub const MAX_PROOF_TARGET_TESTING: [u8; 8] = [0x05, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff];
/// Types of chain a server can run with, dictates the genesis block and
/// and mining parameters used.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
@ -141,18 +134,6 @@ pub fn coinbase_maturity() -> u64 {
}
}
/// Maximum proof target for the currently configured chain type.
/// All testing-oriented chains (automated testing, user testing,
/// testnet1) use the reduced testing target; testnet2 and mainnet
/// use the full 8-byte maximum target.
pub fn max_proof_target() -> [u8; 8] {
    let chain = CHAIN_TYPE.read().unwrap();
    match *chain {
        ChainTypes::AutomatedTesting
        | ChainTypes::UserTesting
        | ChainTypes::Testnet1 => MAX_PROOF_TARGET_TESTING,
        ChainTypes::Testnet2 | ChainTypes::Mainnet => MAX_PROOF_TARGET,
    }
}
/// Initial mining difficulty
pub fn initial_block_difficulty() -> u64 {
let param_ref = CHAIN_TYPE.read().unwrap();