Secondary PoW scaling factor dampening, cleanup ()

* Remove useless time median window
* Secondary PoW factor dampening
* Fix off-by-one in time window, cleanup dampening, fix tests
Ignotus Peverell 2018-10-16 16:55:40 -07:00 committed by GitHub
parent 67bc891455
commit fffe5154d2
4 changed files with 37 additions and 93 deletions

core/src/consensus.rs

@@ -135,12 +135,6 @@ pub fn valid_header_version(height: u64, version: u16) -> bool {
 	}
 }
-/// Time window in blocks to calculate block time median
-pub const MEDIAN_TIME_WINDOW: u64 = 11;
-/// Index at half the desired median
-pub const MEDIAN_TIME_INDEX: u64 = MEDIAN_TIME_WINDOW / 2;
 /// Number of blocks used to calculate difficulty adjustments
 pub const DIFFICULTY_ADJUST_WINDOW: u64 = HOUR_HEIGHT;
@@ -256,39 +250,27 @@ where
 {
 	// Create vector of difficulty data running from earliest
 	// to latest, and pad with simulated pre-genesis data to allow earlier
-	// adjustment if there isn't enough window data
-	// length will be DIFFICULTY_ADJUST_WINDOW+MEDIAN_TIME_WINDOW
+	// adjustment if there isn't enough window data length will be
+	// DIFFICULTY_ADJUST_WINDOW + 1 (for initial block time bound)
 	let diff_data = global::difficulty_data_to_vector(cursor);
 	// First, get the ratio of secondary PoW vs primary
 	let sec_pow_scaling = secondary_pow_scaling(height, &diff_data);
-	// Obtain the median window for the earlier time period
-	// the first MEDIAN_TIME_WINDOW elements
-	let earliest_ts = time_window_median(&diff_data, 0, MEDIAN_TIME_WINDOW as usize);
+	let earliest_ts = diff_data[0].timestamp;
+	let latest_ts = diff_data[diff_data.len() - 1].timestamp;
-	// Obtain the median window for the latest time period
-	// i.e. the last MEDIAN_TIME_WINDOW elements
-	let latest_ts = time_window_median(
-		&diff_data,
-		DIFFICULTY_ADJUST_WINDOW as usize,
-		MEDIAN_TIME_WINDOW as usize,
-	);
-	// median time delta
+	// time delta within the window
 	let ts_delta = latest_ts - earliest_ts;
 	// Get the difficulty sum of the last DIFFICULTY_ADJUST_WINDOW elements
-	let diff_sum = diff_data
-		.iter()
-		.skip(MEDIAN_TIME_WINDOW as usize)
-		.fold(0, |sum, d| sum + d.difficulty.to_num());
+	let diff_sum: u64 = diff_data.iter().skip(1).map(|dd| dd.difficulty.to_num()).sum();
 	// Apply dampening except when difficulty is near 1
 	let ts_damp = if diff_sum < DAMP_FACTOR * DIFFICULTY_ADJUST_WINDOW {
 		ts_delta
 	} else {
-		(1 * ts_delta + (DAMP_FACTOR - 1) * BLOCK_TIME_WINDOW) / DAMP_FACTOR
+		(ts_delta + (DAMP_FACTOR - 1) * BLOCK_TIME_WINDOW) / DAMP_FACTOR
 	};
 	// Apply time bounds
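
Note: the rewritten dampening line above is a weighted mean, one part observed window time to DAMP_FACTOR - 1 parts target window time. A minimal runnable sketch of the arithmetic, assuming DAMP_FACTOR = 3 and BLOCK_TIME_WINDOW = 3600 (mainnet-era values, not shown in this hunk):

const DAMP_FACTOR: u64 = 3;
const BLOCK_TIME_WINDOW: u64 = 3600; // 60 blocks at a 60s target spacing

// Weighted mean of observed window time and target window time.
fn damp(ts_delta: u64) -> u64 {
	(ts_delta + (DAMP_FACTOR - 1) * BLOCK_TIME_WINDOW) / DAMP_FACTOR
}

fn main() {
	// Blocks arriving twice as slow: the +3600s deviation is damped to +1200s,
	// so difficulty falls by ~25% in one adjustment instead of 50%.
	assert_eq!(damp(7200), 4800);
	// An on-target window passes through unchanged.
	assert_eq!(damp(3600), 3600);
}

The deviation from target shrinks by a factor of DAMP_FACTOR per window, which tames oscillation while still letting difficulty converge.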
@@ -308,10 +290,7 @@ where
 /// Factor by which the secondary proof of work difficulty will be adjusted
 pub fn secondary_pow_scaling(height: u64, diff_data: &Vec<HeaderInfo>) -> u32 {
 	// median of past scaling factors, scaling is 1 if none found
-	let mut scalings = diff_data
-		.iter()
-		.map(|n| n.secondary_scaling)
-		.collect::<Vec<_>>();
+	let mut scalings = diff_data.iter().map(|n| n.secondary_scaling).collect::<Vec<_>>();
 	if scalings.len() == 0 {
 		return 1;
 	}
@@ -319,11 +298,14 @@ pub fn secondary_pow_scaling(height: u64, diff_data: &Vec<HeaderInfo>) -> u32 {
 	let scaling_median = scalings[scalings.len() / 2] as u64;
 	let secondary_count = max(diff_data.iter().filter(|n| n.is_secondary).count(), 1) as u64;
-	// what's the ideal ratio at the current height
+	// calculate and dampen ideal secondary count so it can be compared with the
+	// actual, both are multiplied by a factor of 100 to increase resolution
 	let ratio = secondary_pow_ratio(height);
+	let ideal_secondary_count = diff_data.len() as u64 * ratio;
+	let dampened_secondary_count =
+		(secondary_count * 100 + (DAMP_FACTOR - 1) * ideal_secondary_count) / DAMP_FACTOR;
 	// adjust the past median based on ideal ratio vs actual ratio
-	let scaling = scaling_median * diff_data.len() as u64 * ratio / 100 / secondary_count as u64;
+	let scaling = scaling_median * ideal_secondary_count / dampened_secondary_count;
 	// various bounds
 	let bounded_scaling = if scaling < scaling_median / 2 || scaling == 0 {
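
The new secondary scaling, condensed into a standalone sketch. DAMP_FACTOR = 3 and a 90% ideal secondary ratio at low heights are assumptions consistent with the updated tests further down, not values shown in this hunk:

// Dampened secondary scaling: the actual secondary count is pulled toward
// the ideal count before dividing, so the median factor moves gradually.
fn scaling_sketch(scaling_median: u64, secondary_count: u64, window_len: u64) -> u64 {
	const DAMP_FACTOR: u64 = 3;
	let ratio = 90u64; // assumed secondary_pow_ratio(height), in percent
	let ideal = window_len * ratio; // both counts carry a x100 resolution factor
	let dampened = (secondary_count * 100 + (DAMP_FACTOR - 1) * ideal) / DAMP_FACTOR;
	scaling_median * ideal / dampened
}

fn main() {
	// All-primary 60-block window, median scaling 100: the factor climbs to
	// 148 rather than jumping by a hard-coded maximum as before.
	assert_eq!(scaling_sketch(100, 1, 60), 148);
}

Note that secondary_count is already clamped to at least 1 by the max(..., 1) above, which the sketch mirrors by passing 1 for the all-primary case.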
@@ -336,20 +318,6 @@ pub fn secondary_pow_scaling(height: u64, diff_data: &Vec<HeaderInfo>) -> u32 {
 	bounded_scaling as u32
 }
-/// Median timestamp within the time window starting at `from` with the
-/// provided `length`.
-fn time_window_median(diff_data: &Vec<HeaderInfo>, from: usize, length: usize) -> u64 {
-	let mut window_latest: Vec<u64> = diff_data
-		.iter()
-		.skip(from)
-		.take(length)
-		.map(|n| n.timestamp)
-		.collect();
-	// pick median
-	window_latest.sort();
-	window_latest[MEDIAN_TIME_INDEX as usize]
-}
 /// Consensus rule that collections of items are sorted lexicographically.
 pub trait VerifySortOrder<T> {
 	/// Verify a collection of items is sorted as required.

core/src/global.rs

@@ -19,8 +19,8 @@
 use consensus::HeaderInfo;
 use consensus::{
 	BASE_EDGE_BITS, BLOCK_TIME_SEC, COINBASE_MATURITY, CUT_THROUGH_HORIZON,
-	DIFFICULTY_ADJUST_WINDOW, INITIAL_DIFFICULTY, MEDIAN_TIME_WINDOW, PROOFSIZE,
-	SECOND_POW_EDGE_BITS, DAY_HEIGHT
+	DIFFICULTY_ADJUST_WINDOW, INITIAL_DIFFICULTY, PROOFSIZE, DAY_HEIGHT,
+	SECOND_POW_EDGE_BITS,
 };
 use pow::{self, CuckatooContext, EdgeType, PoWContext};
@@ -256,7 +256,7 @@ where
 	T: IntoIterator<Item = HeaderInfo>,
 {
 	// Convert iterator to vector, so we can append to it if necessary
-	let needed_block_count = (MEDIAN_TIME_WINDOW + DIFFICULTY_ADJUST_WINDOW) as usize;
+	let needed_block_count = DIFFICULTY_ADJUST_WINDOW as usize + 1;
 	let mut last_n: Vec<HeaderInfo> = cursor.into_iter().take(needed_block_count).collect();
 	// Sort blocks from earliest to latest (to keep conceptually easier)
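
The + 1 is there because spanning DIFFICULTY_ADJUST_WINDOW block intervals requires one extra boundary timestamp. A toy check with made-up numbers:

fn main() {
	// n + 1 timestamps span n block intervals, hence the extra header.
	let timestamps = [0u64, 60, 120, 180]; // hypothetical values, 60s spacing
	let intervals = timestamps.len() as u64 - 1;
	assert_eq!(timestamps[timestamps.len() - 1] - timestamps[0], intervals * 60);
}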

core/tests/consensus.rs

@@ -122,35 +122,13 @@ fn get_diff_stats(chain_sim: &Vec<HeaderInfo>) -> DiffStats {
 	let tip_height = chain_sim.len();
 	let earliest_block_height = tip_height as i64 - last_blocks.len() as i64;
-	// Obtain the median window for the earlier time period
-	// the first MEDIAN_TIME_WINDOW elements
-	let mut window_earliest: Vec<u64> = last_blocks
-		.clone()
-		.iter()
-		.take(MEDIAN_TIME_WINDOW as usize)
-		.map(|n| n.clone().timestamp)
-		.collect();
-	// pick median
-	window_earliest.sort();
-	let earliest_ts = window_earliest[MEDIAN_TIME_INDEX as usize];
-	// Obtain the median window for the latest time period
-	// i.e. the last MEDIAN_TIME_WINDOW elements
-	let mut window_latest: Vec<u64> = last_blocks
-		.clone()
-		.iter()
-		.skip(DIFFICULTY_ADJUST_WINDOW as usize)
-		.map(|n| n.clone().timestamp)
-		.collect();
-	// pick median
-	window_latest.sort();
-	let latest_ts = window_latest[MEDIAN_TIME_INDEX as usize];
+	let earliest_ts = last_blocks[0].timestamp;
+	let latest_ts = last_blocks[last_blocks.len() - 1].timestamp;
 	let mut i = 1;
 	let sum_blocks: Vec<HeaderInfo> = global::difficulty_data_to_vector(diff_iter.iter().cloned())
 		.into_iter()
-		.skip(MEDIAN_TIME_WINDOW as usize)
 		.take(DIFFICULTY_ADJUST_WINDOW as usize)
 		.collect();
@@ -263,7 +241,6 @@ fn print_chain_sim(chain_sim: Vec<(HeaderInfo, DiffStats)>) {
 	println!("Constants");
 	println!("DIFFICULTY_ADJUST_WINDOW: {}", DIFFICULTY_ADJUST_WINDOW);
 	println!("BLOCK_TIME_WINDOW: {}", BLOCK_TIME_WINDOW);
-	println!("MEDIAN_TIME_WINDOW: {}", MEDIAN_TIME_WINDOW);
 	println!("UPPER_TIME_BOUND: {}", UPPER_TIME_BOUND);
 	println!("DAMP_FACTOR: {}", DAMP_FACTOR);
 	chain_sim.iter().enumerate().for_each(|(i, b)| {
@@ -338,7 +315,7 @@ fn adjustment_scenarios() {
 	println!("*********************************************************");
 	print_chain_sim(chain_sim);
 	println!("*********************************************************");
-	let just_enough = (DIFFICULTY_ADJUST_WINDOW + MEDIAN_TIME_WINDOW) as usize;
+	let just_enough = DIFFICULTY_ADJUST_WINDOW as usize;
 	// Steady difficulty for a good while, then a sudden drop
 	let chain_sim = create_chain_sim(global::initial_block_difficulty());
@@ -408,17 +385,17 @@ fn next_target_adjustment() {
 	let diff_one = Difficulty::one();
 	assert_eq!(
 		next_difficulty(1, vec![HeaderInfo::from_ts_diff(cur_time, diff_one)]),
-		HeaderInfo::from_diff_scaling(Difficulty::one(), 2),
+		HeaderInfo::from_diff_scaling(Difficulty::one(), 1),
 	);
 	assert_eq!(
 		next_difficulty(1, vec![HeaderInfo::new(cur_time, diff_one, 10, true)]),
-		HeaderInfo::from_diff_scaling(Difficulty::one(), 2),
+		HeaderInfo::from_diff_scaling(Difficulty::one(), 1),
 	);
 	let mut hi = HeaderInfo::from_diff_scaling(diff_one, 1);
 	assert_eq!(
 		next_difficulty(1, repeat(60, hi.clone(), DIFFICULTY_ADJUST_WINDOW, None)),
-		HeaderInfo::from_diff_scaling(Difficulty::one(), 2),
+		HeaderInfo::from_diff_scaling(Difficulty::one(), 1),
 	);
 	hi.is_secondary = true;
 	assert_eq!(
@@ -428,7 +405,7 @@ fn next_target_adjustment() {
 	hi.secondary_scaling = 100;
 	assert_eq!(
 		next_difficulty(1, repeat(60, hi.clone(), DIFFICULTY_ADJUST_WINDOW, None)),
-		HeaderInfo::from_diff_scaling(Difficulty::one(), 106),
+		HeaderInfo::from_diff_scaling(Difficulty::one(), 96),
 	);
 	// Check we don't get stuck on difficulty 1
@@ -439,7 +416,7 @@ fn next_target_adjustment() {
 	);
 	// just enough data, right interval, should stay constant
-	let just_enough = DIFFICULTY_ADJUST_WINDOW + MEDIAN_TIME_WINDOW;
+	let just_enough = DIFFICULTY_ADJUST_WINDOW + 1;
 	hi.difficulty = Difficulty::from_num(1000);
 	assert_eq!(
 		next_difficulty(1, repeat(60, hi.clone(), just_enough, None)).difficulty,
@@ -448,7 +425,7 @@ fn next_target_adjustment() {
 	// checking averaging works
 	hi.difficulty = Difficulty::from_num(500);
-	let sec = DIFFICULTY_ADJUST_WINDOW / 2 + MEDIAN_TIME_WINDOW;
+	let sec = DIFFICULTY_ADJUST_WINDOW / 2;
 	let mut s1 = repeat(60, hi.clone(), sec, Some(cur_time));
 	let mut s2 = repeat_offs(
 		cur_time + (sec * 60) as u64,
@@ -513,22 +490,22 @@ fn next_target_adjustment() {
 #[test]
 fn secondary_pow_scale() {
-	let window = DIFFICULTY_ADJUST_WINDOW + MEDIAN_TIME_WINDOW;
+	let window = DIFFICULTY_ADJUST_WINDOW;
 	let mut hi = HeaderInfo::from_diff_scaling(Difficulty::from_num(10), 100);
-	// all primary, factor should be multiplied by 4 (max adjustment) so it
-	// becomes easier to find a high difficulty block
+	// all primary, factor should increase so it becomes easier to find a high
+	// difficulty block
 	assert_eq!(
 		secondary_pow_scaling(1, &(0..window).map(|_| hi.clone()).collect()),
-		200
+		148
 	);
-	// all secondary on 90%, factor should lose 10%
+	// all secondary on 90%, factor should go down a bit
 	hi.is_secondary = true;
 	assert_eq!(
 		secondary_pow_scaling(1, &(0..window).map(|_| hi.clone()).collect()),
-		90
+		96
 	);
-	// all secondary on 1%, should be divided by 4 (max adjustment)
+	// all secondary on 1%, factor should go down to bound (divide by 2)
 	assert_eq!(
 		secondary_pow_scaling(890_000, &(0..window).map(|_| hi.clone()).collect()),
 		50
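
Hand-checking the 96 above, under the same assumed constants (DAMP_FACTOR = 3, a 60-block window, secondary_pow_ratio(1) = 90; inferred from the results, not stated in this hunk):

fn main() {
	let ideal = 60u64 * 90; // ideal secondary count x100 = 5400
	// All secondary, median scaling 100: actual count x100 = 6000.
	let dampened = (60 * 100 + 2 * ideal) / 3; // 5600
	assert_eq!(100 * ideal / dampened, 96);
}

For the 1% case the undampened result collapses toward zero, so what the assertion exercises is the lower bound of scaling_median / 2 = 50.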
@@ -552,7 +529,7 @@ fn secondary_pow_scale() {
 		),
 		100
 	);
-	// 95% secondary, should come down
+	// 95% secondary, should come down based on 100 median
 	assert_eq!(
 		secondary_pow_scaling(
 			1,
@@ -561,9 +538,9 @@ fn secondary_pow_scale() {
 			.chain((0..(window * 95 / 100)).map(|_| hi.clone()))
 			.collect()
 		),
-		94
+		98
 	);
-	// 40% secondary, should come up
+	// 40% secondary, should come up based on 50 median
 	assert_eq!(
 		secondary_pow_scaling(
 			1,
@@ -572,7 +549,7 @@ fn secondary_pow_scale() {
 			.chain((0..(window * 4 / 10)).map(|_| hi.clone()))
 			.collect()
 		),
-		100
+		61
 	);
 }
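
Same arithmetic for the two mixed cases, under the same assumed constants; the medians (100 and 50) come from the updated comments in the hunks above:

fn main() {
	let ideal = 60u64 * 90; // 5400
	// 95% secondary: 57 of 60 headers, median scaling 100.
	let dampened = (57 * 100 + 2 * ideal) / 3; // 5500
	assert_eq!(100 * ideal / dampened, 98);
	// 40% secondary: 24 of 60 headers, median scaling 50.
	let dampened = (24 * 100 + 2 * ideal) / 3; // 4400
	assert_eq!(50 * ideal / dampened, 61);
}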

servers/src/grin/server.rs

@@ -400,7 +400,6 @@ impl Server {
 		let last_blocks: Vec<consensus::HeaderInfo> =
 			global::difficulty_data_to_vector(self.chain.difficulty_iter())
 				.into_iter()
-				.skip(consensus::MEDIAN_TIME_WINDOW as usize)
 				.take(consensus::DIFFICULTY_ADJUST_WINDOW as usize)
 				.collect();