// Copyright 2016 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! All the rules required for a cryptocurrency to reach consensus across
//! the whole network are complex and hard to completely isolate. Some can be
//! simple parameters (like block reward), others complex algorithms (like
//! Merkle sum trees or reorg rules). However, as long as they're simple
//! enough, consensus-relevant constants and short functions should be kept
//! here.

use std::fmt;
use std::cmp::max;

use core::target::Difficulty;

/// A grin is divisible to 10^9, following the SI prefixes
pub const GRIN_BASE: u64 = 1_000_000_000;
/// Milligrin, a thousandth of a grin
pub const MILLI_GRIN: u64 = GRIN_BASE / 1_000;
/// Microgrin, a thousandth of a milligrin
pub const MICRO_GRIN: u64 = MILLI_GRIN / 1_000;
/// Nanogrin, smallest unit, takes a billion to make a grin
pub const NANO_GRIN: u64 = 1;
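
// A minimal sketch (not itself a consensus rule) illustrating how the unit constants
// above relate to each other; the module name is an illustrative assumption.
#[cfg(test)]
mod unit_relation_example {
	use super::*;

	#[test]
	fn units_compose_back_to_one_grin() {
		// A thousand milligrin, a million microgrin or a billion nanogrin make one grin.
		assert_eq!(1_000 * MILLI_GRIN, GRIN_BASE);
		assert_eq!(1_000_000 * MICRO_GRIN, GRIN_BASE);
		assert_eq!(1_000_000_000 * NANO_GRIN, GRIN_BASE);
	}
}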

/// The block subsidy amount, one grin per second on average
pub const REWARD: u64 = 60 * GRIN_BASE;

/// Actual block reward for a given total fee amount
pub fn reward(fee: u64) -> u64 {
	REWARD + fee / 2
}
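
// A small hedged sketch of the fee split implemented by `reward()` above: the block
// reward is the fixed subsidy plus half of the total fees (integer division). The
// module name and the sample fee amount are illustrative assumptions, not consensus
// values.
#[cfg(test)]
mod reward_example {
	use super::*;

	#[test]
	fn reward_includes_half_the_fees() {
		// With no fees, only the fixed subsidy is paid out.
		assert_eq!(reward(0), REWARD);
		// 10 milligrin of fees adds 5 milligrin on top of the subsidy.
		assert_eq!(reward(10 * MILLI_GRIN), REWARD + 5 * MILLI_GRIN);
	}
}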

/// Number of blocks before a coinbase matures and can be spent
pub const COINBASE_MATURITY: u64 = 1_000;

/// Max number of coinbase outputs in a valid block.
/// This is to prevent a miner generating an excessively large "compact block".
/// But we do technically support blocks with multiple coinbase outputs/kernels.
pub const MAX_BLOCK_COINBASE_OUTPUTS: u64 = 1;

/// Max number of coinbase kernels in a valid block.
/// This is to prevent a miner generating an excessively large "compact block".
/// But we do technically support blocks with multiple coinbase outputs/kernels.
pub const MAX_BLOCK_COINBASE_KERNELS: u64 = 1;

/// Block interval, in seconds, that the network will tune its next_target for.
/// Note that we may reduce this value in the future as we get more data on
/// mining with Cuckoo Cycle, networks improve and block propagation is
/// optimized (adjusting the reward accordingly).
pub const BLOCK_TIME_SEC: u64 = 60;

/// Cuckoo-cycle proof size (cycle length)
pub const PROOFSIZE: usize = 42;

/// Default Cuckoo Cycle size shift used for mining and validating.
pub const DEFAULT_SIZESHIFT: u8 = 30;

/// Default Cuckoo Cycle easiness, high enough to have a good likelihood of
/// finding a solution.
pub const EASINESS: u32 = 50;

/// Default number of blocks in the past when cross-block cut-through will start
/// happening. Needs to be long enough to not overlap with a long reorg.
/// The rationale behind the value is that the longest bitcoin fork was about 30
/// blocks, so 5h. We add an order of magnitude to be safe and round to 48h of
/// blocks to make it easier to reason about.
pub const CUT_THROUGH_HORIZON: u32 = 48 * 3600 / (BLOCK_TIME_SEC as u32);
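
// A quick hedged sanity check of the arithmetic behind CUT_THROUGH_HORIZON: 48 hours
// of 60-second blocks works out to 2,880 blocks. The module name is an illustrative
// assumption.
#[cfg(test)]
mod horizon_example {
	use super::*;

	#[test]
	fn horizon_is_48_hours_of_blocks() {
		assert_eq!(CUT_THROUGH_HORIZON, 2_880);
		assert_eq!(CUT_THROUGH_HORIZON as u64 * BLOCK_TIME_SEC, 48 * 3600);
	}
}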

/// The maximum size we're willing to accept for any message. Enforced by the
/// peer-to-peer networking layer only for DoS protection.
pub const MAX_MSG_LEN: u64 = 20_000_000;

/// Weight of an input when counted against the max block weight capacity
pub const BLOCK_INPUT_WEIGHT: usize = 1;

/// Weight of an output when counted against the max block weight capacity
pub const BLOCK_OUTPUT_WEIGHT: usize = 10;

/// Weight of a kernel when counted against the max block weight capacity
pub const BLOCK_KERNEL_WEIGHT: usize = 2;

/// Total maximum block weight
pub const MAX_BLOCK_WEIGHT: usize = 80_000;

/// Maximum inputs for a block (issue#261)
/// Hundreds of inputs + 1 output might be slow to validate (issue#258)
pub const MAX_BLOCK_INPUTS: usize = 300_000; // soft fork down when too_high

/// Whether a block exceeds the maximum acceptable weight
pub fn exceeds_weight(input_len: usize, output_len: usize, kernel_len: usize) -> bool {
	input_len * BLOCK_INPUT_WEIGHT + output_len * BLOCK_OUTPUT_WEIGHT
		+ kernel_len * BLOCK_KERNEL_WEIGHT > MAX_BLOCK_WEIGHT
		|| input_len > MAX_BLOCK_INPUTS
}
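
// A hedged sketch of how the weight cap above plays out: with an output weight of 10,
// a block made of nothing but outputs tops out at 8,000 outputs before exceeding
// MAX_BLOCK_WEIGHT. The module name and the example counts are illustrative
// assumptions.
#[cfg(test)]
mod block_weight_example {
	use super::*;

	#[test]
	fn output_heavy_block_hits_weight_cap() {
		// 8,000 outputs weigh exactly 80,000, which does not exceed the cap...
		assert!(!exceeds_weight(0, 8_000, 0));
		// ...but one more output, or one extra input, pushes it over.
		assert!(exceeds_weight(0, 8_001, 0));
		assert!(exceeds_weight(1, 8_000, 0));
	}
}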

/// Fork every 250,000 blocks for the first 2 years: a simple number and just a
/// little less than 6 months.
pub const HARD_FORK_INTERVAL: u64 = 250_000;

/// Check whether the block version is valid at a given height. Implements the
/// hard forks scheduled at 6-month intervals for the first 2 years.
pub fn valid_header_version(height: u64, version: u16) -> bool {
	// uncomment below as we go from hard fork to hard fork
	if height <= HARD_FORK_INTERVAL && version == 1 {
		true
	/* } else if height <= 2 * HARD_FORK_INTERVAL && version == 2 {
		true */
	/* } else if height <= 3 * HARD_FORK_INTERVAL && version == 3 {
		true */
	/* } else if height <= 4 * HARD_FORK_INTERVAL && version == 4 {
		true */
	/* } else if height > 4 * HARD_FORK_INTERVAL && version > 4 {
		true */
	} else {
		false
	}
}

/// The minimum mining difficulty we'll allow
pub const MINIMUM_DIFFICULTY: u64 = 1;

/// Time window in blocks to calculate block time median
pub const MEDIAN_TIME_WINDOW: u64 = 11;

/// Number of blocks used to calculate difficulty adjustments
pub const DIFFICULTY_ADJUST_WINDOW: u64 = 23;

/// Average time span of the difficulty adjustment window
pub const BLOCK_TIME_WINDOW: u64 = DIFFICULTY_ADJUST_WINDOW * BLOCK_TIME_SEC;

/// Maximum size time window used for difficulty adjustments
pub const UPPER_TIME_BOUND: u64 = BLOCK_TIME_WINDOW * 4 / 3;

/// Minimum size time window used for difficulty adjustments
pub const LOWER_TIME_BOUND: u64 = BLOCK_TIME_WINDOW * 5 / 6;
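
// A hedged worked example of the window arithmetic above: a 23-block window of
// 60-second blocks targets a 1,380-second timespan, and the observed timespan is
// clamped between 1,150 (5/6 of target) and 1,840 (4/3 of target) before being used
// for difficulty adjustment. The module name is an illustrative assumption.
#[cfg(test)]
mod time_window_example {
	use super::*;

	#[test]
	fn difficulty_window_bounds() {
		assert_eq!(BLOCK_TIME_WINDOW, 1_380);
		assert_eq!(LOWER_TIME_BOUND, 1_150);
		assert_eq!(UPPER_TIME_BOUND, 1_840);
	}
}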

/// Consensus errors
#[derive(Clone, Debug, PartialEq)]
pub enum Error {
	/// Inputs/outputs/kernels must be sorted lexicographically.
	SortError,
}

/// Error when computing the next difficulty adjustment.
#[derive(Debug, Clone)]
pub struct TargetError(pub String);

impl fmt::Display for TargetError {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		write!(f, "Error computing new difficulty: {}", self.0)
	}
}

/// Computes the proof-of-work difficulty that the next block should comply
/// with. Takes an iterator over past blocks, from latest (highest height) to
/// oldest (lowest height). The iterator produces pairs of timestamp and
/// difficulty for each block.
///
/// The difficulty calculation is based on both the Digishield and GravityWave
/// families of difficulty computation, coming to something very close to Zcash.
/// The reference difficulty is an average of the difficulty over a window of
/// 23 blocks. The corresponding timespan is calculated by using the
/// difference between the median timestamps at the beginning and the end
/// of the window.
pub fn next_difficulty<T>(cursor: T) -> Result<Difficulty, TargetError>
where
	T: IntoIterator<Item = Result<(u64, Difficulty), TargetError>>,
{
	// Block times at the beginning and end of the adjustment window, used to
	// calculate medians later.
	let mut window_begin = vec![];
	let mut window_end = vec![];

	// Sum of difficulties in the window, used to calculate the average later.
	let mut diff_sum = Difficulty::zero();

	// Enumerating backward over blocks
	for (n, head_info) in cursor.into_iter().enumerate() {
		let m = n as u64;
		let (ts, diff) = head_info?;

		// Sum each element in the adjustment window. In addition, retain
		// timestamps within the median windows (at ]start; start-11] and
		// ]end; end-11]) to later calculate medians.
		if m < DIFFICULTY_ADJUST_WINDOW {
			diff_sum = diff_sum + diff;

			if m < MEDIAN_TIME_WINDOW {
				window_begin.push(ts);
			}
		} else if m < DIFFICULTY_ADJUST_WINDOW + MEDIAN_TIME_WINDOW {
			window_end.push(ts);
		} else {
			break;
		}
	}

	// Check we have enough blocks
	if window_end.len() < (MEDIAN_TIME_WINDOW as usize) {
		return Ok(Difficulty::minimum());
	}

	// Calculating time medians at the beginning and end of the window.
	window_begin.sort();
	window_end.sort();
	let begin_ts = window_begin[window_begin.len() / 2];
	let end_ts = window_end[window_end.len() / 2];

	// Average difficulty and dampened average time
	let diff_avg = diff_sum.into_num() as f64
		/ Difficulty::from_num(DIFFICULTY_ADJUST_WINDOW).into_num() as f64;
	let ts_damp = (3 * BLOCK_TIME_WINDOW + (begin_ts - end_ts)) / 4;

	// Apply time bounds
	let adj_ts = if ts_damp < LOWER_TIME_BOUND {
		LOWER_TIME_BOUND
	} else if ts_damp > UPPER_TIME_BOUND {
		UPPER_TIME_BOUND
	} else {
		ts_damp
	};

	let difficulty = diff_avg * Difficulty::from_num(BLOCK_TIME_WINDOW).into_num() as f64
		/ Difficulty::from_num(adj_ts).into_num() as f64;

	// All this ceil and f64 business is so that difficulty can always adjust
	// for smaller numbers < 10
	Ok(max(Difficulty::from_num(difficulty.ceil() as u64), Difficulty::minimum()))
}

/// Consensus rule that collections of items are sorted lexicographically.
pub trait VerifySortOrder<T> {
	/// Verify a collection of items is sorted as required.
	fn verify_sort_order(&self) -> Result<(), Error>;
}

#[cfg(test)]
use std;

#[cfg(test)]
mod test {
	use core::target::Difficulty;

	use super::*;

	// Builds an iterator for next difficulty calculation with the provided
	// constant time interval, difficulty and total length.
	fn repeat(interval: u64, diff: u64, len: u64) -> Vec<Result<(u64, Difficulty), TargetError>> {
		// watch overflow here, length shouldn't be ridiculous anyhow
		assert!(len < std::usize::MAX as u64);
		let diffs = vec![Difficulty::from_num(diff); len as usize];
		let times = (0..(len as usize)).map(|n| n * interval as usize).rev();
		let pairs = times.zip(diffs.iter());
		pairs
			.map(|(t, d)| Ok((t as u64, d.clone())))
			.collect::<Vec<_>>()
	}

	fn repeat_offs(
		from: u64,
		interval: u64,
		diff: u64,
		len: u64,
	) -> Vec<Result<(u64, Difficulty), TargetError>> {
		map_vec!(repeat(interval, diff, len), |e| match e.clone() {
			Err(e) => Err(e),
			Ok((t, d)) => Ok((t + from, d)),
		})
	}

	/// Checks different next_target adjustments and difficulty boundaries
	#[test]
	fn next_target_adjustment() {
		// not enough data
		assert_eq!(
			next_difficulty(vec![]).unwrap(),
			Difficulty::from_num(MINIMUM_DIFFICULTY)
		);

		assert_eq!(
			next_difficulty(vec![Ok((60, Difficulty::one()))]).unwrap(),
			Difficulty::from_num(MINIMUM_DIFFICULTY)
		);

		assert_eq!(
			next_difficulty(repeat(60, 10, DIFFICULTY_ADJUST_WINDOW)).unwrap(),
			Difficulty::from_num(MINIMUM_DIFFICULTY)
		);

		// just enough data, right interval, should stay constant
		let just_enough = DIFFICULTY_ADJUST_WINDOW + MEDIAN_TIME_WINDOW;
		assert_eq!(
			next_difficulty(repeat(60, 1000, just_enough)).unwrap(),
			Difficulty::from_num(1000)
		);

		// checking averaging works, window length is odd so need to compensate a little
		let sec = DIFFICULTY_ADJUST_WINDOW / 2 + 1 + MEDIAN_TIME_WINDOW;
		let mut s1 = repeat(60, 500, sec);
		let mut s2 = repeat_offs((sec * 60) as u64, 60, 1545, DIFFICULTY_ADJUST_WINDOW / 2);
		s2.append(&mut s1);
		assert_eq!(next_difficulty(s2).unwrap(), Difficulty::from_num(1000));

		// too slow, diff goes down
		assert_eq!(
			next_difficulty(repeat(90, 1000, just_enough)).unwrap(),
			Difficulty::from_num(890)
		);
		assert_eq!(
			next_difficulty(repeat(120, 1000, just_enough)).unwrap(),
			Difficulty::from_num(800)
		);

		// too fast, diff goes up
		assert_eq!(
			next_difficulty(repeat(55, 1000, just_enough)).unwrap(),
			Difficulty::from_num(1022)
		);
		assert_eq!(
			next_difficulty(repeat(45, 1000, just_enough)).unwrap(),
			Difficulty::from_num(1068)
		);

		// hitting lower time bound, should always get the same result below
		assert_eq!(
			next_difficulty(repeat(20, 1000, just_enough)).unwrap(),
			Difficulty::from_num(1200)
		);
		assert_eq!(
			next_difficulty(repeat(10, 1000, just_enough)).unwrap(),
			Difficulty::from_num(1200)
		);

		// hitting higher time bound, should always get the same result above
		assert_eq!(
			next_difficulty(repeat(160, 1000, just_enough)).unwrap(),
			Difficulty::from_num(750)
		);
		assert_eq!(
			next_difficulty(repeat(200, 1000, just_enough)).unwrap(),
			Difficulty::from_num(750)
		);

		// We should never drop below MINIMUM_DIFFICULTY (1)
		assert_eq!(
			next_difficulty(repeat(90, 0, just_enough)).unwrap(),
			Difficulty::from_num(1)
		);
	}

	#[test]
	fn hard_fork_1() {
		assert!(valid_header_version(0, 1));
		assert!(valid_header_version(10, 1));
		assert!(!valid_header_version(10, 2));
		assert!(valid_header_version(250_000, 1));
		assert!(!valid_header_version(250_001, 1));
		assert!(!valid_header_version(500_000, 1));
		assert!(!valid_header_version(250_001, 2));
	}

	// #[test]
	// fn hard_fork_2() {
	// 	assert!(valid_header_version(0, 1));
	// 	assert!(valid_header_version(10, 1));
	// 	assert!(valid_header_version(10, 2));
	// 	assert!(valid_header_version(250_000, 1));
	// 	assert!(!valid_header_version(250_001, 1));
	// 	assert!(!valid_header_version(500_000, 1));
	// 	assert!(valid_header_version(250_001, 2));
	// 	assert!(valid_header_version(500_000, 2));
	// 	assert!(!valid_header_version(500_001, 2));
	// }
}