fix DAA; implement wtema (#3477)
* implement wtema
* reduce FTL from 12 to 5 mins; fix inaccuracies in cuckoo doc
* rename difficulty window for DampedMovingAverage DAA
* fix FTL error msg
* secondary_scale becomes extended nonce past HF4
* secondary_scale change actually belongs in hardfork4 PR
* make future time limit configurable
* define constant global::DEFAULT_FUTURE_TIME_LIMIT and refactor a bit
* fix typos
* as_ref() feels safer than as_mut() :-)
* remove obsolete mut
* make future_time_limit non-mandatory in grin-server.toml
Parent: a5b8968826
Commit: f86102b2a6
18 changed files with 399 additions and 244 deletions
@@ -368,7 +368,7 @@ impl Default for Tip {
 			height: 0,
 			last_block_h: ZERO_HASH,
 			prev_block_h: ZERO_HASH,
-			total_difficulty: Difficulty::min(),
+			total_difficulty: Difficulty::min_dma(),
 		}
 	}
 }
@@ -68,7 +68,7 @@ fn test_coinbase_maturity() {

 	let next_header_info = consensus::next_difficulty(1, chain.difficulty_iter().unwrap());
 	let reward = libtx::reward::output(&keychain, &builder, &key_id1, 0, false).unwrap();
-	let mut block = core::core::Block::new(&prev, &[], Difficulty::min(), reward).unwrap();
+	let mut block = core::core::Block::new(&prev, &[], Difficulty::min_dma(), reward).unwrap();
 	block.header.timestamp = prev.timestamp + Duration::seconds(60);
 	block.header.pow.secondary_scaling = next_header_info.secondary_scaling;
@@ -113,7 +113,7 @@ fn test_coinbase_maturity() {
 	let txs = &[coinbase_txn.clone()];
 	let fees = txs.iter().map(|tx| tx.fee()).sum();
 	let reward = libtx::reward::output(&keychain, &builder, &key_id3, fees, false).unwrap();
-	let mut block = core::core::Block::new(&prev, txs, Difficulty::min(), reward).unwrap();
+	let mut block = core::core::Block::new(&prev, txs, Difficulty::min_dma(), reward).unwrap();
 	let next_header_info = consensus::next_difficulty(1, chain.difficulty_iter().unwrap());
 	block.header.timestamp = prev.timestamp + Duration::seconds(60);
 	block.header.pow.secondary_scaling = next_header_info.secondary_scaling;
@@ -149,7 +149,8 @@ fn test_coinbase_maturity() {

 		let next_header_info = consensus::next_difficulty(1, chain.difficulty_iter().unwrap());
 		let reward = libtx::reward::output(&keychain, &builder, &key_id1, 0, false).unwrap();
-		let mut block = core::core::Block::new(&prev, &[], Difficulty::min(), reward).unwrap();
+		let mut block =
+			core::core::Block::new(&prev, &[], Difficulty::min_dma(), reward).unwrap();

 		block.header.timestamp = prev.timestamp + Duration::seconds(60);
 		block.header.pow.secondary_scaling = next_header_info.secondary_scaling;
@@ -195,7 +196,8 @@ fn test_coinbase_maturity() {
 		let txs = &[coinbase_txn.clone()];
 		let fees = txs.iter().map(|tx| tx.fee()).sum();
 		let reward = libtx::reward::output(&keychain, &builder, &key_id3, fees, false).unwrap();
-		let mut block = core::core::Block::new(&prev, txs, Difficulty::min(), reward).unwrap();
+		let mut block =
+			core::core::Block::new(&prev, txs, Difficulty::min_dma(), reward).unwrap();
 		let next_header_info = consensus::next_difficulty(1, chain.difficulty_iter().unwrap());
 		block.header.timestamp = prev.timestamp + Duration::seconds(60);
 		block.header.pow.secondary_scaling = next_header_info.secondary_scaling;
@@ -231,7 +233,7 @@ fn test_coinbase_maturity() {

 			let reward = libtx::reward::output(&keychain, &builder, &pk, 0, false).unwrap();
 			let mut block =
-				core::core::Block::new(&prev, &[], Difficulty::min(), reward).unwrap();
+				core::core::Block::new(&prev, &[], Difficulty::min_dma(), reward).unwrap();
 			let next_header_info =
 				consensus::next_difficulty(1, chain.difficulty_iter().unwrap());
 			block.header.timestamp = prev.timestamp + Duration::seconds(60);
@@ -262,7 +264,8 @@ fn test_coinbase_maturity() {
 			let fees = txs.iter().map(|tx| tx.fee()).sum();
 			let next_header_info = consensus::next_difficulty(1, chain.difficulty_iter().unwrap());
 			let reward = libtx::reward::output(&keychain, &builder, &key_id4, fees, false).unwrap();
-			let mut block = core::core::Block::new(&prev, txs, Difficulty::min(), reward).unwrap();
+			let mut block =
+				core::core::Block::new(&prev, txs, Difficulty::min_dma(), reward).unwrap();

 			block.header.timestamp = prev.timestamp + Duration::seconds(60);
 			block.header.pow.secondary_scaling = next_header_info.secondary_scaling;
@@ -93,6 +93,19 @@ fn comments() -> HashMap<String, String> {
 		.to_string(),
 	);

+	retval.insert(
+		"future_time_limit".to_string(),
+		"
+#The Future Time Limit (FTL) is a limit on how far into the future,
+#relative to a node's local time, the timestamp on a new block can be,
+#in order for the block to be accepted.
+#At Hard Fork 4, this was reduced from 12 minutes down to 5 minutes,
+#so as to limit possible timestamp manipulation on the new
+#wtema difficulty adjustment algorithm
+"
+		.to_string(),
+	);
+
 	retval.insert(
 		"chain_validation_mode".to_string(),
 		"
@@ -47,8 +47,11 @@ pub fn reward(fee: u64) -> u64 {
 	REWARD.saturating_add(fee)
 }

+/// an hour in seconds
+pub const HOUR_SEC: u64 = 60 * 60;
+
 /// Nominal height for standard time intervals, hour is 60 blocks
-pub const HOUR_HEIGHT: u64 = 3600 / BLOCK_TIME_SEC;
+pub const HOUR_HEIGHT: u64 = HOUR_SEC / BLOCK_TIME_SEC;
 /// A day is 1440 blocks
 pub const DAY_HEIGHT: u64 = 24 * HOUR_HEIGHT;
 /// A week is 10_080 blocks
@@ -66,12 +69,6 @@ pub fn secondary_pow_ratio(height: u64) -> u64 {
 	90u64.saturating_sub(height / (2 * YEAR_HEIGHT / 90))
 }

-/// The AR scale damping factor to use. Dependent on block height
-/// to account for pre HF behavior on testnet4.
-fn ar_scale_damp_factor(_height: u64) -> u64 {
-	AR_SCALE_DAMP_FACTOR
-}
-
 /// Cuckoo-cycle proof size (cycle length)
 pub const PROOFSIZE: usize = 42;
@@ -174,18 +171,21 @@ pub fn valid_header_version(height: u64, version: HeaderVersion) -> bool {
 	version == header_version(height)
 }

-/// Number of blocks used to calculate difficulty adjustments
-pub const DIFFICULTY_ADJUST_WINDOW: u64 = HOUR_HEIGHT;
+/// Number of blocks used to calculate difficulty adjustment by Damped Moving Average
+pub const DMA_WINDOW: u64 = HOUR_HEIGHT;

-/// Average time span of the difficulty adjustment window
-pub const BLOCK_TIME_WINDOW: u64 = DIFFICULTY_ADJUST_WINDOW * BLOCK_TIME_SEC;
+/// Difficulty adjustment half life is 4 hours
+pub const WTEMA_HALF_LIFE: u64 = 4 * HOUR_SEC;

-/// Clamp factor to use for difficulty adjustment
+/// Average time span of the DMA difficulty adjustment window
+pub const BLOCK_TIME_WINDOW: u64 = DMA_WINDOW * BLOCK_TIME_SEC;
+
+/// Clamp factor to use for DMA difficulty adjustment
 /// Limit value to within this factor of goal
 pub const CLAMP_FACTOR: u64 = 2;

-/// Dampening factor to use for difficulty adjustment
-pub const DIFFICULTY_DAMP_FACTOR: u64 = 3;
+/// Dampening factor to use for DMA difficulty adjustment
+pub const DMA_DAMP_FACTOR: u64 = 3;

 /// Dampening factor to use for AR scale calculation.
 pub const AR_SCALE_DAMP_FACTOR: u64 = 13;
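With mainnet's BLOCK_TIME_SEC of 60, the renamed constants above work out as follows. This is the editor's sanity-check sketch, not part of the commit:

// Editor's sketch: the time constants of the hunk above, evaluated.
fn main() {
	const BLOCK_TIME_SEC: u64 = 60;
	const HOUR_SEC: u64 = 60 * 60; // 3600 seconds
	const HOUR_HEIGHT: u64 = HOUR_SEC / BLOCK_TIME_SEC; // 60 blocks per hour
	const DMA_WINDOW: u64 = HOUR_HEIGHT; // the DMA DAA averages over one hour of blocks
	const BLOCK_TIME_WINDOW: u64 = DMA_WINDOW * BLOCK_TIME_SEC; // 3600 s window target span
	const WTEMA_HALF_LIFE: u64 = 4 * HOUR_SEC; // 14400 s half life for wtema
	assert_eq!(
		(HOUR_HEIGHT, DMA_WINDOW, BLOCK_TIME_WINDOW, WTEMA_HALF_LIFE),
		(60, 60, 3600, 14400)
	);
}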
@@ -205,9 +205,16 @@ pub fn graph_weight(height: u64, edge_bits: u8) -> u64 {
 	(2u64 << (edge_bits - global::base_edge_bits()) as u64) * xpr_edge_bits
 }

-/// Minimum difficulty, enforced in diff retargetting
+/// minimum solution difficulty after HardFork4 when PoW becomes primary only Cuckatoo32+
+pub const C32_GRAPH_WEIGHT: u64 = (2u64 << (32 - BASE_EDGE_BITS) as u64) * 32; // 16384
+
+/// Minimum difficulty, enforced in Damped Moving Average diff retargetting
 /// avoids getting stuck when trying to increase difficulty subject to dampening
-pub const MIN_DIFFICULTY: u64 = DIFFICULTY_DAMP_FACTOR;
+pub const MIN_DMA_DIFFICULTY: u64 = DMA_DAMP_FACTOR;
+
+/// Minimum difficulty, enforced in Weighted Target Exponential Moving Average diff retargetting
+/// avoids getting stuck when trying to increase difficulty
+pub const MIN_WTEMA_DIFFICULTY: u64 = C32_GRAPH_WEIGHT;

 /// Minimum scaling factor for AR pow, enforced in diff retargetting
 /// avoids getting stuck when trying to increase ar_scale subject to dampening
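The `// 16384` claim on the new constant is easy to verify: with BASE_EDGE_BITS = 24, (2 << (32 - 24)) * 32 = 512 * 32 = 16384. The editor's check, mirroring the constant's own definition:

// Editor's sketch: C32_GRAPH_WEIGHT evaluates to 16384, the graph_weight
// of a Cuckatoo32 proof, which becomes the wtema difficulty floor.
fn main() {
	const BASE_EDGE_BITS: u8 = 24;
	const C32_GRAPH_WEIGHT: u64 = (2u64 << (32 - BASE_EDGE_BITS) as u64) * 32;
	assert_eq!(C32_GRAPH_WEIGHT, 16384);
}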
@@ -293,37 +300,43 @@ pub fn clamp(actual: u64, goal: u64, clamp_factor: u64) -> u64 {
 	max(goal / clamp_factor, min(actual, goal * clamp_factor))
 }

-/// Computes the proof-of-work difficulty that the next block should comply
-/// with. Takes an iterator over past block headers information, from latest
+/// Computes the proof-of-work difficulty that the next block should comply with.
+/// Takes an iterator over past block headers information, from latest
 /// (highest height) to oldest (lowest height).
-///
-/// The difficulty calculation is based on both Digishield and GravityWave
-/// family of difficulty computation, coming to something very close to Zcash.
-/// The reference difficulty is an average of the difficulty over a window of
-/// DIFFICULTY_ADJUST_WINDOW blocks. The corresponding timespan is calculated
-/// by using the difference between the median timestamps at the beginning
-/// and the end of the window.
-///
-/// The secondary proof-of-work factor is calculated along the same lines, as
-/// an adjustment on the deviation against the ideal value.
+/// Uses either the old dma DAA or, starting from HF4, the new wtema DAA
 pub fn next_difficulty<T>(height: u64, cursor: T) -> HeaderInfo
 where
 	T: IntoIterator<Item = HeaderInfo>,
 {
+	if header_version(height) < HeaderVersion(5) {
+		next_dma_difficulty(height, cursor)
+	} else {
+		next_wtema_difficulty(height, cursor)
+	}
+}
+
+/// Difficulty calculation based on a Damped Moving Average
+/// of difficulty over a window of DMA_WINDOW blocks.
+/// The corresponding timespan is calculated
+/// by using the difference between the timestamps at the beginning
+/// and the end of the window, with a damping toward the target block time.
+pub fn next_dma_difficulty<T>(height: u64, cursor: T) -> HeaderInfo
+where
+	T: IntoIterator<Item = HeaderInfo>,
+{
 	// Create vector of difficulty data running from earliest
 	// to latest, and pad with simulated pre-genesis data to allow earlier
 	// adjustment if there isn't enough window data length will be
-	// DIFFICULTY_ADJUST_WINDOW + 1 (for initial block time bound)
+	// DMA_WINDOW + 1 (for initial block time bound)
 	let diff_data = global::difficulty_data_to_vector(cursor);

 	// First, get the ratio of secondary PoW vs primary, skipping initial header
 	let sec_pow_scaling = secondary_pow_scaling(height, &diff_data[1..]);

 	// Get the timestamp delta across the window
-	let ts_delta: u64 =
-		diff_data[DIFFICULTY_ADJUST_WINDOW as usize].timestamp - diff_data[0].timestamp;
+	let ts_delta: u64 = diff_data[DMA_WINDOW as usize].timestamp - diff_data[0].timestamp;

-	// Get the difficulty sum of the last DIFFICULTY_ADJUST_WINDOW elements
+	// Get the difficulty sum of the last DMA_WINDOW elements
 	let diff_sum: u64 = diff_data
 		.iter()
 		.skip(1)
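To see what the damped moving average does with these pieces, the editor's worked check below (a sketch, not part of the diff) reproduces one value from this commit's next_dma_difficulty tests: a full window of 60 headers at difficulty 1000 arriving every 90 seconds instead of 60.

// Editor's sketch of the DMA arithmetic, using grin's damp/clamp definitions.
fn damp(actual: u64, goal: u64, damp_factor: u64) -> u64 {
	(actual + (damp_factor - 1) * goal) / damp_factor
}

fn main() {
	let (dma_window, block_time_sec) = (60u64, 60u64); // DMA_WINDOW, BLOCK_TIME_SEC
	let goal = dma_window * block_time_sec; // BLOCK_TIME_WINDOW = 3600
	let diff_sum: u64 = 60 * 1000; // 60 headers at difficulty 1000
	let ts_delta: u64 = 60 * 90; // 90 s blocks: the window spanned 5400 s
	// damp toward goal with DMA_DAMP_FACTOR = 3, clamp within CLAMP_FACTOR = 2 of goal
	let adj_ts = damp(ts_delta, goal, 3).clamp(goal / 2, goal * 2); // 4200
	assert_eq!(diff_sum * block_time_sec / adj_ts, 857); // matches the test value below
}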
@@ -332,29 +345,58 @@ where

 	// adjust time delta toward goal subject to dampening and clamping
 	let adj_ts = clamp(
-		damp(ts_delta, BLOCK_TIME_WINDOW, DIFFICULTY_DAMP_FACTOR),
+		damp(ts_delta, BLOCK_TIME_WINDOW, DMA_DAMP_FACTOR),
 		BLOCK_TIME_WINDOW,
 		CLAMP_FACTOR,
 	);
 	// minimum difficulty avoids getting stuck due to dampening
-	let difficulty = max(MIN_DIFFICULTY, diff_sum * BLOCK_TIME_SEC / adj_ts);
+	let difficulty = max(MIN_DMA_DIFFICULTY, diff_sum * BLOCK_TIME_SEC / adj_ts);

 	HeaderInfo::from_diff_scaling(Difficulty::from_num(difficulty), sec_pow_scaling)
 }

+/// Difficulty calculation based on a Weighted Target Exponential Moving Average
+/// of difficulty, using the ratio of the last block time over the target block time.
+pub fn next_wtema_difficulty<T>(_height: u64, cursor: T) -> HeaderInfo
+where
+	T: IntoIterator<Item = HeaderInfo>,
+{
+	let mut last_headers = cursor.into_iter();
+
+	// last two headers
+	let last_header = last_headers.next().unwrap();
+	let prev_header = last_headers.next().unwrap();
+
+	let last_block_time: u64 = last_header.timestamp - prev_header.timestamp;
+
+	let last_diff = last_header.difficulty.to_num();
+
+	// wtema difficulty update
+	let next_diff =
+		last_diff * WTEMA_HALF_LIFE / (WTEMA_HALF_LIFE - BLOCK_TIME_SEC + last_block_time);
+
+	// minimum difficulty at graph_weight(32) ensures difficulty increase on 59s block
+	// since 16384 * WTEMA_HALF_LIFE / (WTEMA_HALF_LIFE - 1) > 16384
+	let difficulty = max(MIN_WTEMA_DIFFICULTY, next_diff);
+
+	HeaderInfo::from_diff_scaling(Difficulty::from_num(difficulty), 0) // no more secondary PoW
+}
+
 /// Count, in units of 1/100 (a percent), the number of "secondary" (AR) blocks in the provided window of blocks.
 pub fn ar_count(_height: u64, diff_data: &[HeaderInfo]) -> u64 {
 	100 * diff_data.iter().filter(|n| n.is_secondary).count() as u64
 }

+/// The secondary proof-of-work factor is calculated along the same lines as in next_dma_difficulty,
+/// as an adjustment on the deviation against the ideal value.
 /// Factor by which the secondary proof of work difficulty will be adjusted
 pub fn secondary_pow_scaling(height: u64, diff_data: &[HeaderInfo]) -> u32 {
-	// Get the scaling factor sum of the last DIFFICULTY_ADJUST_WINDOW elements
+	// Get the scaling factor sum of the last DMA_WINDOW elements
 	let scale_sum: u64 = diff_data.iter().map(|dd| dd.secondary_scaling as u64).sum();

 	// compute ideal 2nd_pow_fraction in pct and across window
 	let target_pct = secondary_pow_ratio(height);
-	let target_count = DIFFICULTY_ADJUST_WINDOW * target_pct;
+	let target_count = DMA_WINDOW * target_pct;

 	// Get the secondary count across the window, adjusting count toward goal
 	// subject to dampening and clamping.
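The wtema update above is compact enough to check by hand. The editor's sketch below (not part of the diff) evaluates it at the mainnet parameters and reproduces three values from this commit's next_wtema_difficulty tests:

// Editor's sketch: the wtema update rule, evaluated at a few block times.
fn wtema_next(last_diff: u64, last_block_time: u64) -> u64 {
	const BLOCK_TIME_SEC: u64 = 60;
	const WTEMA_HALF_LIFE: u64 = 4 * 3600; // 14400
	last_diff * WTEMA_HALF_LIFE / (WTEMA_HALF_LIFE - BLOCK_TIME_SEC + last_block_time)
}

fn main() {
	assert_eq!(wtema_next(20_000, 60), 20_000); // on-target block: difficulty unchanged
	assert_eq!(wtema_next(20_000, 90), 19_958); // 30 s slow: difficulty eases off
	assert_eq!(wtema_next(20_000, 30), 20_041); // 30 s fast: difficulty ramps up
}

Note how a single 30-second deviation moves difficulty by only about 0.2%: the 4-hour half life smooths block-time noise, while the reduced FTL bounds how far a timestamp can be pushed to game the denominator.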
@@ -362,7 +404,7 @@ pub fn secondary_pow_scaling(height: u64, diff_data: &[HeaderInfo]) -> u32 {
 		damp(
 			ar_count(height, diff_data),
 			target_count,
-			ar_scale_damp_factor(height),
+			AR_SCALE_DAMP_FACTOR,
 		),
 		target_count,
 		CLAMP_FACTOR,
@@ -383,12 +425,12 @@ mod test {

 		// initial weights
 		assert_eq!(graph_weight(1, 31), 256 * 31);
-		assert_eq!(graph_weight(1, 32), 512 * 32);
+		assert_eq!(graph_weight(1, 32), C32_GRAPH_WEIGHT);
 		assert_eq!(graph_weight(1, 33), 1024 * 33);

 		// one year in, 31 starts going down, the rest stays the same
 		assert_eq!(graph_weight(YEAR_HEIGHT, 31), 256 * 30);
-		assert_eq!(graph_weight(YEAR_HEIGHT, 32), 512 * 32);
+		assert_eq!(graph_weight(YEAR_HEIGHT, 32), C32_GRAPH_WEIGHT);
 		assert_eq!(graph_weight(YEAR_HEIGHT, 33), 1024 * 33);

 		// 31 loses one factor per week
@@ -398,29 +440,33 @@ mod test {

 		// 2 years in, 31 still at 0, 32 starts decreasing
 		assert_eq!(graph_weight(2 * YEAR_HEIGHT, 31), 0);
-		assert_eq!(graph_weight(2 * YEAR_HEIGHT, 32), 512 * 32);
+		assert_eq!(graph_weight(2 * YEAR_HEIGHT, 32), C32_GRAPH_WEIGHT);
 		assert_eq!(graph_weight(2 * YEAR_HEIGHT, 33), 1024 * 33);

 		// 32 phaseout on hold
-		assert_eq!(graph_weight(2 * YEAR_HEIGHT + WEEK_HEIGHT, 32), 512 * 32);
+		assert_eq!(
+			graph_weight(2 * YEAR_HEIGHT + WEEK_HEIGHT, 32),
+			C32_GRAPH_WEIGHT
+		);
 		assert_eq!(graph_weight(2 * YEAR_HEIGHT + WEEK_HEIGHT, 31), 0);
 		assert_eq!(
 			graph_weight(2 * YEAR_HEIGHT + 30 * WEEK_HEIGHT, 32),
-			512 * 32
+			C32_GRAPH_WEIGHT
 		);
 		assert_eq!(
 			graph_weight(2 * YEAR_HEIGHT + 31 * WEEK_HEIGHT, 32),
-			512 * 32
+			C32_GRAPH_WEIGHT
 		);

 		// 3 years in, nothing changes
 		assert_eq!(graph_weight(3 * YEAR_HEIGHT, 31), 0);
-		assert_eq!(graph_weight(3 * YEAR_HEIGHT, 32), 512 * 32);
+		assert_eq!(graph_weight(3 * YEAR_HEIGHT, 32), C32_GRAPH_WEIGHT);
 		assert_eq!(graph_weight(3 * YEAR_HEIGHT, 33), 1024 * 33);

 		// 4 years in, still on hold
 		assert_eq!(graph_weight(4 * YEAR_HEIGHT, 31), 0);
-		assert_eq!(graph_weight(4 * YEAR_HEIGHT, 32), 512 * 32);
+		assert_eq!(graph_weight(4 * YEAR_HEIGHT, 32), C32_GRAPH_WEIGHT);
 		assert_eq!(graph_weight(4 * YEAR_HEIGHT, 33), 1024 * 33);
+		assert_eq!(graph_weight(4 * YEAR_HEIGHT, 33), 1024 * 33);
 	}
 }
@@ -432,14 +432,14 @@ pub struct UntrustedBlockHeader(BlockHeader);
 impl Readable for UntrustedBlockHeader {
 	fn read<R: Reader>(reader: &mut R) -> Result<UntrustedBlockHeader, ser::Error> {
 		let header = read_block_header(reader)?;
-		if header.timestamp
-			> Utc::now() + Duration::seconds(12 * (consensus::BLOCK_TIME_SEC as i64))
-		{
-			// refuse blocks more than 12 blocks intervals in future (as in bitcoin)
+		let ftl = global::get_future_time_limit();
+		if header.timestamp > Utc::now() + Duration::seconds(ftl as i64) {
+			// refuse blocks whose timestamp is too far in the future
+			// this future_time_limit (FTL) is specified in grin-server.toml
 			// TODO add warning in p2p code if local time is too different from peers
 			error!(
-				"block header {} validation error: block time is more than 12 blocks in future",
-				header.hash()
+				"block header {} validation error: block time is more than {} seconds in the future",
+				header.hash(), ftl
 			);
 			return Err(ser::Error::CorruptedData);
 		}
@@ -19,8 +19,8 @@
 use crate::consensus::{
 	graph_weight, header_version, HeaderInfo, BASE_EDGE_BITS, BLOCK_KERNEL_WEIGHT,
 	BLOCK_OUTPUT_WEIGHT, BLOCK_TIME_SEC, COINBASE_MATURITY, CUT_THROUGH_HORIZON, DAY_HEIGHT,
-	DEFAULT_MIN_EDGE_BITS, DIFFICULTY_ADJUST_WINDOW, INITIAL_DIFFICULTY, MAX_BLOCK_WEIGHT,
-	PROOFSIZE, SECOND_POW_EDGE_BITS, STATE_SYNC_THRESHOLD,
+	DEFAULT_MIN_EDGE_BITS, DMA_WINDOW, INITIAL_DIFFICULTY, MAX_BLOCK_WEIGHT, PROOFSIZE,
+	SECOND_POW_EDGE_BITS, STATE_SYNC_THRESHOLD,
 };
 use crate::core::block::HeaderVersion;
 use crate::pow::{
@@ -81,6 +81,9 @@ pub const TESTING_INITIAL_DIFFICULTY: u64 = 1;
 /// Testing max_block_weight (artifically low, just enough to support a few txs).
 pub const TESTING_MAX_BLOCK_WEIGHT: u64 = 250;

+/// default Future Time Limit (FTL) of 5 minutes
+pub const DEFAULT_FUTURE_TIME_LIMIT: u64 = 5 * 60;
+
 /// If a peer's last updated difficulty is 2 hours ago and its difficulty's lower than ours,
 /// we're sure this peer is a stuck node, and we will kick out such kind of stuck peers.
 pub const STUCK_PEER_KICK_TIME: i64 = 2 * 3600 * 1000;
@@ -142,6 +145,11 @@ lazy_static! {
 	/// to be overridden on a per-thread basis (for testing).
 	pub static ref GLOBAL_CHAIN_TYPE: OneTime<ChainTypes> = OneTime::new();

+	/// Global future time limit that must be initialized once on node startup.
+	/// This is accessed via get_future_time_limit() which allows the global value
+	/// to be overridden on a per-thread basis (for testing).
+	pub static ref GLOBAL_FUTURE_TIME_LIMIT: OneTime<u64> = OneTime::new();
+
 	/// Global feature flag for NRD kernel support.
 	/// If enabled NRD kernels are treated as valid after HF3 (based on header version).
 	/// If disabled NRD kernels are invalid regardless of header version or block height.
@@ -152,10 +160,19 @@ thread_local! {
 	/// Mainnet|Testnet|UserTesting|AutomatedTesting
 	pub static CHAIN_TYPE: Cell<Option<ChainTypes>> = Cell::new(None);

+	/// maximum number of seconds into future for timestamp of block to be acceptable
+	pub static FUTURE_TIME_LIMIT: Cell<Option<u64>> = Cell::new(None);
+
 	/// Local feature flag for NRD kernel support.
 	pub static NRD_FEATURE_ENABLED: Cell<Option<bool>> = Cell::new(None);
 }

+/// One time initialization of the global chain_type.
+/// Will panic if we attempt to re-initialize this (via OneTime).
+pub fn init_global_chain_type(new_type: ChainTypes) {
+	GLOBAL_CHAIN_TYPE.init(new_type)
+}
+
 /// Set the chain type on a per-thread basis via thread_local storage.
 pub fn set_local_chain_type(new_type: ChainTypes) {
 	CHAIN_TYPE.with(|chain_type| chain_type.set(Some(new_type)))
@@ -165,31 +182,53 @@ pub fn set_local_chain_type(new_type: ChainTypes) {
 pub fn get_chain_type() -> ChainTypes {
 	CHAIN_TYPE.with(|chain_type| match chain_type.get() {
 		None => {
-			if GLOBAL_CHAIN_TYPE.is_init() {
-				let chain_type = GLOBAL_CHAIN_TYPE.borrow();
-				set_local_chain_type(chain_type);
-				chain_type
-			} else {
+			if !GLOBAL_CHAIN_TYPE.is_init() {
 				panic!("GLOBAL_CHAIN_TYPE and CHAIN_TYPE unset. Consider set_local_chain_type() in tests.");
 			}
+			let chain_type = GLOBAL_CHAIN_TYPE.borrow();
+			set_local_chain_type(chain_type);
+			chain_type
 		}
 		Some(chain_type) => chain_type,
 	})
 }

-/// One time initialization of the global chain_type.
+/// One time initialization of the global future time limit
 /// Will panic if we attempt to re-initialize this (via OneTime).
-pub fn init_global_chain_type(new_type: ChainTypes) {
-	GLOBAL_CHAIN_TYPE.init(new_type)
+pub fn init_global_future_time_limit(new_ftl: u64) {
+	GLOBAL_FUTURE_TIME_LIMIT.init(new_ftl)
 }

-/// One time initialization of the global chain_type.
+/// Set the future time limit on a per-thread basis via thread_local storage.
+pub fn set_local_future_time_limit(new_ftl: u64) {
+	FUTURE_TIME_LIMIT.with(|ftl| ftl.set(Some(new_ftl)))
+}
+
+/// Future Time Limit (FTL)
+/// Look at thread local config first. If not set fallback to global config.
+/// Default to false if global config unset.
+pub fn get_future_time_limit() -> u64 {
+	FUTURE_TIME_LIMIT.with(|ftl| match ftl.get() {
+		None => {
+			let ftl = if GLOBAL_FUTURE_TIME_LIMIT.is_init() {
+				GLOBAL_FUTURE_TIME_LIMIT.borrow()
+			} else {
+				DEFAULT_FUTURE_TIME_LIMIT
+			};
+			set_local_future_time_limit(ftl);
+			ftl
+		}
+		Some(ftl) => ftl,
+	})
+}
+
+/// One time initialization of the global NRD feature flag.
 /// Will panic if we attempt to re-initialize this (via OneTime).
 pub fn init_global_nrd_enabled(enabled: bool) {
 	GLOBAL_NRD_FEATURE_ENABLED.init(enabled)
 }

-/// Explicitly enable the NRD global feature flag.
+/// Explicitly enable the local NRD feature flag.
 pub fn set_local_nrd_enabled(enabled: bool) {
 	NRD_FEATURE_ENABLED.with(|flag| flag.set(Some(enabled)))
 }
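Taken together, the new global/thread-local pair gives header validation a lock-free read path. A hypothetical startup sketch by the editor — the global:: function names come from this diff, while the config-reading step is assumed:

// Editor's sketch: wiring the FTL from node config at startup (inside grin_core).
use crate::global;

fn apply_server_config(configured_ftl: Option<u64>) {
	if let Some(ftl) = configured_ftl {
		// one-time global init; panics if called twice (via OneTime)
		global::init_global_future_time_limit(ftl);
	}
	// readers fall back to DEFAULT_FUTURE_TIME_LIMIT (5 * 60 seconds) when unset,
	// which is what makes future_time_limit non-mandatory in grin-server.toml
	let _ftl = global::get_future_time_limit();
}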
@@ -371,7 +410,7 @@ where
 	T: IntoIterator<Item = HeaderInfo>,
 {
 	// Convert iterator to vector, so we can append to it if necessary
-	let needed_block_count = DIFFICULTY_ADJUST_WINDOW as usize + 1;
+	let needed_block_count = DMA_WINDOW as usize + 1;
 	let mut last_n: Vec<HeaderInfo> = cursor.into_iter().take(needed_block_count).collect();

 	// Only needed just after blockchain launch... basically ensures there's
@@ -140,14 +140,14 @@ mod test {
 		println!("proof {}", global::proofsize());
 		pow_size(
 			&mut b.header,
-			Difficulty::min(),
+			Difficulty::min_dma(),
 			global::proofsize(),
 			global::min_edge_bits(),
 		)
 		.unwrap();
 		println!("nonce {}", b.header.pow.nonce);
 		assert_ne!(b.header.pow.nonce, 310);
-		assert!(b.header.pow.to_difficulty(0) >= Difficulty::min());
+		assert!(b.header.pow.to_difficulty(0) >= Difficulty::min_dma());
 		assert!(verify_size(&b.header).is_ok());
 	}
 }
@@ -12,7 +12,9 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-use crate::consensus::{graph_weight, MIN_DIFFICULTY, SECOND_POW_EDGE_BITS};
+use crate::consensus::{
+	graph_weight, MIN_DMA_DIFFICULTY, MIN_WTEMA_DIFFICULTY, SECOND_POW_EDGE_BITS,
+};
 use crate::core::hash::{DefaultHashable, Hashed};
 use crate::global;
 use crate::pow::error::Error;
@@ -56,9 +58,16 @@ impl Difficulty {
 	}

 	/// Difficulty of MIN_DIFFICULTY
-	pub fn min() -> Difficulty {
+	pub fn min_dma() -> Difficulty {
 		Difficulty {
-			num: MIN_DIFFICULTY,
+			num: MIN_DMA_DIFFICULTY,
 		}
 	}

+	/// Difficulty of MIN_DIFFICULTY
+	pub fn min_wtema() -> Difficulty {
+		Difficulty {
+			num: MIN_WTEMA_DIFFICULTY,
+		}
+	}
+
@@ -227,7 +236,7 @@ impl Default for ProofOfWork {
 	fn default() -> ProofOfWork {
 		let proof_size = global::proofsize();
 		ProofOfWork {
-			total_difficulty: Difficulty::min(),
+			total_difficulty: Difficulty::min_dma(),
 			secondary_scaling: 1,
 			nonce: 0,
 			proof: Proof::zero(proof_size),
@@ -431,7 +431,7 @@ fn set_pow(header: &mut BlockHeader) {
 	header.pow.proof.edge_bits = edge_bits;
 	pow::pow_size(
 		header,
-		pow::Difficulty::min(),
+		pow::Difficulty::min_dma(),
 		global::proofsize(),
 		edge_bits,
 	)
@@ -122,7 +122,7 @@ where
 {
 	let fees = txs.iter().map(|tx| tx.fee()).sum();
 	let reward_output = reward::output(keychain, builder, &key_id, fees, false).unwrap();
-	Block::new(&previous_header, txs, Difficulty::min(), reward_output).unwrap()
+	Block::new(&previous_header, txs, Difficulty::min_dma(), reward_output).unwrap()
 }

 // utility producing a transaction that spends an output with the provided
@@ -13,113 +13,180 @@

 use chrono::Utc;
 use grin_core::consensus::{
-	next_difficulty, HeaderInfo, AR_SCALE_DAMP_FACTOR, BLOCK_TIME_SEC, DIFFICULTY_ADJUST_WINDOW,
-	MIN_DIFFICULTY,
+	next_dma_difficulty, next_wtema_difficulty, HeaderInfo, AR_SCALE_DAMP_FACTOR, BLOCK_TIME_SEC,
+	DMA_WINDOW, MIN_AR_SCALE, MIN_DMA_DIFFICULTY, MIN_WTEMA_DIFFICULTY, YEAR_HEIGHT,
 };
 use grin_core::global;
 use grin_core::pow::Difficulty;

-/// Checks different next_target adjustments and difficulty boundaries
+/// Checks different next_dma_difficulty adjustments and difficulty boundaries
 #[test]
-fn next_target_adjustment() {
+fn next_dma_difficulty_adjustment() {
 	global::set_local_chain_type(global::ChainTypes::AutomatedTesting);
 	let cur_time = Utc::now().timestamp() as u64;
-	let diff_min = Difficulty::min();
+	let diff_min = Difficulty::min_dma();

-	// Check we don't get stuck on difficulty <= MIN_DIFFICULTY (at 4x faster blocks at least)
+	// Check we don't get stuck on difficulty <= MIN_DMA_DIFFICULTY (at 4x faster blocks at least)
 	let mut hi = HeaderInfo::from_diff_scaling(diff_min, AR_SCALE_DAMP_FACTOR as u32);
 	hi.is_secondary = false;
-	let hinext = next_difficulty(
-		1,
-		repeat(
-			BLOCK_TIME_SEC / 4,
-			hi.clone(),
-			DIFFICULTY_ADJUST_WINDOW,
-			None,
-		),
-	);
+	let hinext = next_dma_difficulty(1, repeat(BLOCK_TIME_SEC / 4, hi.clone(), DMA_WINDOW, None));

 	assert_ne!(hinext.difficulty, diff_min);

-	// Check we don't get stuck on scale MIN_DIFFICULTY, when primary frequency is too high
-	assert_ne!(hinext.secondary_scaling, MIN_DIFFICULTY as u32);
+	// Check we don't get stuck on scale MIN_DMA_DIFFICULTY, when primary frequency is too high
+	assert_ne!(hinext.secondary_scaling, MIN_AR_SCALE as u32);

 	// just enough data, right interval, should stay constant
-	let just_enough = DIFFICULTY_ADJUST_WINDOW + 1;
+	let just_enough = DMA_WINDOW + 1;
 	hi.difficulty = Difficulty::from_num(10000);
 	assert_eq!(
-		next_difficulty(1, repeat(BLOCK_TIME_SEC, hi.clone(), just_enough, None)).difficulty,
+		next_dma_difficulty(1, repeat(BLOCK_TIME_SEC, hi.clone(), just_enough, None)).difficulty,
 		Difficulty::from_num(10000)
 	);

 	// check pre difficulty_data_to_vector effect on retargetting
 	assert_eq!(
-		next_difficulty(1, vec![HeaderInfo::from_ts_diff(42, hi.difficulty)]).difficulty,
+		next_dma_difficulty(1, vec![HeaderInfo::from_ts_diff(42, hi.difficulty)]).difficulty,
 		Difficulty::from_num(14913)
 	);

 	// checking averaging works
 	hi.difficulty = Difficulty::from_num(500);
-	let sec = DIFFICULTY_ADJUST_WINDOW / 2;
+	let sec = DMA_WINDOW / 2;
 	let mut s1 = repeat(BLOCK_TIME_SEC, hi.clone(), sec, Some(cur_time));
 	let mut s2 = repeat_offs(
-		cur_time + (sec * BLOCK_TIME_SEC) as u64,
 		BLOCK_TIME_SEC,
 		1500,
-		DIFFICULTY_ADJUST_WINDOW / 2,
+		sec,
+		cur_time + (sec * BLOCK_TIME_SEC) as u64,
 	);
 	s2.append(&mut s1);
 	assert_eq!(
-		next_difficulty(1, s2).difficulty,
+		next_dma_difficulty(1, s2).difficulty,
 		Difficulty::from_num(1000)
 	);

 	// too slow, diff goes down
 	hi.difficulty = Difficulty::from_num(1000);
 	assert_eq!(
-		next_difficulty(1, repeat(90, hi.clone(), just_enough, None)).difficulty,
+		next_dma_difficulty(1, repeat(90, hi.clone(), just_enough, None)).difficulty,
 		Difficulty::from_num(857)
 	);
 	assert_eq!(
-		next_difficulty(1, repeat(120, hi.clone(), just_enough, None)).difficulty,
+		next_dma_difficulty(1, repeat(120, hi.clone(), just_enough, None)).difficulty,
 		Difficulty::from_num(750)
 	);

 	// too fast, diff goes up
 	assert_eq!(
-		next_difficulty(1, repeat(55, hi.clone(), just_enough, None)).difficulty,
+		next_dma_difficulty(1, repeat(55, hi.clone(), just_enough, None)).difficulty,
 		Difficulty::from_num(1028)
 	);
 	assert_eq!(
-		next_difficulty(1, repeat(45, hi.clone(), just_enough, None)).difficulty,
+		next_dma_difficulty(1, repeat(45, hi.clone(), just_enough, None)).difficulty,
 		Difficulty::from_num(1090)
 	);
 	assert_eq!(
-		next_difficulty(1, repeat(30, hi.clone(), just_enough, None)).difficulty,
+		next_dma_difficulty(1, repeat(30, hi.clone(), just_enough, None)).difficulty,
 		Difficulty::from_num(1200)
 	);

 	// hitting lower time bound, should always get the same result below
 	assert_eq!(
-		next_difficulty(1, repeat(0, hi.clone(), just_enough, None)).difficulty,
+		next_dma_difficulty(1, repeat(0, hi.clone(), just_enough, None)).difficulty,
 		Difficulty::from_num(1500)
 	);

 	// hitting higher time bound, should always get the same result above
 	assert_eq!(
-		next_difficulty(1, repeat(300, hi.clone(), just_enough, None)).difficulty,
+		next_dma_difficulty(1, repeat(300, hi.clone(), just_enough, None)).difficulty,
 		Difficulty::from_num(500)
 	);
 	assert_eq!(
-		next_difficulty(1, repeat(400, hi.clone(), just_enough, None)).difficulty,
+		next_dma_difficulty(1, repeat(400, hi.clone(), just_enough, None)).difficulty,
 		Difficulty::from_num(500)
 	);

 	// We should never drop below minimum
 	hi.difficulty = Difficulty::zero();
 	assert_eq!(
-		next_difficulty(1, repeat(90, hi, just_enough, None)).difficulty,
-		Difficulty::min()
+		next_dma_difficulty(1, repeat(90, hi, just_enough, None)).difficulty,
+		Difficulty::min_dma()
 	);
 }
+
+/// Checks different next_wtema_difficulty adjustments and difficulty boundaries
+#[test]
+fn next_wtema_difficulty_adjustment() {
+	global::set_local_chain_type(global::ChainTypes::AutomatedTesting);
+	let cur_time = Utc::now().timestamp() as u64;
+	let hf4 = 2 * YEAR_HEIGHT; // height of HardFork4, switching to wtema DAA
+	let diff_min = Difficulty::min_wtema();
+
+	// Check we don't get stuck on difficulty <= MIN_WTEMA_DIFFICULTY (at 4x faster blocks at least)
+	let mut hi = HeaderInfo::from_diff_scaling(diff_min, 0);
+	hi.is_secondary = false;
+	let hinext = next_wtema_difficulty(hf4, repeat(BLOCK_TIME_SEC - 1, hi.clone(), 2, None));
+
+	assert_ne!(hinext.difficulty, diff_min);
+
+	// 2 headers of data (last&prev), right interval, should stay constant
+	let last2 = 2;
+	hi.difficulty = Difficulty::from_num(20000);
+	assert_eq!(
+		next_wtema_difficulty(hf4, repeat(BLOCK_TIME_SEC, hi.clone(), last2, None)).difficulty,
+		Difficulty::from_num(20000)
+	);
+
+	// too slow, diff goes down
+	assert_eq!(
+		next_wtema_difficulty(hf4, repeat(61, hi.clone(), last2, None)).difficulty,
+		Difficulty::from_num(19998)
+	);
+	assert_eq!(
+		next_wtema_difficulty(hf4, repeat(90, hi.clone(), last2, None)).difficulty,
+		Difficulty::from_num(19958)
+	);
+	assert_eq!(
+		next_wtema_difficulty(hf4, repeat(120, hi.clone(), last2, None)).difficulty,
+		Difficulty::from_num(19917)
+	);
+	assert_eq!(
+		next_wtema_difficulty(hf4, repeat(300, hi.clone(), last2, None)).difficulty,
+		Difficulty::from_num(19672)
+	);
+	assert_eq!(
+		next_wtema_difficulty(hf4, repeat(400, hi.clone(), last2, None)).difficulty,
+		Difficulty::from_num(19538)
+	);
+
+	// too fast, diff goes up
+	assert_eq!(
+		next_wtema_difficulty(hf4, repeat(59, hi.clone(), last2, None)).difficulty,
+		Difficulty::from_num(20001)
+	);
+	assert_eq!(
+		next_wtema_difficulty(hf4, repeat(55, hi.clone(), last2, None)).difficulty,
+		Difficulty::from_num(20006)
+	);
+	assert_eq!(
+		next_wtema_difficulty(hf4, repeat(45, hi.clone(), last2, None)).difficulty,
+		Difficulty::from_num(20020)
+	);
+	assert_eq!(
+		next_wtema_difficulty(hf4, repeat(30, hi.clone(), last2, None)).difficulty,
+		Difficulty::from_num(20041)
+	);
+	assert_eq!(
+		next_wtema_difficulty(hf4, repeat(0, hi.clone(), last2, None)).difficulty,
+		Difficulty::from_num(20083)
+	);

+	// We should never drop below minimum
+	hi.difficulty = Difficulty::zero();
+	assert_eq!(
+		next_wtema_difficulty(hf4, repeat(90, hi, last2, None)).difficulty,
+		Difficulty::min_wtema()
+	);
+}
@@ -148,7 +215,7 @@ fn repeat(interval: u64, diff: HeaderInfo, len: u64, cur_time: Option<u64>) -> Vec<HeaderInfo> {
 		.collect::<Vec<_>>()
 }

-fn repeat_offs(from: u64, interval: u64, diff: u64, len: u64) -> Vec<HeaderInfo> {
+fn repeat_offs(interval: u64, diff: u64, len: u64, from: u64) -> Vec<HeaderInfo> {
 	repeat(
 		interval,
 		HeaderInfo::from_ts_diff(1, Difficulty::from_num(diff)),
@@ -109,7 +109,7 @@ fn get_diff_stats(chain_sim: &[HeaderInfo]) -> DiffStats {

 	let sum_blocks: Vec<HeaderInfo> = global::difficulty_data_to_vector(diff_iter.iter().cloned())
 		.into_iter()
-		.take(DIFFICULTY_ADJUST_WINDOW as usize)
+		.take(DMA_WINDOW as usize)
 		.collect();

 	let sum_entries: Vec<DiffBlock> = sum_blocks
@@ -155,9 +155,9 @@ fn get_diff_stats(chain_sim: &[HeaderInfo]) -> DiffStats {
 	DiffStats {
 		height: tip_height as u64,
 		last_blocks: diff_entries,
-		average_block_time: block_time_sum / (DIFFICULTY_ADJUST_WINDOW),
-		average_difficulty: block_diff_sum / (DIFFICULTY_ADJUST_WINDOW),
-		window_size: DIFFICULTY_ADJUST_WINDOW,
+		average_block_time: block_time_sum / DMA_WINDOW,
+		average_difficulty: block_diff_sum / DMA_WINDOW,
+		window_size: DMA_WINDOW,
 		block_time_sum: block_time_sum,
 		block_diff_sum: block_diff_sum,
 		latest_ts: latest_ts,
@@ -208,10 +208,10 @@ fn print_chain_sim(chain_sim: Vec<(HeaderInfo, DiffStats)>) {
 	let mut last_time = 0;
 	let mut first = true;
 	println!("Constants");
-	println!("DIFFICULTY_ADJUST_WINDOW: {}", DIFFICULTY_ADJUST_WINDOW);
+	println!("DIFFICULTY_ADJUST_WINDOW: {}", DMA_WINDOW);
 	println!("BLOCK_TIME_WINDOW: {}", BLOCK_TIME_WINDOW);
 	println!("CLAMP_FACTOR: {}", CLAMP_FACTOR);
-	println!("DAMP_FACTOR: {}", DIFFICULTY_DAMP_FACTOR);
+	println!("DAMP_FACTOR: {}", DMA_DAMP_FACTOR);
 	chain_sim.iter().enumerate().for_each(|(i, b)| {
 		let block = b.0.clone();
 		let stats = b.1.clone();
@@ -274,7 +274,7 @@ fn adjustment_scenarios() {
 	println!("*********************************************************");
 	print_chain_sim(chain_sim);
 	println!("*********************************************************");
-	let just_enough = (DIFFICULTY_ADJUST_WINDOW) as usize;
+	let just_enough = DMA_WINDOW as usize;

 	// Steady difficulty for a good while, then a sudden drop
 	let chain_sim = create_chain_sim(global::initial_block_difficulty());
@@ -360,7 +360,7 @@ fn test_secondary_pow_ratio() {
 fn test_secondary_pow_scale() {
 	global::set_local_chain_type(global::ChainTypes::Mainnet);

-	let window = DIFFICULTY_ADJUST_WINDOW;
+	let window = DMA_WINDOW;
 	let mut hi = HeaderInfo::from_diff_scaling(Difficulty::from_num(10), 100);

 	// all primary, factor should increase so it becomes easier to find a high
doc/pow/pow.md (185 lines changed)
@@ -1,6 +1,6 @@
-# Grin's Proof-of-Work
+# Cuckoo Cycle

-*Read this document in other languages: [Korean](pow_KR.md).*
+> *Read this document in other languages: [Korean](https://github.com/mimblewimble/grin/blob/master/doc/pow/pow_KR.md).*

 This document is meant to outline, at a level suitable for someone without prior knowledge,
 the algorithms and processes currently involved in Grin's Proof-of-Work system. We'll start
@@ -8,36 +8,27 @@ with a general overview of cycles in a graph and the Cuckoo Cycle algorithm which forms the
 basis of Grin's proof-of-work. We'll then move on to Grin-specific details, which will outline
 the other systems that combine with Cuckoo Cycle to form the entirety of mining in Grin.

-Please note that Grin is currently under active development, and any and all of this is subject to
-(and will) change before a general release.
-
 ## Graphs and Cuckoo Cycle

 Grin's basic Proof-of-Work algorithm is called Cuckoo Cycle, which is specifically designed
-to be resistant to Bitcoin style hardware arms-races. It is primarily a memory bound algorithm,
-which, (at least in theory,) means that solution time is bound by memory bandwidth
-rather than raw processor or GPU speed. As such, mining Cuckoo Cycle solutions should be viable on
-most commodity hardware, and require far less energy than most other GPU, CPU or ASIC-bound
-proof of work algorithms.
+to differ from Bitcoin's Hashcash that's purely computational. It is primarily a memory bound algorithm,
+which (at least in theory) means that solving performance is bound by memory bandwidth
+rather than raw computational speed. As such, eventual ASICs should run much cooler than hashing chips,
+and optimization will remain more digital than analog in nature (as they are for Bitcoin).

 The Cuckoo Cycle POW is the work of John Tromp, and the most up-to-date documentation and implementations
 can be found in [his github repository](https://github.com/tromp/cuckoo). The
 [white paper](https://github.com/tromp/cuckoo/blob/master/doc/cuckoo.pdf) is the best source of
 further technical details.

-There is also a [podcast with Mike from Monero Monitor](https://moneromonitor.libsyn.com/e14-john-tromp-on-cuckoo-cycle-pow-john-mcafee-on-monero)
-in which John Tromp talks at length about Cuckoo Cycle; recommended listening for anyone wanting
-more background on Cuckoo Cycle, including more technical detail, the history of the algorithm's development
-and some of the motivations behind it.
-
 ### Cycles in a Graph

-Cuckoo Cycle is an algorithm meant to detect cycles in a bipartite graph of N nodes
-and M edges. In plain terms, a bipartite graph is one in which edges (i.e. lines connecting nodes)
-travel only between 2 separate groups of nodes. In the case of the Cuckoo hashtable in Cuckoo Cycle,
-one side of the graph is an array numbered with odd indices (up to the size of the graph), and the other is numbered with even
-indices. A node is simply a numbered 'space' on either side of the Cuckoo Table, and an Edge is a
-line connecting two nodes on opposite sides. The simple graph below denotes just such a graph,
+Cuckoo Cycle is an algorithm meant to detect cycles in a bipartite graph of M edges on N+N nodes.
+In plain terms, a bipartite graph is one in which edges (i.e. lines connecting nodes)
+travel only between 2 separate groups of nodes.
+We can number the nodes so that one group consists of even numbered nodes, while the other group consists of odd numbered nodes.
+Each edge will then connect an even node with an odd node.
+The simple graph below denotes just such a graph,
 with 4 nodes on the 'even' side (top), 4 nodes on the odd side (bottom) and zero Edges
 (i.e. no lines connecting any nodes.)
@@ -51,16 +42,15 @@ Let's throw a few Edges into the graph now, randomly:

 *8 Nodes with 4 Edges, no solution*

-We now have a randomly-generated graph with 8 nodes (N) and 4 edges (M), or an NxM graph where
-N=8 and M=4. Our basic Proof-of-Work is now concerned with finding 'cycles' of a certain length
-within this random graph, or, put simply, a series of connected nodes starting and ending on the
-same node. So, if we were looking for a cycle of length 4 (a path connecting 4 nodes, starting
-and ending on the same node), one cannot be detected in this graph.
+We now have a randomly-generated graph with N+N=4+4 nodes and M=4 edges.
+Our basic Proof-of-Work is now concerned with finding 'cycles' of a certain length
+within this graph, or, put simply, a path of connected nodes starting and ending at the
+same node. So, if we were looking for a cycle of length 4, one cannot be detected in this graph.

-Adjusting the number of Edges M relative to the number of Nodes N changes the difficulty of the
-cycle-finding problem, and the probability that a cycle exists in the current graph. For instance,
-if our POW problem were concerned with finding a cycle of length 4 in the graph, the current difficulty of 4/8 (M/N)
-would mean that all 4 edges would need to be randomly generated in a perfect cycle (from 0-5-4-1-0)
+Adjusting the number of Edges M relative to the number of Nodes N changes
+the probability that a cycle exists in the graph. For instance,
+if our POW problem were concerned with finding a cycle of length 4 in the graph, having M=4
+means that all 4 edges would need to be randomly generated in a perfect cycle (from 0-5-4-1-0)
 in order for there to be a solution.

 Let's add a few more edges, again at random:
@@ -93,31 +83,28 @@ does this graph have a cycle of length 8, i.e. 8 connected nodes starting and ending on the same node?
 The answer is left as an exercise to the reader, but the overall takeaways are:

 * Detecting cycles in a graph becomes a more difficult exercise as the size of a graph grows.
-* The probability of a cycle of a given length in a graph increases as M/N becomes larger,
-  i.e. you add more edges relative to the number of nodes in a graph.
+* The probability of a cycle of a given length in a graph increases as the average degree M/N becomes larger.

 ### Cuckoo Cycle

-The Cuckoo Cycle algorithm is a specialized algorithm designed to solve exactly this problem, and it does
-so by inserting values into a structure called a 'Cuckoo Hashtable' according to a hash which maps nodes
-into possible locations in two separate arrays. This document won't go into detail on the base algorithm, as
-it's outlined plainly enough in section 5 of the
+The basic Cuckoo Cycle algorithm is a specialized algorithm designed to solve exactly this problem for the case M=N.
+This document won't go into detail on the base algorithm, as it's outlined plainly enough in section 5 of the
 [white paper](https://github.com/tromp/cuckoo/blob/master/doc/cuckoo.pdf). There are also several
 variants on the algorithm that make various speed/memory tradeoffs, again beyond the scope of this document.
 However, there are a few details following from the above that we need to keep in mind before going on to more
 technical aspects of Grin's proof-of-work.

 * The 'random' edges in the graph demonstrated above are not actually random but are generated by
-  putting edge indices (0..N) through a seeded hash function, SIPHASH. Each edge index is put through the
+  putting edge indices (0..N-1) through a keyed hash function, SIPHASH. Each edge index is put through the
   SIPHASH function twice to create two edge endpoints, with the first input value being 2 * edge_index,
-  and the second 2 * edge_index+1. The seed for this function is based on a hash of a block header,
+  and the second 2 * edge_index+1. The key for this function is based on a hash of a block header,
   outlined further below.
-* The 'Proof' created by this algorithm is a set of nonces that generate a cycle of length 42,
+* The 'Proof' created by this algorithm is a set of edge indices that generate a cycle of length 42 (sorted by edge index),
   which can be trivially validated by other peers.
 * Two main parameters, as explained above, are passed into the Cuckoo Cycle algorithm that affect the probability of a solution, and the
   time it takes to search the graph for a solution:
-  * The M/N ratio outlined above, which controls the number of edges relative to the size of the graph.
-    Cuckoo Cycle fixes M at N/2, which limits the number of cycles to a few at most.
+  * The M/N ratio outlined above, which controls the average node degree.
+    Cuckoo Cycle fixes M=N, which limits the number of cycles to a small number.
   * The size of the graph itself

 How these parameters interact in practice is looked at in more [detail below](#mining-loop-difficulty-control-and-timing).
@@ -126,7 +113,19 @@ Now, (hopefully) armed with a basic understanding of what the Cuckoo Cycle algorithm is

 ## Mining in Grin

-The Cuckoo Cycle outlined above forms the basis of Grin's mining process, however Grin uses Cuckoo Cycle in tandem with several other systems to create a Proof-of-Work.
+The Cuckoo Cycle outlined above forms the basis of Grin's mining process, however Grin uses two variants of Cuckoo Cycle in tandem with several other systems to create a Proof-of-Work.
+
+1. for GPUs: Cuckaroo on 2^29 edges
+   * Tweaked every 6 months to maintain ASIC resistance.
+   * 90% of rewards at launch, linearly decreasing to 0% in 2 years.
+   * Variant of Cuckoo that enforces so-called "mean" mining.
+   * Takes 5.5GB of memory (perhaps 4GB with slowdown).
+2. for ASICs: [Cuckatoo](https://youtu.be/h4AJDKoeO9E?t=2071) on 2^31 or more edges
+   * Variant of Cuckoo that simplifies ASIC design.
+   * 10% of rewards at launch, linearly increasing to 100% in 2 years.
+   * Mean mineable in high memory GPUs.
+   * Takes 512 MB of SRAM memory for so-called "lean" mining.
+   * Smoothly transitions to 2^32 or more by July 2020, called Cuckatoo32+ (explained why [here](https://forum.grin.mw/t/grin-improvement-proposal-1-put-later-phase-outs-on-hold-and-rephrase-primary-pow-commitment/4653))

 ### Additional Difficulty Control
|
@ -134,24 +133,24 @@ In order to provide additional difficulty control in a manner that meets the nee
|
|||
availability, a further Hashcash-based difficulty check is applied to potential solution sets as follows:
|
||||
|
||||
If the Blake2b hash
|
||||
of a potential set of solution nonces (currently an array of 42 u32s representing the cycle nonces,)
|
||||
of a solution (a sorted array of 42 packed edge indices representing the edges in a 42-cycle)
|
||||
is less than an evolving difficulty target T, then the solution is considered valid. More precisely,
|
||||
the proof difficulty is calculated as the maximum target hash (2^256) divided by the current hash,
|
||||
rounded to give an integer. If this integer is larger than the evolving network difficulty, the POW
|
||||
is considered valid and the block is submit to the chain for validation.
|
||||
the solution difficulty is calculated as the maximum target hash (2^256) divided by the solution hash,
|
||||
rounded to a 64-bit integer. If this integer is at least the evolving network difficulty, the POW
|
||||
is considered valid and the block is submitted to the chain for validation.
|
||||
|
||||
In other words, a potential proof, as well as containing a valid Cuckoo Cycle, also needs to hash to a value higher than the target difficulty. This difficulty is derived from:
|
||||
In other words, a potential proof, as well as containing a valid Cuckoo Cycle, also needs to exceed the network difficulty.
|
||||
This difficulty is derived from:
|
||||
|
||||
### Evolving Network Difficulty
|
||||
|
||||
The difficulty target is intended to evolve according to the available network hashpower, with the goal of
|
||||
keeping the average block solution time within range of a target (currently 60 seconds, though this is subject
|
||||
to change).
|
||||
keeping the average block solution time within range of a target of exactly 60 seconds.
|
||||
|
||||
The difficulty calculation is based on both Digishield and GravityWave family of difficulty computation,
|
||||
coming to something very close to ZCash. The reference difficulty is an average of the difficulty over a window of
|
||||
23 blocks (the current consensus value). The corresponding timespan is calculated by using the difference between
|
||||
the median timestamps at the beginning and the end of the window. If the timespan is higher or lower than a certain
|
||||
The difficulty calculation is based on a simple moving average computation, somewhat similar to ZCash.
|
||||
The reference difficulty is an average of the difficulty over a window of
|
||||
60 blocks (nominally one hour). The corresponding timespan is calculated by using the difference between
|
||||
the timestamps at the beginning and the end of the window. If the timespan is higher or lower than a certain
|
||||
range, (adjusted with a dampening factor to allow for normal variation,) then the difficulty is raised or lowered
|
||||
to a value aiming for the target block solve time.
|
||||
|
||||
|
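To make the division above concrete, the editor's 64-bit sketch below illustrates "maximum target hash (2^256) divided by the solution hash, rounded to a 64-bit integer" by dividing by the hash's leading 8 bytes — an approximation for illustration, not code lifted from Grin:

// Editor's sketch: a 64-bit rendering of "2^256 divided by the solution hash".
use std::convert::TryInto;

fn proof_difficulty(blake2b_hash: [u8; 32]) -> u64 {
	// a smaller hash yields a larger difficulty
	let leading = u64::from_be_bytes(blake2b_hash[..8].try_into().unwrap());
	u64::MAX / leading.max(1) // guard the (astronomically unlikely) zero prefix
}

fn main() {
	// a hash whose 8-byte prefix is 0x0001ffffffffffff has difficulty exactly 2^15
	let mut h = [0xffu8; 32];
	h[0] = 0x00;
	h[1] = 0x01;
	assert_eq!(proof_difficulty(h), 1u64 << 15);
}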
@ -160,61 +159,27 @@ to a value aiming for the target block solve time.
|
|||
All of these systems are put together in the mining loop, which attempts to create
|
||||
valid Proofs-of-Work to create the latest block in the chain. The following is an outline of what the main mining loop does during a single iteration:
|
||||
|
||||
* Get the latest chain state and build a block on top of it, which includes
|
||||
* A Block Header with new values particular to this mining attempt, which are:
|
||||
1. Get the latest chain state and build a block on top of it, which includes a Block Header with new values particular to this mining attempt:
|
||||
* The latest target difficulty as selected by the [evolving network difficulty](#evolving-network-difficulty) algorithm
|
||||
* A set of transactions available for validation selected from the transaction pool
|
||||
* A coinbase transaction (which we're hoping to give to ourselves)
|
||||
* The current timestamp
|
||||
* Roots of Merkle Mountain Ranges for headers, outputs, and kernels
|
||||
* A randomly generated nonce to add further randomness to the header's hash
|
||||
* The merkle root of the UTXO set and fees (not yet implemented)
|
||||
* Then, a sub-loop runs for a set amount of time, currently configured at 2 seconds, where the following happens:
|
||||
* The new block header is hashed to create a hash value
|
||||
* The cuckoo graph generator is initialized, which accepts as parameters:
|
||||
* The hash of the potential block header, which is to be used as the key to a SIPHASH function
|
||||
that will generate pairs of locations for each element in a set of nonces 0..N in the graph.
|
||||
* The size of the graph (a consensus value).
|
||||
* An easiness value, (a consensus value) representing the M/N ratio described above denoting the probability
|
||||
of a solution appearing in the graph
|
||||
* The Cuckoo Cycle detection algorithm tries to find a solution (i.e. a cycle of length 42) within the generated
|
||||
graph.
|
||||
* If a cycle is found, a Blake2b hash of the proof is created and is compared to the current target
|
||||
difficulty, as outlined in [Additional Difficulty Control](#additional-difficulty-control) above.
|
||||
* If the Blake2b Hash difficulty is greater than or equal to the target difficulty, the block is sent to the
|
||||
transaction pool, propagated amongst peers for validation, and work begins on the next block.
|
||||
* If the Blake2b Hash difficulty is less than the target difficulty, the proof is thrown out and the timed loop continues.
|
||||
* If no solution is found, increment the nonce in the header by 1, and update the header's timestamp so the next iteration
|
||||
hashes a different value for seeding the next loop's graph generation step.
|
||||
* If the loop times out with no solution found, start over again from the top, collecting new transactions and creating
|
||||
a new block altogether.
|
||||
2. Then, a sub-loop runs for a set amount of time, where the following happens:
|
||||
1. The new block header is hashed to create a hash value
|
||||
2. The cuckoo graph generator is initialized, which accepts as parameters:
|
||||
|
||||
### Mining Loop Difficulty Control and Timing
|
||||
* The hash of the potential block header, which is to be used as
|
||||
* the key to a SIPHASH function that will generate pairs of locations for each element in a set of nonces 0..N in the graph.
|
||||
* The size of the graph (chosen by the miner).
|
||||
|
||||
Controlling the overall difficulty of the mining loop requires finding a balance between the three values outlined above:

* Graph size (currently represented as a bit-shift value n representing a size of 2^n nodes, consensus value DEFAULT_SIZESHIFT). Smaller graphs can be exhaustively searched more quickly, but will also have fewer solutions for a given easiness value. A very small graph needs a higher easiness value to have the same chance of containing a solution as a larger graph with a lower easiness value.
* The 'easiness' consensus value, or the M/N ratio of the graph expressed as a percentage. The higher this value, the more likely it is that a generated graph will contain a solution. In tandem with the above, the larger the graph, the more solutions it will contain for a given easiness value. The Cuckoo Cycle implementations fix M to N/2, giving a ratio of 50%.
* The evolving network difficulty hash.

These values need to be carefully tweaked in order for the mining algorithm to find the right balance between the cuckoo graph size and the evolving difficulty. The POW needs to remain mostly Cuckoo Cycle based, but still allow for reasonably short block times so that new transactions can be processed quickly.

If the graph size is too low and the easiness too high, for instance, then many cuckoo cycle solutions can easily be found for a given block, and the POW will start to favour those who can hash faster, precisely what Cuckoo Cycle is trying to avoid. If the graph is too large and the easiness too low, however, then it can potentially take any solver a long time to find a solution in a single graph, well outside the window in which you'd like to stop to collect new transactions.

These values are currently set to 2^12 for the graph size and 50% (as fixed by Cuckoo Cycle) for the easiness value; however, the size is only a temporary value for testing. The current miner implementation is very unoptimized, and the graph size will need to be changed as faster and more optimized Cuckoo Cycle algorithms are put in place.
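
As a back-of-the-envelope illustration of what the size-shift parameter implies for each attempt (a sketch with easiness fixed at the 50% ratio quoted above; the numbers are not benchmarks of any miner):

```rust
// Sketch: what a bit-shift graph-size parameter (a la DEFAULT_SIZESHIFT)
// means for the per-attempt graph, with easiness fixed at M/N = 50%.
fn main() {
    for sizeshift in [12u32, 20, 30] {
        let nodes: u64 = 1 << sizeshift; // N = 2^n nodes
        let edges = nodes / 2; // M = N/2 edges at 50% easiness
        println!(
            "sizeshift {:>2}: {:>10} nodes, {:>9} edges per attempt",
            sizeshift, nodes, edges
        );
    }
}
```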
3. The Cuckoo Cycle detection algorithm tries to find a solution (i.e. a cycle of length 42) within the generated graph.
4. If a cycle is found, a Blake2b hash of the proof is created, a solution difficulty computed, and compared to the current target difficulty, as outlined in [Additional Difficulty Control](#additional-difficulty-control) above.
5. If the solution difficulty is greater than or equal to the target difficulty, the block is sent to the transaction pool, propagated amongst peers for validation, and work begins on the next block.
6. If the solution difficulty is less than the target difficulty, the proof is thrown out and the timed loop continues.
7. If no solution is found, increment the nonce in the header by 1, and update the header's timestamp so the next iteration hashes a different value for seeding the next loop's graph generation step.
8. If the loop times out with no solution found, start over again from the top, collecting new transactions and creating a new block altogether.
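
Putting steps 1 through 8 together, the control flow looks roughly like the sketch below. Every name in it (`build_candidate_header`, `find_42_cycle`, `proof_difficulty`, and so on) is a hypothetical stand-in rather than grin's actual API; only the shape of the timed inner loop is the point.

```rust
use std::time::{Duration, Instant};

// Hypothetical stand-ins for illustration; none of these are grin's types.
struct Header {
    nonce: u64,
    timestamp: u64,
}

struct Proof; // a 42-cycle, if one were found

fn build_candidate_header() -> Header {
    // step 1: latest chain state, txs, coinbase, MMR roots, random nonce...
    Header { nonce: 0, timestamp: 0 }
}

fn find_42_cycle(_h: &Header) -> Option<Proof> {
    None // stub: the cuckoo cycle search over the header-seeded graph
}

fn proof_difficulty(_p: &Proof) -> u64 {
    0 // stub: difficulty derived from the Blake2b hash of the proof
}

fn broadcast_block(_h: &Header, _p: Proof) {}

fn mine(target_difficulty: u64, attempt_window: Duration, max_rounds: u32) {
    for _ in 0..max_rounds {
        // step 1: build a fresh candidate block on the current chain head
        let mut header = build_candidate_header();
        let deadline = Instant::now() + attempt_window;

        // step 2: timed sub-loop of solution attempts
        while Instant::now() < deadline {
            // steps 3-4: search the graph, then grade any cycle found
            if let Some(proof) = find_42_cycle(&header) {
                if proof_difficulty(&proof) >= target_difficulty {
                    // step 5: winner; hand off and move to the next block
                    broadcast_block(&header, proof);
                    return;
                }
                // step 6: cycle too weak; discard it and keep trying
            }
            // step 7: vary nonce and timestamp so the next graph differs
            header.nonce += 1;
            header.timestamp += 1;
        }
        // step 8: window expired; loop back and rebuild with fresh txs
    }
}

fn main() {
    // one 2-second round against an unreachable target, to exercise the loop
    mine(u64::MAX, Duration::from_secs(2), 1);
}
```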
### Pooling Capability
@ -225,14 +190,16 @@ enables 'poolability' as well as a level of fairness among all miners.
#### Progress Freeness
Progress-freeness is central to the 'poolability' of a proof-of-work, and is simply based on the idea that a solution to a POW problem can be found within a reasonable amount of time. For instance, if a blockchain has a one minute POW time and miners have to spend one minute on average to find a solution, this still satisfies the POW requirement but gives a strong advantage to big miners. In such a setup, small miners will generally lose at least one minute every time while larger miners can move on as soon as they find a solution. So in order to keep mining relatively progress-free, a POW that requires multiple solution attempts with each attempt taking a relatively small amount of time is desirable.

Progress-freeness is central to the 'poolability' of a proof-of-work, and is simply based on the idea that a single solution attempt takes a relatively small amount of time. For instance, if a blockchain has a one minute block time and a slow miner needs 10 seconds per solution attempt, then it will have wasted on average 5 seconds per minute when its current attempt is aborted due to the arrival of a new block. Faster miners will have less waste, giving them an extra advantage on top of their higher attempt throughput. So in order to keep mining relatively progress-free, a POW that requires many solution attempts with each attempt taking a relatively small amount of time is desirable.
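
The 5-seconds-per-minute figure is just the expected half-attempt lost at each abort; here is a worked version of that arithmetic under the same assumptions (one minute blocks, aborts landing uniformly at random within an attempt):

```rust
// Expected time wasted per block when a new block aborts the current attempt:
// the abort lands uniformly within an attempt, so on average half of one
// attempt's duration is lost each time.
fn main() {
    let block_time_s = 60.0;
    for attempt_s in [10.0, 1.0] {
        let expected_waste = attempt_s / 2.0; // seconds lost per block
        let waste_fraction = expected_waste / block_time_s;
        println!(
            "attempt {:>4}s: ~{:.1}s wasted per {}s block ({:.1}% of mining time)",
            attempt_s, expected_waste, block_time_s, waste_fraction * 100.0
        );
    }
}
```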
Following from this, Grin's progress-freeness is due to the fact that a solution to a Cuckoo graph with Grin's default parameters can typically be found in under a second on most GPUs, and there is the additional requirement of the Blake2b difficulty check on top of that. Members of a pool are thus able to prove they're working on a solution to a block by submitting valid Cuckoo solutions (or a small bundle of them) that simply fall under the current network target difficulty.

Following from this, Grin's progress-freeness is due to the fact that a solution to a Cuckoo graph with Grin's default parameters can typically be found in under a second on most GPUs. Members of a pool are thus able to prove they're working on a solution to a block by submitting valid Cuckoo solutions (or a small bundle of them) that simply fall under the current network target difficulty.
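
A pool could grade such submissions roughly as sketched below. The `share_target` threshold and all names here are assumptions for illustration (pools typically apply their own lower threshold for crediting shares); grin's actual stratum share handling is not shown.

```rust
// Hypothetical pool-side share grading, for illustration only.
#[derive(Debug)]
enum Share {
    Invalid,    // not a valid 42-cycle, or too weak even for pool credit
    PoolCredit, // valid cycle clearing the pool's low share threshold
    Block,      // also meets the network target: an actual block
}

fn grade_share(
    valid_cycle: bool,
    solution_difficulty: u64,
    share_target: u64,
    network_target: u64,
) -> Share {
    if !valid_cycle {
        Share::Invalid
    } else if solution_difficulty >= network_target {
        Share::Block
    } else if solution_difficulty >= share_target {
        Share::PoolCredit
    } else {
        Share::Invalid
    }
}

fn main() {
    // The share target sits far below the network target, so even slow
    // miners produce frequent, verifiable evidence of work.
    println!("{:?}", grade_share(true, 5_000, 1_000, 1_000_000)); // PoolCredit
    println!("{:?}", grade_share(true, 2_000_000, 1_000, 1_000_000)); // Block
}
```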
@ -284,7 +284,7 @@ pub struct DummyAdapter {}
impl ChainAdapter for DummyAdapter {
	fn total_difficulty(&self) -> Result<Difficulty, chain::Error> {
		Ok(Difficulty::min())
		Ok(Difficulty::min_dma())
	}
	fn total_height(&self) -> Result<u64, chain::Error> {
		Ok(0)
@ -81,7 +81,7 @@ fn peer_handshake() {
	let peer = Peer::connect(
		socket,
		p2p::Capabilities::UNKNOWN,
		Difficulty::min(),
		Difficulty::min_dma(),
		my_addr,
		&p2p::handshake::Handshake::new(Hash::from_vec(&vec![]), p2p_config.clone()),
		net_adapter,
@ -92,10 +92,10 @@ fn peer_handshake() {
	thread::sleep(time::Duration::from_secs(1));

	peer.send_ping(Difficulty::min(), 0).unwrap();
	peer.send_ping(Difficulty::min_dma(), 0).unwrap();
	thread::sleep(time::Duration::from_secs(1));

	let server_peer = server.peers.get_connected_peer(my_addr).unwrap();
	assert_eq!(server_peer.info.total_difficulty(), Difficulty::min());
	assert_eq!(server_peer.info.total_difficulty(), Difficulty::min_dma());
	assert!(server.peers.iter().connected().count() > 0);
}
@ -21,7 +21,7 @@ use rand::prelude::*;
use crate::api;
use crate::chain;
use crate::core::global::ChainTypes;
use crate::core::global::{ChainTypes, DEFAULT_FUTURE_TIME_LIMIT};
use crate::core::{core, libtx, pow};
use crate::keychain;
use crate::p2p;
@ -160,6 +160,10 @@ pub struct ServerConfig {
	#[serde(default)]
	pub chain_type: ChainTypes,

	/// Future Time Limit
	#[serde(default = "default_future_time_limit")]
	pub future_time_limit: u64,

	/// Automatically run full chain validation during normal block processing?
	#[serde(default)]
	pub chain_validation_mode: ChainValidationMode,
@ -201,6 +205,10 @@ pub struct ServerConfig {
	pub webhook_config: WebHooksConfig,
}

fn default_future_time_limit() -> u64 {
	DEFAULT_FUTURE_TIME_LIMIT
}

impl Default for ServerConfig {
	fn default() -> ServerConfig {
		ServerConfig {
@ -214,6 +222,7 @@ impl Default for ServerConfig {
			dandelion_config: pool::DandelionConfig::default(),
			stratum_mining_config: Some(StratumServerConfig::default()),
			chain_type: ChainTypes::default(),
			future_time_limit: default_future_time_limit(),
			archive_mode: Some(false),
			chain_validation_mode: ChainValidationMode::default(),
			pool_config: pool::PoolConfig::default(),
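
For context on the `#[serde(default = "default_future_time_limit")]` wiring above: it is what lets a grin-server.toml with no `future_time_limit` entry still deserialize, making the field non-mandatory. Below is a minimal sketch of the same pattern, assuming the serde and toml crates; the hypothetical struct and the 5-minute value (matching the reduced FTL this commit introduces) are illustrative, not the real ServerConfig.

```rust
use serde::Deserialize;

const DEFAULT_FUTURE_TIME_LIMIT: u64 = 5 * 60; // seconds; illustrative value

fn default_future_time_limit() -> u64 {
    DEFAULT_FUTURE_TIME_LIMIT
}

#[derive(Debug, Deserialize)]
struct Config {
    // falls back to the function above when the key is absent
    #[serde(default = "default_future_time_limit")]
    future_time_limit: u64,
}

fn main() {
    // no future_time_limit key: the serde default kicks in
    let cfg: Config = toml::from_str("").unwrap();
    assert_eq!(cfg.future_time_limit, DEFAULT_FUTURE_TIME_LIMIT);

    // an explicit key overrides the default
    let cfg: Config = toml::from_str("future_time_limit = 600").unwrap();
    assert_eq!(cfg.future_time_limit, 600);
}
```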
@ -487,9 +487,9 @@ impl Server {
	DiffStats {
		height: height as u64,
		last_blocks: diff_entries,
		average_block_time: block_time_sum / (consensus::DIFFICULTY_ADJUST_WINDOW - 1),
		average_difficulty: block_diff_sum / (consensus::DIFFICULTY_ADJUST_WINDOW - 1),
		window_size: consensus::DIFFICULTY_ADJUST_WINDOW,
		average_block_time: block_time_sum / (consensus::DMA_WINDOW - 1),
		average_difficulty: block_diff_sum / (consensus::DMA_WINDOW - 1),
		window_size: consensus::DMA_WINDOW,
	}
};
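
One step of reasoning the hunk above leaves implicit: a window of n block headers bounds only n-1 inter-block intervals, which is likely why the averages divide by `DMA_WINDOW - 1`. A tiny self-contained sketch of the same computation over hypothetical timestamps (the window length stands in for `consensus::DMA_WINDOW`):

```rust
// Average block time over a difficulty window: W headers span W-1 intervals,
// hence the division by (window - 1) in the DiffStats code above.
fn main() {
    let window = 60u64; // stand-in for consensus::DMA_WINDOW
    // hypothetical timestamps: one block every 60 seconds
    let timestamps: Vec<u64> = (0..window).map(|i| 1_000_000 + i * 60).collect();

    let block_time_sum: u64 = timestamps
        .windows(2)
        .map(|pair| pair[1] - pair[0])
        .sum();
    let average_block_time = block_time_sum / (window - 1);
    assert_eq!(average_block_time, 60);
    println!("average block time: {}s over {} intervals", average_block_time, window - 1);
}
```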
@ -123,9 +123,9 @@ fn real_main() -> i32 {
		}
	}

	let mut config = node_config.clone().unwrap();
	let mut logging_config = config.members.as_mut().unwrap().logging.clone().unwrap();
	logging_config.tui_running = config.members.as_mut().unwrap().server.run_tui;
	let config = node_config.clone().unwrap();
	let mut logging_config = config.members.as_ref().unwrap().logging.clone().unwrap();
	logging_config.tui_running = config.members.as_ref().unwrap().server.run_tui;

	let (logs_tx, logs_rx) = if logging_config.tui_running.unwrap() {
		let (logs_tx, logs_rx) = mpsc::sync_channel::<LogEntry>(200);
@ -146,9 +146,9 @@ fn real_main() -> i32 {
	log_build_info();

	// Initialize our global chain_type and feature flags (NRD kernel support currently).
	// Initialize our global chain_type, feature flags (NRD kernel support currently), and future_time_limit.
	// These are read via global and not read from config beyond this point.
	global::init_global_chain_type(config.members.unwrap().server.chain_type);
	global::init_global_chain_type(config.members.as_ref().unwrap().server.chain_type);
	info!("Chain: {:?}", global::get_chain_type());
	match global::get_chain_type() {
		global::ChainTypes::Mainnet => {
@ -160,6 +160,8 @@ fn real_main() -> i32 {
			global::init_global_nrd_enabled(true);
		}
	}
	global::init_global_future_time_limit(config.members.unwrap().server.future_time_limit);
	info!("Future Time Limit: {:?}", global::get_future_time_limit());
	log_feature_flags();

	// Execute subcommand