grin/core/src/consensus.rs

// Copyright 2018 The Grin Developers
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! All the rules required for a cryptocurrency to reach consensus across
//! the whole network are complex and hard to completely isolate. Some can be
//! simple parameters (like block reward), others complex algorithms (like
//! Merkle sum trees or reorg rules). However, as long as they're simple
//! enough, consensus-relevant constants and short functions should be kept
//! here.
use std::cmp::{max, min};
use std::fmt;
use global;
use pow::Difficulty;
/// A grin is divisible to 10^9, following the SI prefixes
pub const GRIN_BASE: u64 = 1_000_000_000;
/// Milligrin, a thousandth of a grin
pub const MILLI_GRIN: u64 = GRIN_BASE / 1_000;
/// Microgrin, a thousandth of a milligrin
pub const MICRO_GRIN: u64 = MILLI_GRIN / 1_000;
/// Nanogrin, smallest unit, takes a billion to make a grin
pub const NANO_GRIN: u64 = 1;
/// Block interval, in seconds, the network will tune its next_target for. Note
/// that we may reduce this value in the future as we get more data on mining
/// with Cuckoo Cycle, networks improve and block propagation is optimized
/// (adjusting the reward accordingly).
pub const BLOCK_TIME_SEC: u64 = 60;
/// The block subsidy amount, one grin per second on average
pub const REWARD: u64 = BLOCK_TIME_SEC * GRIN_BASE;
/// Actual block reward for a given total fee amount
pub fn reward(fee: u64) -> u64 {
    REWARD + fee
}
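// A quick illustrative check (not consensus code): the block subsidy is a
// constant 60 grin, one grin per second of the 60s block interval, and fees
// are added on top of the subsidy rather than taken out of it.
#[cfg(test)]
mod reward_example {
    use super::*;

    #[test]
    fn subsidy_plus_fees() {
        assert_eq!(REWARD, 60 * GRIN_BASE);
        // a block carrying 2 milligrin of fees pays out the subsidy plus fees
        assert_eq!(reward(2 * MILLI_GRIN), 60 * GRIN_BASE + 2 * MILLI_GRIN);
    }
}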
/// Nominal height for standard time intervals, hour is 60 blocks
pub const HOUR_HEIGHT: u64 = 3600 / BLOCK_TIME_SEC;
/// A day is 1440 blocks
pub const DAY_HEIGHT: u64 = 24 * HOUR_HEIGHT;
/// A week is 10_080 blocks
pub const WEEK_HEIGHT: u64 = 7 * DAY_HEIGHT;
/// A year is 524_160 blocks
pub const YEAR_HEIGHT: u64 = 52 * WEEK_HEIGHT;
/// Number of blocks before a coinbase matures and can be spent
pub const COINBASE_MATURITY: u64 = DAY_HEIGHT;
/// Ratio the secondary proof of work should take over the primary, as a
/// function of block height (time). Starts at 90%, losing a percent
/// approximately every week. Represented as an integer between 0 and 100.
pub fn secondary_pow_ratio(height: u64) -> u64 {
    90u64.saturating_sub(height / WEEK_HEIGHT)
}
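// Illustrative sketch (not consensus code) of the secondary PoW schedule
// implied by the function above: 90% of blocks at launch, dropping by one
// percentage point per week until it reaches zero after 90 weeks.
#[cfg(test)]
mod secondary_ratio_example {
    use super::*;

    #[test]
    fn ratio_schedule() {
        assert_eq!(secondary_pow_ratio(0), 90);
        assert_eq!(secondary_pow_ratio(WEEK_HEIGHT), 89);
        assert_eq!(secondary_pow_ratio(YEAR_HEIGHT), 38);
        assert_eq!(secondary_pow_ratio(90 * WEEK_HEIGHT), 0);
        // saturating_sub keeps the ratio pinned at zero afterwards
        assert_eq!(secondary_pow_ratio(100 * WEEK_HEIGHT), 0);
    }
}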
/// Cuckoo-cycle proof size (cycle length)
pub const PROOFSIZE: usize = 42;
/// Default Cuckoo Cycle edge_bits, used for mining and validating.
pub const DEFAULT_MIN_EDGE_BITS: u8 = 30;
/// Secondary proof-of-work edge_bits, meant to be ASIC resistant.
pub const SECOND_POW_EDGE_BITS: u8 = 29;
/// Original reference edge_bits to compute difficulty factors for higher
/// Cuckoo graph sizes; changing this would cause a hard fork
pub const BASE_EDGE_BITS: u8 = 24;
/// Maximum scaling factor for secondary PoW, enforced in difficulty retargeting.
/// Increasing the scaling factor increases the frequency of secondary blocks.
/// ONLY IN TESTNET4, LIMITED TO ABOUT 8 TIMES THE NATURAL SCALE
pub const MAX_SECONDARY_SCALING: u64 = 8 << 11;
/// Default number of blocks in the past when cross-block cut-through will start
/// happening. Needs to be long enough to not overlap with a long reorg.
/// The rationale behind the value is that the longest bitcoin fork was about 30
/// blocks, so 5h. We add an order of magnitude to be safe and round to 7x24h
/// of blocks to make it easier to reason about.
pub const CUT_THROUGH_HORIZON: u32 = WEEK_HEIGHT as u32;
/// Weight of an input when counted against the max block weight capacity
pub const BLOCK_INPUT_WEIGHT: usize = 1;
/// Weight of an output when counted against the max block weight capacity
pub const BLOCK_OUTPUT_WEIGHT: usize = 10;
/// Weight of a kernel when counted against the max block weight capacity
pub const BLOCK_KERNEL_WEIGHT: usize = 2;
/// Total maximum block weight. At current sizes, this means a maximum
/// theoretical size of:
/// * `(674 + 33 + 1) * 4_000 = 2_832_000` for a block with only outputs
/// * `(1 + 8 + 8 + 33 + 64) * 20_000 = 2_280_000` for a block with only kernels
/// * `(1 + 33) * 40_000 = 1_360_000` for a block with only inputs
///
/// Given that a block needs to have at least one kernel for the coinbase,
/// and one kernel for the transaction, the practical maximum size is 2_831_440
/// (ignoring the edge case of a miner producing a block with all coinbase
/// outputs and a single kernel).
///
/// A more "standard" block, filled with transactions of 2 inputs, 2 outputs
/// and one kernel, should be around 2.66 MB
pub const MAX_BLOCK_WEIGHT: usize = 40_000;
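// Worked example (illustrative only, using the serialized byte sizes quoted
// in the doc comment above): the per-item weights bound the number of inputs,
// outputs and kernels a block may contain, and the theoretical byte sizes
// follow directly from those counts.
#[cfg(test)]
mod block_weight_example {
    use super::*;

    #[test]
    fn weight_capacity() {
        // a block of only outputs: 40_000 / 10 = 4_000 outputs
        assert_eq!(MAX_BLOCK_WEIGHT / BLOCK_OUTPUT_WEIGHT, 4_000);
        assert_eq!((674 + 33 + 1) * 4_000, 2_832_000);
        // a block of only kernels: 40_000 / 2 = 20_000 kernels
        assert_eq!(MAX_BLOCK_WEIGHT / BLOCK_KERNEL_WEIGHT, 20_000);
        assert_eq!((1 + 8 + 8 + 33 + 64) * 20_000, 2_280_000);
        // a block of only inputs: 40_000 / 1 = 40_000 inputs
        assert_eq!(MAX_BLOCK_WEIGHT / BLOCK_INPUT_WEIGHT, 40_000);
        assert_eq!((1 + 33) * 40_000, 1_360_000);
    }
}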
/// Fork every 6 months.
pub const HARD_FORK_INTERVAL: u64 = YEAR_HEIGHT / 2;
/// Check whether the block version is valid at a given height; implements
/// scheduled hard forks at 6 month intervals for the first 2 years.
pub fn valid_header_version(height: u64, version: u16) -> bool {
    // uncomment below as we go from hard fork to hard fork
    if height < HARD_FORK_INTERVAL {
        version == 1
    /* } else if height < 2 * HARD_FORK_INTERVAL {
        version == 2
    } else if height < 3 * HARD_FORK_INTERVAL {
        version == 3
    } else if height < 4 * HARD_FORK_INTERVAL {
        version == 4
    } else if height >= 5 * HARD_FORK_INTERVAL {
        version > 4 */
    } else {
        false
    }
}
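// Small illustrative sketch (not consensus code): with only version 1 active,
// headers validate solely below the first scheduled fork height.
#[cfg(test)]
mod header_version_example {
    use super::*;

    #[test]
    fn version_schedule() {
        assert!(valid_header_version(0, 1));
        assert!(valid_header_version(HARD_FORK_INTERVAL - 1, 1));
        // the first fork height onwards is not yet defined as valid
        assert!(!valid_header_version(HARD_FORK_INTERVAL, 1));
        assert!(!valid_header_version(0, 2));
    }
}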
/// Number of blocks used to calculate difficulty adjustments
pub const DIFFICULTY_ADJUST_WINDOW: u64 = HOUR_HEIGHT;
/// Average time span of the difficulty adjustment window
pub const BLOCK_TIME_WINDOW: u64 = DIFFICULTY_ADJUST_WINDOW * BLOCK_TIME_SEC;
/// Clamp factor to use for difficulty adjustment
/// Limit value to within this factor of goal
pub const CLAMP_FACTOR: u64 = 2;
/// Dampening factor to use for difficulty adjustment
pub const DAMP_FACTOR: u64 = 3;
/// Compute weight of a graph as number of siphash bits defining the graph
/// Must be made dependent on height to phase out smaller size over the years
/// This can wait until end of 2019 at latest
pub fn graph_weight(edge_bits: u8) -> u64 {
    (2 << (edge_bits - global::base_edge_bits()) as u64) * (edge_bits as u64)
}
/// Minimum difficulty, to avoid getting stuck when trying to increase difficulty subject to dampening
pub const MIN_DIFFICULTY: u64 = DAMP_FACTOR;
/// Unit difficulty, equal to graph_weight(SECOND_POW_EDGE_BITS)
pub const UNIT_DIFFICULTY: u64 =
    ((2 as u64) << (SECOND_POW_EDGE_BITS - BASE_EDGE_BITS)) * (SECOND_POW_EDGE_BITS as u64);
/// The initial difficulty at launch. This should be over-estimated
/// and difficulty should come down at launch rather than up.
/// Currently grossly over-estimated at 10% of current
/// Ethereum GPUs (assuming 1 GPU can solve a block at diff 1 in one block interval)
pub const INITIAL_DIFFICULTY: u64 = 1_000_000 * UNIT_DIFFICULTY;
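// Worked example (pure constant arithmetic, not a call into chain-specific
// globals): a secondary graph at SECOND_POW_EDGE_BITS = 29 weighs
// 2^(1 + 29 - 24) * 29 = 64 * 29 = 1_856 relative to the BASE_EDGE_BITS
// reference, which is the value graph_weight(SECOND_POW_EDGE_BITS) yields
// under default parameters.
#[cfg(test)]
mod difficulty_unit_example {
    use super::*;

    #[test]
    fn unit_difficulty_value() {
        assert_eq!(UNIT_DIFFICULTY, 64 * 29);
        assert_eq!(INITIAL_DIFFICULTY, 1_000_000 * 1_856);
    }
}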
/// Consensus errors
#[derive(Clone, Debug, Eq, PartialEq, Fail)]
pub enum Error {
    /// Inputs/outputs/kernels must be sorted lexicographically.
    SortError,
}

impl fmt::Display for Error {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "Sort Error")
    }
}
/// Minimal header information required for the Difficulty calculation to
/// take place
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct HeaderInfo {
    /// Timestamp of the header, 1 when not used (returned info)
    pub timestamp: u64,
    /// Network difficulty or next difficulty to use
    pub difficulty: Difficulty,
    /// Network secondary PoW factor or factor to use
    pub secondary_scaling: u32,
    /// Whether the header is a secondary proof of work
    pub is_secondary: bool,
}
impl HeaderInfo {
    /// Default constructor
    pub fn new(
        timestamp: u64,
        difficulty: Difficulty,
        secondary_scaling: u32,
        is_secondary: bool,
    ) -> HeaderInfo {
        HeaderInfo {
            timestamp,
            difficulty,
            secondary_scaling,
            is_secondary,
        }
    }

    /// Constructor from a timestamp and difficulty, setting a default secondary
    /// PoW factor
    pub fn from_ts_diff(timestamp: u64, difficulty: Difficulty) -> HeaderInfo {
        HeaderInfo {
            timestamp,
            difficulty,
            secondary_scaling: global::initial_graph_weight(),
            is_secondary: true,
        }
    }

    /// Constructor from a difficulty and secondary factor, setting a default
    /// timestamp
    pub fn from_diff_scaling(difficulty: Difficulty, secondary_scaling: u32) -> HeaderInfo {
        HeaderInfo {
            timestamp: 1,
            difficulty,
            secondary_scaling,
            is_secondary: true,
        }
    }
}
/// Move value linearly toward a goal
pub fn damp(actual: u64, goal: u64, damp_factor: u64) -> u64 {
    (1 * actual + (damp_factor - 1) * goal) / damp_factor
}
/// Limit value to be within some factor from a goal
pub fn clamp(actual: u64, goal: u64, clamp_factor: u64) -> u64 {
    max(goal / clamp_factor, min(actual, goal * clamp_factor))
}
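// Illustrative sketch of how damp and clamp interact in difficulty
// adjustment: an observed window timespan is first pulled a third of the way
// toward the one-hour goal (DAMP_FACTOR = 3), then bounded within a factor
// of CLAMP_FACTOR = 2 of that goal.
#[cfg(test)]
mod damp_clamp_example {
    use super::*;

    #[test]
    fn adjust_timespan() {
        // blocks arriving twice as fast as intended over the window
        let ts_delta = BLOCK_TIME_WINDOW / 2;
        // (1 * 1_800 + 2 * 3_600) / 3 = 3_000
        let damped = damp(ts_delta, BLOCK_TIME_WINDOW, DAMP_FACTOR);
        assert_eq!(damped, 3_000);
        // already within [1_800, 7_200], so clamping leaves it untouched
        assert_eq!(clamp(damped, BLOCK_TIME_WINDOW, CLAMP_FACTOR), 3_000);
        // an extreme outlier is clamped to goal * CLAMP_FACTOR
        assert_eq!(clamp(100 * BLOCK_TIME_WINDOW, BLOCK_TIME_WINDOW, CLAMP_FACTOR), 7_200);
    }
}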
/// Computes the proof-of-work difficulty that the next block should comply
/// with. Takes an iterator over past block headers information, from latest
/// (highest height) to oldest (lowest height).
///
/// The difficulty calculation is based on both Digishield and GravityWave
/// family of difficulty computation, coming to something very close to Zcash.
/// The reference difficulty is an average of the difficulty over a window of
/// DIFFICULTY_ADJUST_WINDOW blocks. The corresponding timespan is calculated
/// by using the difference between the median timestamps at the beginning
/// and the end of the window.
///
/// The secondary proof-of-work factor is calculated along the same lines, as
/// an adjustment on the deviation against the ideal value.
pub fn next_difficulty<T>(height: u64, cursor: T) -> HeaderInfo
where
    T: IntoIterator<Item = HeaderInfo>,
{
    // Create vector of difficulty data running from earliest to latest, and
    // pad with simulated pre-genesis data to allow earlier adjustment if
    // there isn't enough window data. Length will be
    // DIFFICULTY_ADJUST_WINDOW + 1 (for initial block time bound)
    let diff_data = global::difficulty_data_to_vector(cursor);

    // First, get the ratio of secondary PoW vs primary
    let sec_pow_scaling = secondary_pow_scaling(height, &diff_data);

    // Get the timestamp delta across the window
    let ts_delta: u64 =
        diff_data[DIFFICULTY_ADJUST_WINDOW as usize].timestamp - diff_data[0].timestamp;

    // Get the difficulty sum of the last DIFFICULTY_ADJUST_WINDOW elements
    let diff_sum: u64 = diff_data
        .iter()
        .skip(1)
        .map(|dd| dd.difficulty.to_num())
        .sum();

    // adjust time delta toward goal subject to dampening and clamping
    let adj_ts = clamp(
        damp(ts_delta, BLOCK_TIME_WINDOW, DAMP_FACTOR),
        BLOCK_TIME_WINDOW,
        CLAMP_FACTOR,
    );

    // minimum difficulty avoids getting stuck due to dampening
    let difficulty = max(MIN_DIFFICULTY, diff_sum * BLOCK_TIME_SEC / adj_ts);

    HeaderInfo::from_diff_scaling(Difficulty::from_num(difficulty), sec_pow_scaling)
}
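// Worked example of the retarget formula above (pure arithmetic, no chain
// state): with a 60-block window each at difficulty 1_000, diff_sum is
// 60_000. An on-target window keeps the next difficulty at the window
// average; a window mined in 3_000s instead of 3_600s raises it by 20%.
#[cfg(test)]
mod retarget_example {
    use super::*;

    #[test]
    fn retarget_arithmetic() {
        let diff_sum: u64 = 60 * 1_000;
        // on-target window: next difficulty equals the per-block average
        assert_eq!(diff_sum * BLOCK_TIME_SEC / BLOCK_TIME_WINDOW, 1_000);
        // fast window: difficulty scales up by 3_600 / 3_000 = 1.2
        assert_eq!(diff_sum * BLOCK_TIME_SEC / 3_000, 1_200);
    }
}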
/// Factor by which the secondary proof of work difficulty will be adjusted
pub fn secondary_pow_scaling(height: u64, diff_data: &Vec<HeaderInfo>) -> u32 {
    // Get the secondary count across the window, in pct (100 * 60 * 2nd_pow_fraction)
    let snd_count = 100 * diff_data.iter().filter(|n| n.is_secondary).count() as u64;

    // Get the scaling factor sum of the last DIFFICULTY_ADJUST_WINDOW elements
    let scale_sum: u64 = diff_data
        .iter()
        .skip(1)
        .map(|dd| dd.secondary_scaling as u64)
        .sum();

    // compute ideal 2nd_pow_fraction in pct and across window
    let target_pct = secondary_pow_ratio(height);
    let target_count = DIFFICULTY_ADJUST_WINDOW * target_pct;

    // adjust count toward goal subject to dampening and clamping
    let adj_count = clamp(
        damp(snd_count, target_count, DAMP_FACTOR),
        target_count,
        CLAMP_FACTOR,
    );
    let scale = scale_sum * target_pct / adj_count;

    // minimum difficulty avoids getting stuck due to dampening
    max(MIN_DIFFICULTY, min(scale, MAX_SECONDARY_SCALING)) as u32
}
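// Illustrative sketch over synthetic window data (not consensus code): when
// the observed share of secondary blocks matches the target ratio for the
// height, the scaling factor simply settles at the window average.
#[cfg(test)]
mod secondary_scaling_example {
    use super::*;

    #[test]
    fn on_target_scaling_is_stable() {
        // DIFFICULTY_ADJUST_WINDOW + 1 headers, 54 of them secondary (90% of
        // the 60-block window, matching secondary_pow_ratio at height 1),
        // all carrying a secondary scaling factor of 100
        let window: Vec<HeaderInfo> = (0..61u64)
            .map(|n| HeaderInfo::new(n, Difficulty::from_num(1_000), 100, n < 54))
            .collect();
        assert_eq!(secondary_pow_scaling(1, &window), 100);
    }
}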
/// Consensus rule that collections of items are sorted lexicographically.
pub trait VerifySortOrder<T> {
    /// Verify a collection of items is sorted as required.
    fn verify_sort_order(&self) -> Result<(), Error>;
}