// Copyright 2018 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! All the rules required for a cryptocurrency to reach consensus across
//! the whole network are complex and hard to completely isolate. Some can be
//! simple parameters (like block reward), others complex algorithms (like
//! Merkle sum trees or reorg rules). However, as long as they're simple
//! enough, consensus-relevant constants and short functions should be kept
//! here.

use std::cmp::{max, min};
use std::fmt;

use global;
use pow::Difficulty;

/// A grin is divisible to 10^9, following the SI prefixes
pub const GRIN_BASE: u64 = 1_000_000_000;
/// Milligrin, a thousandth of a grin
pub const MILLI_GRIN: u64 = GRIN_BASE / 1_000;
/// Microgrin, a thousandth of a milligrin
pub const MICRO_GRIN: u64 = MILLI_GRIN / 1_000;
/// Nanogrin, smallest unit, takes a billion to make a grin
pub const NANO_GRIN: u64 = 1;
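
// A small sanity sketch (test-only, illustrative rather than consensus code)
// showing how the denominations above relate to one another.
#[test]
fn demo_grin_units() {
	assert_eq!(GRIN_BASE, 1_000 * MILLI_GRIN);
	assert_eq!(MILLI_GRIN, 1_000 * MICRO_GRIN);
	assert_eq!(MICRO_GRIN, 1_000 * NANO_GRIN);
	// a billion nanogrin make one grin
	assert_eq!(GRIN_BASE, 1_000_000_000 * NANO_GRIN);
}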

/// The block subsidy amount, one grin per second on average
pub const REWARD: u64 = 60 * GRIN_BASE;

/// Actual block reward for a given total fee amount
pub fn reward(fee: u64) -> u64 {
	REWARD + fee
}
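
// Usage sketch (test-only): the total reward for a block is simply the fixed
// subsidy plus whatever fees its transactions carry.
#[test]
fn demo_block_reward() {
	// a block collecting 10 milligrin in fees pays the subsidy plus those fees
	assert_eq!(reward(10 * MILLI_GRIN), REWARD + 10 * MILLI_GRIN);
	assert_eq!(reward(0), 60 * GRIN_BASE);
}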

/// Block interval, in seconds, the network will tune its next_target for. Note
/// that we may reduce this value in the future as we get more data on mining
/// with Cuckoo Cycle, networks improve and block propagation is optimized
/// (adjusting the reward accordingly).
pub const BLOCK_TIME_SEC: u64 = 60;

/// Number of blocks before a coinbase matures and can be spent,
/// set to the nominal number of blocks in one day (1440 with 1-minute blocks)
pub const COINBASE_MATURITY: u64 = 24 * 60 * 60 / BLOCK_TIME_SEC;
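
// Sanity sketch (test-only): with one-minute blocks a day of blocks is
// 24 * 60, so a coinbase output matures after 1440 blocks.
#[test]
fn demo_coinbase_maturity() {
	assert_eq!(COINBASE_MATURITY, 1_440);
}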

/// Ratio the secondary proof of work should take over the primary, as a
/// function of block height (time). Starts at 90% losing a percent
/// approximately every week (10000 blocks). Represented as an integer
/// between 0 and 100.
pub fn secondary_pow_ratio(height: u64) -> u64 {
	90u64.saturating_sub(height / 10000)
}
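
// Illustrative checks (test-only) of the schedule above: 90% at genesis, one
// percent lost every 10_000 blocks, saturating at zero.
#[test]
fn demo_secondary_pow_ratio() {
	assert_eq!(secondary_pow_ratio(0), 90);
	assert_eq!(secondary_pow_ratio(10_000), 89);
	assert_eq!(secondary_pow_ratio(900_000), 0);
	// saturating_sub keeps the ratio at zero rather than underflowing
	assert_eq!(secondary_pow_ratio(10_000_000), 0);
}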

/// Cuckoo-cycle proof size (cycle length)
pub const PROOFSIZE: usize = 42;

/// Default Cuckoo Cycle size shift used for mining and validating.
pub const DEFAULT_MIN_SIZESHIFT: u8 = 30;

/// Secondary proof-of-work size shift, meant to be ASIC resistant.
pub const SECOND_POW_SIZESHIFT: u8 = 29;

/// Original reference sizeshift to compute difficulty factors for higher
/// Cuckoo graph sizes, changing this would hard fork
pub const REFERENCE_SIZESHIFT: u8 = 30;

/// Default Cuckoo Cycle easiness, high enough to have a good likelihood of
/// finding a solution.
pub const EASINESS: u32 = 50;

/// Default number of blocks in the past when cross-block cut-through will start
/// happening. Needs to be long enough to not overlap with a long reorg.
/// The rationale behind the value is that the longest bitcoin fork was about
/// 30 blocks, so 5h. We add an order of magnitude to be safe and round to
/// 7x24h of blocks to make it easier to reason about.
pub const CUT_THROUGH_HORIZON: u32 = 7 * 24 * 3600 / (BLOCK_TIME_SEC as u32);
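
// Sanity sketch (test-only): a week of one-minute blocks is
// 7 * 24 * 60 = 10_080 blocks.
#[test]
fn demo_cut_through_horizon() {
	assert_eq!(CUT_THROUGH_HORIZON, 10_080);
}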

/// Weight of an input when counted against the max block weight capacity
pub const BLOCK_INPUT_WEIGHT: usize = 1;

/// Weight of an output when counted against the max block weight capacity
pub const BLOCK_OUTPUT_WEIGHT: usize = 10;

/// Weight of a kernel when counted against the max block weight capacity
pub const BLOCK_KERNEL_WEIGHT: usize = 2;

/// Total maximum block weight. At current sizes, this means a maximum
/// theoretical size of:
/// * `(674 + 33 + 1) * 4_000 = 2_832_000` for a block with only outputs
/// * `(1 + 8 + 8 + 33 + 64) * 20_000 = 2_280_000` for a block with only kernels
/// * `(1 + 33) * 40_000 = 1_360_000` for a block with only inputs
///
/// Given that a block needs to have at least one kernel for the coinbase,
/// and one kernel for the transaction, practical maximum size is 2_831_440,
/// (ignoring the edge case of a miner producing a block with all coinbase
/// outputs and a single kernel).
///
/// A more "standard" block, filled with transactions of 2 inputs, 2 outputs
/// and one kernel, should be around 2_663_333 bytes.
pub const MAX_BLOCK_WEIGHT: usize = 40_000;
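
// Illustrative check (test-only) of the capacity figures quoted above:
// dividing the total weight by each per-item weight gives the maximum number
// of outputs, kernels and inputs a block could hold on its own.
#[test]
fn demo_block_weight_capacity() {
	assert_eq!(MAX_BLOCK_WEIGHT / BLOCK_OUTPUT_WEIGHT, 4_000);
	assert_eq!(MAX_BLOCK_WEIGHT / BLOCK_KERNEL_WEIGHT, 20_000);
	assert_eq!(MAX_BLOCK_WEIGHT / BLOCK_INPUT_WEIGHT, 40_000);
}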

/// Fork every 250,000 blocks for the first 2 years, a simple number and just
/// a little less than 6 months.
pub const HARD_FORK_INTERVAL: u64 = 250_000;

/// Check whether the block version is valid at a given height, implements
/// 6-month interval scheduled hard forks for the first 2 years.
pub fn valid_header_version(height: u64, version: u16) -> bool {
	// uncomment below as we go from hard fork to hard fork
	if height < HARD_FORK_INTERVAL {
		version == 1
	/* } else if height < 2 * HARD_FORK_INTERVAL {
		version == 2
	} else if height < 3 * HARD_FORK_INTERVAL {
		version == 3
	} else if height < 4 * HARD_FORK_INTERVAL {
		version == 4
	} else if height >= 5 * HARD_FORK_INTERVAL {
		version > 4 */
	} else {
		false
	}
}
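
// Usage sketch (test-only): only version 1 headers are currently accepted,
// and only until the first scheduled hard fork height.
#[test]
fn demo_valid_header_version() {
	assert!(valid_header_version(0, 1));
	assert!(valid_header_version(HARD_FORK_INTERVAL - 1, 1));
	// beyond the first fork everything is rejected until the commented-out
	// branches above are filled in
	assert!(!valid_header_version(HARD_FORK_INTERVAL, 1));
	assert!(!valid_header_version(0, 2));
}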

/// Time window in blocks to calculate block time median
pub const MEDIAN_TIME_WINDOW: u64 = 11;

/// Index at half the desired median
pub const MEDIAN_TIME_INDEX: u64 = MEDIAN_TIME_WINDOW / 2;

/// Number of blocks used to calculate difficulty adjustments
pub const DIFFICULTY_ADJUST_WINDOW: u64 = 60;

/// Average time span of the difficulty adjustment window
pub const BLOCK_TIME_WINDOW: u64 = DIFFICULTY_ADJUST_WINDOW * BLOCK_TIME_SEC;

/// Maximum size time window used for difficulty adjustments
pub const UPPER_TIME_BOUND: u64 = BLOCK_TIME_WINDOW * 2;

/// Minimum size time window used for difficulty adjustments
pub const LOWER_TIME_BOUND: u64 = BLOCK_TIME_WINDOW / 2;

/// Dampening factor to use for difficulty adjustment
pub const DAMP_FACTOR: u64 = 3;

/// The initial difficulty at launch. This should be over-estimated
/// and difficulty should come down at launch rather than up.
/// Currently grossly over-estimated at 10% of current
/// ethereum GPUs (assuming 1 GPU can solve a block at diff 1
/// in one block interval)
pub const INITIAL_DIFFICULTY: u64 = 1_000_000;

/// Consensus errors
#[derive(Clone, Debug, Eq, PartialEq, Fail)]
pub enum Error {
	/// Inputs/outputs/kernels must be sorted lexicographically.
	SortError,
}

impl fmt::Display for Error {
	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
		write!(f, "Sort Error")
	}
}

/// Minimal header information required for the Difficulty calculation to
/// take place
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct HeaderInfo {
	/// Timestamp of the header, 1 when not used (returned info)
	pub timestamp: u64,
	/// Network difficulty or next difficulty to use
	pub difficulty: Difficulty,
	/// Network secondary PoW factor or factor to use
	pub secondary_scaling: u32,
	/// Whether the header is a secondary proof of work
	pub is_secondary: bool,
}

impl HeaderInfo {
	/// Default constructor
	pub fn new(
		timestamp: u64,
		difficulty: Difficulty,
		secondary_scaling: u32,
		is_secondary: bool,
	) -> HeaderInfo {
		HeaderInfo {
			timestamp,
			difficulty,
			secondary_scaling,
			is_secondary,
		}
	}

	/// Constructor from a timestamp and difficulty, setting a default secondary
	/// PoW factor
	pub fn from_ts_diff(timestamp: u64, difficulty: Difficulty) -> HeaderInfo {
		HeaderInfo {
			timestamp,
			difficulty,
			secondary_scaling: 1,
			is_secondary: false,
		}
	}

	/// Constructor from a difficulty and secondary factor, setting a default
	/// timestamp
	pub fn from_diff_scaling(difficulty: Difficulty, secondary_scaling: u32) -> HeaderInfo {
		HeaderInfo {
			timestamp: 1,
			difficulty,
			secondary_scaling,
			is_secondary: false,
		}
	}
}
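
// Usage sketch (test-only) of the convenience constructors: each one fills in
// the fields the caller does not care about with the documented defaults.
#[test]
fn demo_header_info_constructors() {
	let hi = HeaderInfo::from_ts_diff(100, Difficulty::from_num(10));
	assert_eq!(hi.secondary_scaling, 1);
	assert!(!hi.is_secondary);

	let hi = HeaderInfo::from_diff_scaling(Difficulty::from_num(10), 7);
	assert_eq!(hi.timestamp, 1);
	assert_eq!(hi.secondary_scaling, 7);
}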

/// Computes the proof-of-work difficulty that the next block should comply
/// with. Takes an iterator over past block headers information, from latest
/// (highest height) to oldest (lowest height).
///
/// The difficulty calculation is based on both the Digishield and GravityWave
/// families of difficulty computation, coming to something very close to Zcash.
/// The reference difficulty is an average of the difficulty over a window of
/// DIFFICULTY_ADJUST_WINDOW blocks. The corresponding timespan is calculated
/// by using the difference between the median timestamps at the beginning
/// and the end of the window.
///
/// The secondary proof-of-work factor is calculated along the same lines, as
/// an adjustment on the deviation against the ideal value.
pub fn next_difficulty<T>(height: u64, cursor: T) -> HeaderInfo
where
	T: IntoIterator<Item = HeaderInfo>,
{
	// Create vector of difficulty data running from earliest
	// to latest, and pad with simulated pre-genesis data to allow earlier
	// adjustment if there isn't enough window data;
	// length will be DIFFICULTY_ADJUST_WINDOW + MEDIAN_TIME_WINDOW
	let diff_data = global::difficulty_data_to_vector(cursor);

	// First, get the ratio of secondary PoW vs primary
	let sec_pow_scaling = secondary_pow_scaling(height, &diff_data);

	// Obtain the median window for the earlier time period,
	// i.e. the first MEDIAN_TIME_WINDOW elements
	let earliest_ts = time_window_median(&diff_data, 0, MEDIAN_TIME_WINDOW as usize);

	// Obtain the median window for the latest time period,
	// i.e. the last MEDIAN_TIME_WINDOW elements
	let latest_ts = time_window_median(
		&diff_data,
		DIFFICULTY_ADJUST_WINDOW as usize,
		MEDIAN_TIME_WINDOW as usize,
	);

	// median time delta
	let ts_delta = latest_ts - earliest_ts;

	// Get the difficulty sum of the last DIFFICULTY_ADJUST_WINDOW elements
	let diff_sum = diff_data
		.iter()
		.skip(MEDIAN_TIME_WINDOW as usize)
		.fold(0, |sum, d| sum + d.difficulty.to_num());

	// Apply dampening except when difficulty is near 1
	let ts_damp = if diff_sum < DAMP_FACTOR * DIFFICULTY_ADJUST_WINDOW {
		ts_delta
	} else {
		(ts_delta + (DAMP_FACTOR - 1) * BLOCK_TIME_WINDOW) / DAMP_FACTOR
	};

	// Apply time bounds
	let adj_ts = if ts_damp < LOWER_TIME_BOUND {
		LOWER_TIME_BOUND
	} else if ts_damp > UPPER_TIME_BOUND {
		UPPER_TIME_BOUND
	} else {
		ts_damp
	};

	let difficulty = max(diff_sum * BLOCK_TIME_SEC / adj_ts, 1);

	HeaderInfo::from_diff_scaling(Difficulty::from_num(difficulty), sec_pow_scaling)
}
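
// Worked sketch (test-only) of the dampening step above: with DAMP_FACTOR = 3,
// an observed timespan only moves the adjusted timespan by a third of its
// deviation from the ideal BLOCK_TIME_WINDOW.
#[test]
fn demo_dampening() {
	// a window that took twice the ideal time...
	let ts_delta = 2 * BLOCK_TIME_WINDOW;
	let ts_damp = (ts_delta + (DAMP_FACTOR - 1) * BLOCK_TIME_WINDOW) / DAMP_FACTOR;
	// ...is damped to 4/3 of the ideal: (2w + 2w) / 3
	assert_eq!(ts_damp, 4 * BLOCK_TIME_WINDOW / 3);
}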

/// Maximum secondary PoW scaling factor, bounds the adjustment done in
/// secondary_pow_scaling
pub const MAX_SECONDARY_SCALING: u64 = (::std::u32::MAX / 70) as u64;

/// Factor by which the secondary proof of work difficulty will be adjusted
pub fn secondary_pow_scaling(height: u64, diff_data: &Vec<HeaderInfo>) -> u32 {
	// median of past scaling factors, scaling is 1 if none found
	let mut scalings = diff_data
		.iter()
		.map(|n| n.secondary_scaling)
		.collect::<Vec<_>>();
	if scalings.is_empty() {
		return 1;
	}
	scalings.sort();
	let scaling_median = scalings[scalings.len() / 2] as u64;
	let secondary_count = max(diff_data.iter().filter(|n| n.is_secondary).count(), 1) as u64;

	// what's the ideal ratio at the current height
	let ratio = secondary_pow_ratio(height);

	// adjust the past median based on ideal ratio vs actual ratio
	let scaling = scaling_median * diff_data.len() as u64 * ratio / 100 / secondary_count;

	// various bounds
	let bounded_scaling = if scaling < scaling_median / 4 || scaling == 0 {
		max(scaling_median / 4, 1)
	} else if scaling > MAX_SECONDARY_SCALING || scaling > scaling_median * 4 {
		min(MAX_SECONDARY_SCALING, scaling_median * 4)
	} else {
		scaling
	};
	bounded_scaling as u32
}
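
// Minimal sketch (test-only) of the edge cases above: an empty window
// defaults to a scaling of 1, and an all-primary window at genesis keeps the
// median scaling within its bounds.
#[test]
fn demo_secondary_pow_scaling() {
	// no past headers at all
	assert_eq!(secondary_pow_scaling(0, &vec![]), 1);

	// a window of primary headers all carrying a scaling factor of 100
	let data = vec![HeaderInfo::from_diff_scaling(Difficulty::from_num(10), 100); 10];
	// ideal ratio is 90%, actual secondary count is clamped to 1, so the raw
	// scaling (100 * 10 * 90 / 100 / 1 = 900) is capped at 4x the median
	assert_eq!(secondary_pow_scaling(0, &data), 400);
}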

/// Median timestamp within the time window starting at `from` with the
/// provided `length`.
fn time_window_median(diff_data: &Vec<HeaderInfo>, from: usize, length: usize) -> u64 {
	let mut window_latest: Vec<u64> = diff_data
		.iter()
		.skip(from)
		.take(length)
		.map(|n| n.timestamp)
		.collect();
	// pick median
	window_latest.sort();
	window_latest[MEDIAN_TIME_INDEX as usize]
}
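
// Small sketch (test-only): with MEDIAN_TIME_WINDOW = 11 timestamps spaced a
// minute apart, the median lands on the 6th element (MEDIAN_TIME_INDEX = 5).
#[test]
fn demo_time_window_median() {
	let data: Vec<HeaderInfo> = (0..MEDIAN_TIME_WINDOW)
		.map(|n| HeaderInfo::from_ts_diff(n * 60, Difficulty::from_num(10)))
		.collect();
	assert_eq!(time_window_median(&data, 0, MEDIAN_TIME_WINDOW as usize), 5 * 60);
}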

/// Consensus rule that collections of items are sorted lexicographically.
pub trait VerifySortOrder<T> {
	/// Verify a collection of items is sorted as required.
	fn verify_sort_order(&self) -> Result<(), Error>;
}