diff --git a/core/src/consensus.rs b/core/src/consensus.rs
index ef6949790..f74ac626b 100644
--- a/core/src/consensus.rs
+++ b/core/src/consensus.rs
@@ -72,9 +72,9 @@ pub const EASINESS: u32 = 50;
 /// happening. Needs to be long enough to not overlap with a long reorg.
 /// Rational
 /// behind the value is the longest bitcoin fork was about 30 blocks, so 5h. We
-/// add an order of magnitude to be safe and round to 48h of blocks to make it
+/// add an order of magnitude to be safe and round to 7x24h of blocks to make it
 /// easier to reason about.
-pub const CUT_THROUGH_HORIZON: u32 = 48 * 3600 / (BLOCK_TIME_SEC as u32);
+pub const CUT_THROUGH_HORIZON: u32 = 7 * 24 * 3600 / (BLOCK_TIME_SEC as u32);
 
 /// Weight of an input when counted against the max block weight capacity
 pub const BLOCK_INPUT_WEIGHT: usize = 1;
diff --git a/core/src/global.rs b/core/src/global.rs
index 51c71f866..4425d36c0 100644
--- a/core/src/global.rs
+++ b/core/src/global.rs
@@ -72,6 +72,11 @@ pub const TESTNET3_INITIAL_DIFFICULTY: u64 = 30000;
 /// Testnet 4 initial block difficulty
 pub const TESTNET4_INITIAL_DIFFICULTY: u64 = 1;
 
+/// Trigger compaction check on average every 1440 blocks (i.e. one day) for FAST_SYNC_NODE,
+/// roll the dice on every block to decide,
+/// all blocks lower than (BodyHead.height - CUT_THROUGH_HORIZON) will be removed.
+pub const COMPACTION_CHECK: u64 = 1440;
+
 /// Types of chain a server can run with, dictates the genesis block and
 /// and mining parameters used.
 #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
diff --git a/servers/src/common/adapters.rs b/servers/src/common/adapters.rs
index 363ebbf30..4b4998a3c 100644
--- a/servers/src/common/adapters.rs
+++ b/servers/src/common/adapters.rs
@@ -21,7 +21,7 @@ use std::sync::{Arc, RwLock, Weak};
 use std::thread;
 use std::time::Instant;
 
-use chain::{self, ChainAdapter, Options, Tip};
+use chain::{self, ChainAdapter, Options};
 use chrono::prelude::{DateTime, Utc};
 use common::types::{self, ChainValidationMode, ServerConfig, SyncState, SyncStatus};
 use core::core::hash::{Hash, Hashed};
@@ -32,6 +32,7 @@ use core::pow::Difficulty;
 use core::{core, global};
 use p2p;
 use pool;
+use rand::prelude::*;
 use store;
 use util::{OneTime, LOGGER};
 
@@ -470,9 +471,9 @@ impl NetToChainAdapter {
 		let prev_hash = b.header.previous;
 		let bhash = b.hash();
 		match self.chain().process_block(b, self.chain_opts()) {
-			Ok(tip) => {
+			Ok(_) => {
 				self.validate_chain(bhash);
-				self.check_compact(tip);
+				self.check_compact();
 				true
 			}
 			Err(ref e) if e.is_bad_data() => {
@@ -541,25 +542,24 @@ impl NetToChainAdapter {
 		}
 	}
 
-	fn check_compact(&self, tip: Option<Tip>) {
+	fn check_compact(&self) {
 		// no compaction during sync or if we're in historical mode
 		if self.archive_mode || self.sync_state.is_syncing() {
 			return;
 		}
 
-		if let Some(tip) = tip {
-			// trigger compaction every 2000 blocks, uses a different thread to avoid
-			// blocking the caller thread (likely a peer)
-			if tip.height % 2000 == 0 {
-				let chain = self.chain().clone();
-				let _ = thread::Builder::new()
-					.name("compactor".to_string())
-					.spawn(move || {
-						if let Err(e) = chain.compact() {
-							error!(LOGGER, "Could not compact chain: {:?}", e);
-						}
-					});
-			}
+		// Roll the dice to trigger compaction at 1/COMPACTION_CHECK chance per block,
+		// uses a different thread to avoid blocking the caller thread (likely a peer)
+		let mut rng = thread_rng();
+		if 0 == rng.gen_range(0, global::COMPACTION_CHECK) {
+			let chain = self.chain().clone();
+			let _ = thread::Builder::new()
+				.name("compactor".to_string())
+				.spawn(move || {
+					if let Err(e) = chain.compact() {
+						error!(LOGGER, "Could not compact chain: {:?}", e);
+					}
+				});
 		}
 	}
 
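
For reference, the arithmetic behind the two constants this patch touches. The sketch below is a standalone sanity check, assuming `BLOCK_TIME_SEC` is 60 (Grin's one-minute block target at the time of this change); it mirrors the patched values rather than importing the real `consensus` and `global` modules.

```rust
// Standalone copies of the patched constants, assuming BLOCK_TIME_SEC = 60.
const BLOCK_TIME_SEC: u64 = 60;
const CUT_THROUGH_HORIZON: u32 = 7 * 24 * 3600 / (BLOCK_TIME_SEC as u32);
const COMPACTION_CHECK: u64 = 1440;

fn main() {
    // The horizon grows from 48 * 3600 / 60 = 2_880 blocks (48h)
    // to 7 * 24 * 3600 / 60 = 10_080 blocks (one week).
    assert_eq!(CUT_THROUGH_HORIZON, 10_080);

    // 1440 one-minute blocks is one day, so compaction is expected
    // to trigger roughly once per day on average.
    assert_eq!(COMPACTION_CHECK * BLOCK_TIME_SEC, 24 * 3600);

    println!("horizon: {} blocks", CUT_THROUGH_HORIZON);
}
```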
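
The behavioural change in `check_compact` is the trigger itself: instead of the deterministic `tip.height % 2000 == 0`, every processed block now performs an independent 1-in-`COMPACTION_CHECK` trial. Below is a minimal sketch of just that trigger, assuming the rand 0.5-era two-argument `gen_range(low, high)` that the patch pulls in via `rand::prelude::*`; `should_compact` is a hypothetical helper for illustration, not a function in the Grin codebase.

```rust
// Hypothetical, self-contained sketch of the dice-roll trigger (rand 0.5 API).
use rand::prelude::*;

const COMPACTION_CHECK: u64 = 1440;

// True with probability 1/COMPACTION_CHECK: gen_range(0, n) draws
// uniformly from [0, n), so exactly one outcome out of n matches 0.
fn should_compact() -> bool {
    let mut rng = thread_rng();
    0 == rng.gen_range(0, COMPACTION_CHECK)
}

fn main() {
    // Empirically, the trigger fires about once per COMPACTION_CHECK blocks.
    let trials: u64 = 1_000_000;
    let hits = (0..trials).filter(|_| should_compact()).count();
    println!(
        "fired {} times in {} trials (expected ~{})",
        hits,
        trials,
        trials / COMPACTION_CHECK
    );
}
```

One consequence of this design: the gap between compactions is now geometrically distributed with a mean of 1440 blocks, and since each node rolls its own dice, nodes no longer all compact at the same block heights the way the `height % 2000` rule forced them to.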