Pull initial chain compaction out of init() and into the syncer (#2738)

Antioch Peverell 2019-04-12 23:38:07 +01:00 committed by Ignotus Peverell
parent dbbad7be03
commit e8c50359e4
2 changed files with 58 additions and 37 deletions

@@ -170,44 +170,34 @@ impl Chain {
 		archive_mode: bool,
 		stop_state: Arc<Mutex<StopState>>,
 	) -> Result<Chain, Error> {
-		let chain = {
-			// Note: We take a lock on the stop_state here and do not release it until
-			// we have finished chain initialization.
-			let stop_state_local = stop_state.clone();
-			let stop_lock = stop_state_local.lock();
-			if stop_lock.is_stopped() {
-				return Err(ErrorKind::Stopped.into());
-			}
+		// Note: We take a lock on the stop_state here and do not release it until
+		// we have finished chain initialization.
+		let stop_state_local = stop_state.clone();
+		let stop_lock = stop_state_local.lock();
+		if stop_lock.is_stopped() {
+			return Err(ErrorKind::Stopped.into());
+		}
 
-			let store = Arc::new(store::ChainStore::new(db_env)?);
+		let store = Arc::new(store::ChainStore::new(db_env)?);
 
-			// open the txhashset, creating a new one if necessary
-			let mut txhashset = txhashset::TxHashSet::open(db_root.clone(), store.clone(), None)?;
+		// open the txhashset, creating a new one if necessary
+		let mut txhashset = txhashset::TxHashSet::open(db_root.clone(), store.clone(), None)?;
 
-			setup_head(&genesis, &store, &mut txhashset)?;
-			Chain::log_heads(&store)?;
+		setup_head(&genesis, &store, &mut txhashset)?;
+		Chain::log_heads(&store)?;
 
-			Chain {
-				db_root,
-				store,
-				adapter,
-				orphans: Arc::new(OrphanBlockPool::new()),
-				txhashset: Arc::new(RwLock::new(txhashset)),
-				pow_verifier,
-				verifier_cache,
-				archive_mode,
-				stop_state,
-				genesis: genesis.header.clone(),
-			}
-		};
-
-		// Run chain compaction. Laptops and other intermittent nodes
-		// may not run long enough to trigger daily compaction.
-		// So run it explicitly here on startup (its fast enough to do so).
-		// Note: we release the stop_lock from above as compact also requires a lock.
-		chain.compact()?;
-
-		Ok(chain)
+		Ok(Chain {
+			db_root,
+			store,
+			adapter,
+			orphans: Arc::new(OrphanBlockPool::new()),
+			txhashset: Arc::new(RwLock::new(txhashset)),
+			pow_verifier,
+			verifier_cache,
+			archive_mode,
+			stop_state,
+			genesis: genesis.header.clone(),
+		})
 	}
 
 	/// Return our shared txhashset instance.
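For context on the scoping the removed code relied on: compact() takes its own lock on stop_state, and that lock is not reentrant, so the old constructor had to drop its stop_lock (at the end of the let chain = { ... } block) before calling chain.compact(). A minimal sketch of that hazard, using std::sync::Mutex as a stand-in for the actual lock type:

    use std::sync::Mutex;

    fn compact(stop_state: &Mutex<bool>) {
        // Takes its own lock on the stop state, mirroring how
        // Chain::compact() locks stop_state internally.
        let _guard = stop_state.lock().unwrap();
        // ... compaction work ...
    }

    fn main() {
        let stop_state = Mutex::new(false);

        {
            // Scoped block, as in the old Chain::new(): the guard is
            // dropped at the closing brace, releasing the lock.
            let _stop_lock = stop_state.lock().unwrap();
            // ... chain initialization ...
        }

        // Safe only because the guard above has been dropped; calling
        // this while still holding _stop_lock would deadlock, since
        // the lock is not reentrant.
        compact(&stop_state);
    }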
@@ -1057,6 +1047,26 @@ impl Chain {
 	/// * removes historical blocks and associated data from the db (unless archive mode)
 	///
 	pub fn compact(&self) -> Result<(), Error> {
+		// A node may be restarted multiple times in a short period of time.
+		// In this situation we compact at most once per 60 blocks, by comparing
+		// the current "head" and "tail" heights to our cut-through horizon and
+		// allowing an additional 60 blocks of height before a further compaction.
+		if let (Ok(tail), Ok(head)) = (self.tail(), self.head()) {
+			let horizon = global::cut_through_horizon() as u64;
+			let threshold = horizon.saturating_add(60);
+			debug!(
+				"compact: head: {}, tail: {}, diff: {}, horizon: {}",
+				head.height,
+				tail.height,
+				head.height.saturating_sub(tail.height),
+				horizon
+			);
+			if tail.height.saturating_add(threshold) > head.height {
+				debug!("compact: skipping compaction - threshold is 60 blocks beyond horizon.");
+				return Ok(());
+			}
+		}
+
 		// Note: We take a lock on the stop_state here and do not release it until
 		// we have finished processing this chain compaction operation.
 		// We want to avoid shutting the node down in the middle of compacting the data.
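To make the skip condition above concrete: a successful compaction leaves the tail roughly horizon blocks behind the head, so skipping while tail.height + horizon + 60 > head.height means compaction re-runs at most once per 60 blocks. A standalone sketch of the same arithmetic; the should_compact helper and the horizon value of 1440 are illustrative assumptions, not part of the actual code, where the horizon comes from global::cut_through_horizon() and the check lives inline in Chain::compact():

    /// Illustrative stand-in for the inline check in Chain::compact().
    fn should_compact(tail_height: u64, head_height: u64, horizon: u64) -> bool {
        let threshold = horizon.saturating_add(60);
        // Mirrors the diff: skip when tail + horizon + 60 > head.
        tail_height.saturating_add(threshold) <= head_height
    }

    fn main() {
        let horizon = 1_440; // assumed horizon, for illustration only

        // Right after a compaction the tail sits ~horizon behind the head.
        assert!(!should_compact(10_000, 11_440, horizon));
        // 59 blocks later we are still inside the 60-block grace window.
        assert!(!should_compact(10_000, 11_499, horizon));
        // At 60 blocks past the horizon, compaction is allowed again.
        assert!(should_compact(10_000, 11_500, horizon));
        println!("threshold checks hold");
    }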

@@ -146,16 +146,27 @@ impl SyncRunner {
 			thread::sleep(time::Duration::from_millis(10));
 
+			let currently_syncing = self.sync_state.is_syncing();
+
 			// check whether syncing is generally needed, when we compare our state with others
-			let (syncing, most_work_height) = unwrap_or_restart_loop!(self.needs_syncing());
+			let (needs_syncing, most_work_height) = unwrap_or_restart_loop!(self.needs_syncing());
 
 			if most_work_height > 0 {
 				// we can occasionally get a most work height of 0 if read locks fail
 				highest_height = most_work_height;
 			}
 
 			// quick short-circuit (and a decent sleep) if no syncing is needed
-			if !syncing {
-				self.sync_state.update(SyncStatus::NoSync);
+			if !needs_syncing {
+				if currently_syncing {
+					self.sync_state.update(SyncStatus::NoSync);
+
+					// Initial transition out of a "syncing" state and into NoSync.
+					// This triggers a chain compaction to keep our local node tidy.
+					// Note: chain compaction runs with an internal threshold,
+					// so it can safely be run even if the node is restarted frequently.
+					unwrap_or_restart_loop!(self.chain.compact());
+				}
+
 				thread::sleep(time::Duration::from_secs(10));
 				continue;
 			}
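The compact() call above is edge-triggered: currently_syncing is captured before the state update, so compaction fires only on the transition from a syncing state into NoSync rather than on every pass through the loop. A small sketch of that pattern, with hypothetical stand-ins for the sync state and the chain:

    fn main() {
        // Hypothetical stand-in for self.sync_state.is_syncing().
        let mut syncing = true;

        // Simulated loop iterations: syncing finishes on the third pass.
        for needs_syncing in [true, true, false, false, false] {
            // Capture the state *before* updating it, as the syncer does.
            let currently_syncing = syncing;

            if !needs_syncing {
                syncing = false; // the SyncStatus::NoSync update
                if currently_syncing {
                    // Fires exactly once, on the syncing -> NoSync edge,
                    // standing in for self.chain.compact().
                    println!("running chain compaction");
                }
            } else {
                syncing = true;
            }
        }
    }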