log reorg (and fork) depth correctly (#3376)

* log reorg (and fork) depth correctly
  depth should be based on the "fork point", not the previous head

* correct depth for fork/reorg (0 for an alternate head, etc.)
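
In other words, the depth of a reorg or fork is now measured from the fork point: a new block at height h that forks off at height f replaces h - (f + 1) blocks, so an alternate head forking off the parent of the current head reports depth 0. A minimal sketch of the calculation used by the updated hooks below; `Tip` here is a simplified stand-in for the chain's type:

	// Simplified stand-in for the chain's `Tip`: illustration only.
	struct Tip {
		height: u64,
	}

	// Depth as computed in this commit: the number of blocks between the fork
	// point and the new block, exclusive of both ends. `saturating_sub` keeps
	// the result at 0 for an alternate head forking off the current head's parent.
	fn depth(block_height: u64, fork_point: &Tip) -> u64 {
		block_height.saturating_sub(fork_point.height + 1)
	}

	fn main() {
		// Alternate head: new block at height 11 forking off height 10 => depth 0.
		assert_eq!(depth(11, &Tip { height: 10 }), 0);
		// New block at height 15 forking off height 10 => depth 4.
		assert_eq!(depth(15, &Tip { height: 10 }), 4);
	}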
Antioch Peverell 2020-07-08 09:02:28 +01:00, committed by GitHub
parent 30db9c410e
commit 32253194b5
6 changed files with 109 additions and 62 deletions


@@ -271,24 +271,23 @@ impl Chain {
 		res
 	}
 
-	fn determine_status(&self, head: Option<Tip>, prev_head: Tip) -> BlockStatus {
-		// We have more work if the chain head is updated.
-		let is_more_work = head.is_some();
-
-		let mut is_next_block = false;
-		let mut reorg_depth = None;
-		if let Some(head) = head {
-			if head.prev_block_h == prev_head.last_block_h {
-				is_next_block = true;
-			} else {
-				reorg_depth = Some(prev_head.height.saturating_sub(head.height) + 1);
-			}
-		}
-
-		match (is_more_work, is_next_block) {
-			(true, true) => BlockStatus::Next,
-			(true, false) => BlockStatus::Reorg(reorg_depth.unwrap_or(0)),
-			(false, _) => BlockStatus::Fork,
-		}
-	}
+	fn determine_status(&self, head: Option<Tip>, prev_head: Tip, fork_point: Tip) -> BlockStatus {
+		// If head is updated then we are either "next" block or we just experienced a "reorg" to new head.
+		// Otherwise this is a "fork" off the main chain.
+		if let Some(head) = head {
+			if head.prev_block_h == prev_head.last_block_h {
+				BlockStatus::Next { prev_head }
+			} else {
+				BlockStatus::Reorg {
+					prev_head,
+					fork_point,
+				}
+			}
+		} else {
+			BlockStatus::Fork {
+				prev_head,
+				fork_point,
+			}
+		}
+	}
@@ -305,10 +304,9 @@ impl Chain {
 		let mut header_pmmr = self.header_pmmr.write();
 		let mut txhashset = self.txhashset.write();
 		let batch = self.store.batch()?;
-		let prev_head = batch.head()?;
-
 		let mut ctx = self.new_ctx(opts, batch, &mut header_pmmr, &mut txhashset)?;
+		let prev_head = ctx.batch.head()?;
 
 		let maybe_new_head = pipe::process_block(&b, &mut ctx);
 
 		// We have flushed txhashset extension changes to disk
@@ -324,8 +322,8 @@ impl Chain {
 		};
 
 		match maybe_new_head {
-			Ok(head) => {
-				let status = self.determine_status(head.clone(), prev_head);
+			Ok((head, fork_point)) => {
+				let status = self.determine_status(head, prev_head, Tip::from_header(&fork_point));
 				// notifying other parts of the system of the update
 				self.adapter.block_accepted(&b, status, opts);


@@ -78,8 +78,11 @@ fn validate_pow_only(header: &BlockHeader, ctx: &mut BlockContext<'_>) -> Result
 /// Runs the block processing pipeline, including validation and finding a
 /// place for the new block in the chain.
-/// Returns new head if chain head updated.
-pub fn process_block(b: &Block, ctx: &mut BlockContext<'_>) -> Result<Option<Tip>, Error> {
+/// Returns the new head if the chain head was updated, and the "fork point" we rewound to while processing the new block.
+pub fn process_block(
+	b: &Block,
+	ctx: &mut BlockContext<'_>,
+) -> Result<(Option<Tip>, BlockHeader), Error> {
 	debug!(
 		"pipe: process_block {} at {} [in/out/kern: {}/{}/{}]",
 		b.hash(),
@@ -127,8 +130,8 @@ pub fn process_block(b: &Block, ctx: &mut BlockContext<'_>) -> Result<Option<Tip
 	let header_pmmr = &mut ctx.header_pmmr;
 	let txhashset = &mut ctx.txhashset;
 	let batch = &mut ctx.batch;
-	txhashset::extending(header_pmmr, txhashset, batch, |ext, batch| {
-		rewind_and_apply_fork(&prev, ext, batch)?;
+	let fork_point = txhashset::extending(header_pmmr, txhashset, batch, |ext, batch| {
+		let fork_point = rewind_and_apply_fork(&prev, ext, batch)?;
 
 		// Check any coinbase being spent have matured sufficiently.
 		// This needs to be done within the context of a potentially
@@ -159,7 +162,7 @@ pub fn process_block(b: &Block, ctx: &mut BlockContext<'_>) -> Result<Option<Tip
 			ext.extension.force_rollback();
 		}
-		Ok(())
+		Ok(fork_point)
 	})?;
 
 	// Add the validated block to the db.
@@ -176,9 +179,9 @@ pub fn process_block(b: &Block, ctx: &mut BlockContext<'_>) -> Result<Option<Tip
 	if has_more_work(&b.header, &head) {
 		let head = Tip::from_header(&b.header);
 		update_head(&head, &mut ctx.batch)?;
-		Ok(Some(head))
+		Ok((Some(head), fork_point))
 	} else {
-		Ok(None)
+		Ok((None, fork_point))
 	}
 }
@@ -545,11 +548,12 @@ pub fn rewind_and_apply_header_fork(
 /// to find the fork point. Rewind the txhashset to the fork point and apply all
 /// necessary blocks prior to the one being processed to set the txhashset in
 /// the expected state.
+/// Returns the "fork point" that we rewound to.
 pub fn rewind_and_apply_fork(
 	header: &BlockHeader,
 	ext: &mut txhashset::ExtensionPair<'_>,
 	batch: &store::Batch<'_>,
-) -> Result<(), Error> {
+) -> Result<BlockHeader, Error> {
 	let extension = &mut ext.extension;
 	let header_extension = &mut ext.header_extension;
@@ -593,7 +597,7 @@ pub fn rewind_and_apply_fork(
 		apply_block_to_txhashset(&fb, ext, batch)?;
 	}
 
-	Ok(())
+	Ok(fork_point)
 }
 
 fn validate_utxo(


@@ -331,7 +331,7 @@ impl Writeable for CommitPos {
 /// blockchain tree. References the max height and the latest and previous
 /// blocks
 /// for convenience and the total difficulty.
-#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)]
+#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq)]
 pub struct Tip {
 	/// Height of the tip (max height of the fork)
 	pub height: u64,
@@ -444,13 +444,44 @@ impl ChainAdapter for NoopAdapter {
 }
 
 /// Status of an accepted block.
-#[derive(Debug, Clone, PartialEq)]
+#[derive(Debug, Clone, Copy, PartialEq)]
 pub enum BlockStatus {
 	/// Block is the "next" block, updating the chain head.
-	Next,
+	Next {
+		/// Previous chain head.
+		prev_head: Tip,
+	},
 	/// Block does not update the chain head and is a fork.
-	Fork,
+	Fork {
+		/// Previous chain head.
+		prev_head: Tip,
+		/// Fork point for rewind.
+		fork_point: Tip,
+	},
 	/// Block updates the chain head via a (potentially disruptive) "reorg".
 	/// Previous block was not our previous chain head.
-	Reorg(u64),
+	Reorg {
+		/// Previous chain head.
+		prev_head: Tip,
+		/// Fork point for rewind.
+		fork_point: Tip,
+	},
+}
+
+impl BlockStatus {
+	/// Is this the "next" block?
+	pub fn is_next(&self) -> bool {
+		match *self {
+			BlockStatus::Next { .. } => true,
+			_ => false,
+		}
+	}
+
+	/// Is this block a "reorg"?
+	pub fn is_reorg(&self) -> bool {
+		match *self {
+			BlockStatus::Reorg { .. } => true,
+			_ => false,
+		}
+	}
 }
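
The `Copy` derives added to `Tip` and `BlockStatus` above are what let the adapter and hook changes further down pass `status` by value rather than by reference. A minimal sketch of that design choice, using hypothetical simplified types rather than the grin API:

	// Hypothetical, simplified types (not the grin API) showing the by-value
	// hook pattern that the `Copy` derive enables.
	#[derive(Debug, Clone, Copy, PartialEq)]
	enum Status {
		Next,
		Reorg,
	}

	// Each hook receives its own copy of `status`; no `&status` borrow and no
	// `.clone()` calls are needed inside the loop.
	fn notify_all(hooks: &[fn(Status)], status: Status) {
		for hook in hooks {
			hook(status);
		}
	}

	fn main() {
		fn log_hook(s: Status) {
			println!("block accepted: {:?}", s);
		}
		let hooks: [fn(Status); 2] = [log_hook, log_hook];
		notify_all(&hooks, Status::Next);
		notify_all(&hooks, Status::Reorg);
	}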


@@ -365,12 +365,12 @@ fn mine_reorg() {
 		chain.process_block(b, chain::Options::SKIP_POW).unwrap();
 	}
 
-	let head = chain.head_header().unwrap();
+	let head = chain.head().unwrap();
 	assert_eq!(head.height, NUM_BLOCKS_MAIN);
 	assert_eq!(head.hash(), prev.hash());
 
 	// Reorg chain should exceed main chain's total difficulty to be considered
-	let reorg_difficulty = head.total_difficulty().to_num();
+	let reorg_difficulty = head.total_difficulty.to_num();
 
 	// Create one block for reorg chain forking off NUM_BLOCKS_MAIN - REORG_DEPTH height
 	let fork_head = chain
@@ -381,13 +381,17 @@ fn mine_reorg() {
 	chain.process_block(b, chain::Options::SKIP_POW).unwrap();
 
 	// Check that reorg is correctly reported in block status
+	let fork_point = chain.get_header_by_height(1).unwrap();
 	assert_eq!(
 		*adapter.last_status.read(),
-		Some(BlockStatus::Reorg(REORG_DEPTH))
+		Some(BlockStatus::Reorg {
+			prev_head: head,
+			fork_point: Tip::from_header(&fork_point)
+		})
 	);
 
 	// Chain should be switched to the reorganized chain
-	let head = chain.head_header().unwrap();
+	let head = chain.head().unwrap();
 	assert_eq!(head.height, NUM_BLOCKS_MAIN - REORG_DEPTH + 1);
 	assert_eq!(head.hash(), reorg_head.hash());
 }


@@ -725,7 +725,7 @@ where
 			// not broadcasting blocks received through sync
 			if !opts.contains(chain::Options::SYNC) {
 				for hook in &self.hooks {
-					hook.on_block_accepted(b, &status);
+					hook.on_block_accepted(b, status);
 				}
 				// If we mined the block then we want to broadcast the compact block.
 				// If we received the block from another node then broadcast "header first"
@@ -743,12 +743,8 @@ where
 			// Reconcile the txpool against the new block *after* we have broadcast it to our peers.
 			// This may be slow and we do not want to delay block propagation.
 			// We only want to reconcile the txpool against the new block *if* total work has increased.
-			let is_reorg = if let BlockStatus::Reorg(_) = status {
-				true
-			} else {
-				false
-			};
-			if status == BlockStatus::Next || is_reorg {
+			if status.is_next() || status.is_reorg() {
 				let mut tx_pool = self.tx_pool.write();
 				let _ = tx_pool.reconcile_block(b);
@@ -758,7 +754,7 @@ where
 				tx_pool.truncate_reorg_cache(cutoff);
 			}
 
-			if is_reorg {
+			if status.is_reorg() {
 				let _ = self.tx_pool.write().reconcile_reorg_cache(&b.header);
 			}
 		}


@@ -76,7 +76,7 @@ pub trait NetEvents {
 /// Trait to be implemented by Chain Event Hooks
 pub trait ChainEvents {
 	/// Triggers when a new block is accepted by the chain (might be a Reorg or a Fork)
-	fn on_block_accepted(&self, block: &core::Block, status: &BlockStatus) {}
+	fn on_block_accepted(&self, block: &core::Block, status: BlockStatus) {}
 }
 
 /// Basic Logger
@@ -116,31 +116,45 @@ impl NetEvents for EventLogger {
 }
 
 impl ChainEvents for EventLogger {
-	fn on_block_accepted(&self, block: &core::Block, status: &BlockStatus) {
+	fn on_block_accepted(&self, block: &core::Block, status: BlockStatus) {
 		match status {
-			BlockStatus::Reorg(depth) => {
+			BlockStatus::Reorg {
+				prev_head,
+				fork_point,
+			} => {
 				warn!(
-					"block_accepted (REORG!): {:?} at {} (depth: {}, diff: {})",
+					"block_accepted (REORG!): {} at {}, (prev_head: {} at {}, fork_point: {} at {}, depth: {})",
 					block.hash(),
 					block.header.height,
-					depth,
-					block.header.total_difficulty(),
+					prev_head.hash(),
+					prev_head.height,
+					fork_point.hash(),
+					fork_point.height,
+					block.header.height.saturating_sub(fork_point.height + 1),
 				);
 			}
-			BlockStatus::Fork => {
+			BlockStatus::Fork {
+				prev_head,
+				fork_point,
+			} => {
 				debug!(
-					"block_accepted (fork?): {:?} at {} (diff: {})",
+					"block_accepted (fork?): {} at {}, (prev_head: {} at {}, fork_point: {} at {}, depth: {})",
 					block.hash(),
 					block.header.height,
-					block.header.total_difficulty(),
+					prev_head.hash(),
+					prev_head.height,
+					fork_point.hash(),
+					fork_point.height,
+					block.header.height.saturating_sub(fork_point.height + 1),
 				);
 			}
-			BlockStatus::Next => {
+			BlockStatus::Next { prev_head } => {
 				debug!(
-					"block_accepted (head+): {:?} at {} (diff: {})",
+					"block_accepted (head+): {} at {} (prev_head: {} at {})",
 					block.hash(),
 					block.header.height,
-					block.header.total_difficulty(),
+					prev_head.hash(),
+					prev_head.height,
 				);
 			}
 		}
@@ -262,20 +276,20 @@ impl WebHook {
 }
 
 impl ChainEvents for WebHook {
-	fn on_block_accepted(&self, block: &core::Block, status: &BlockStatus) {
+	fn on_block_accepted(&self, block: &core::Block, status: BlockStatus) {
 		let status_str = match status {
-			BlockStatus::Reorg(_) => "reorg",
-			BlockStatus::Fork => "fork",
-			BlockStatus::Next => "head",
+			BlockStatus::Reorg { .. } => "reorg",
+			BlockStatus::Fork { .. } => "fork",
+			BlockStatus::Next { .. } => "head",
 		};
 
 		// Add additional `depth` field to the JSON in case of reorg
-		let payload = if let BlockStatus::Reorg(depth) = status {
+		let payload = if let BlockStatus::Reorg { fork_point, .. } = status {
+			let depth = block.header.height.saturating_sub(fork_point.height + 1);
 			json!({
 				"hash": block.header.hash().to_hex(),
 				"status": status_str,
 				"data": block,
 				"depth": depth
 			})
 		} else {