Making initial sync easier on the eyes [master] (#623)

* Making initial sync easier on the eyes:
- display "chain pointers" as: cumulative @ height [hash]
- clarify and line up to make the "pointers" easy to compare
- make every 100th block log its info at the info level, all other blocks at debug level as before
This commit is contained in:
Simon B 2018-01-18 23:47:42 +01:00 committed by Ignotus Peverell
parent 6b0f1fc20e
commit 885c2d73ea
3 changed files with 33 additions and 35 deletions

View file

@ -175,12 +175,14 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), E
} }
if !ctx.opts.intersects(SKIP_POW) { if !ctx.opts.intersects(SKIP_POW) {
let cycle_size = global::sizeshift(); let n = global::sizeshift() as u32;
if !(ctx.pow_verifier)(header, n) {
debug!(LOGGER, "pipe: validate_header cuckoo size {}", cycle_size); error!(LOGGER, "pipe: validate_header failed for cuckoo shift size {}", n);
if !(ctx.pow_verifier)(header, cycle_size as u32) {
return Err(Error::InvalidPow); return Err(Error::InvalidPow);
} }
if header.height % 500 == 0 {
debug!(LOGGER, "Validating header validated, using cuckoo shift size {}", n);
}
} }
// first I/O cost, better as late as possible // first I/O cost, better as late as possible
@ -333,16 +335,12 @@ fn update_head(b: &Block, ctx: &mut BlockContext) -> Result<Option<Tip>, Error>
.map_err(|e| Error::StoreErr(e, "pipe save head".to_owned()))?; .map_err(|e| Error::StoreErr(e, "pipe save head".to_owned()))?;
} }
ctx.head = tip.clone(); ctx.head = tip.clone();
debug!( if b.header.height % 100 == 0 {
LOGGER,
"pipe: update_head: {}, {} at {}",
b.hash(),
b.header.total_difficulty,
b.header.height
);
if b.header.height % 500 == 0 {
info!(LOGGER, "pipe: chain head reached {} @ {} [{}]", info!(LOGGER, "pipe: chain head reached {} @ {} [{}]",
b.header.height, b.header.difficulty, b.hash()); b.header.height, b.header.difficulty, b.hash());
} else {
debug!(LOGGER, "pipe: chain head reached {} @ {} [{}]",
b.header.height, b.header.difficulty, b.hash());
} }
Ok(Some(tip)) Ok(Some(tip))
} else { } else {
@ -357,15 +355,10 @@ fn update_sync_head(bh: &BlockHeader, ctx: &mut BlockContext) -> Result<Option<T
.save_sync_head(&tip) .save_sync_head(&tip)
.map_err(|e| Error::StoreErr(e, "pipe save sync head".to_owned()))?; .map_err(|e| Error::StoreErr(e, "pipe save sync head".to_owned()))?;
ctx.head = tip.clone(); ctx.head = tip.clone();
debug!( if bh.height % 100 == 0 {
LOGGER, info!(LOGGER, "sync head {} @ {} [{}]", bh.total_difficulty, bh.height, bh.hash());
"pipe: update_sync_head: {}, {} at {}", } else {
bh.hash(), debug!(LOGGER, "sync head {} @ {} [{}]", bh.total_difficulty, bh.height, bh.hash());
bh.total_difficulty,
bh.height,
);
if bh.height % 1000 == 0 {
info!(LOGGER, "pipe: sync head reached {} [{}]", bh.height, bh.hash());
} }
Ok(Some(tip)) Ok(Some(tip))
} }
@ -378,13 +371,11 @@ fn update_header_head(bh: &BlockHeader, ctx: &mut BlockContext) -> Result<Option
.save_header_head(&tip) .save_header_head(&tip)
.map_err(|e| Error::StoreErr(e, "pipe save header head".to_owned()))?; .map_err(|e| Error::StoreErr(e, "pipe save header head".to_owned()))?;
ctx.head = tip.clone(); ctx.head = tip.clone();
debug!( if bh.height % 100 == 0 {
LOGGER, info!(LOGGER, "header head {} @ {} [{}]", bh.total_difficulty, bh.height, bh.hash());
"pipe: update_header_head: {}, {} at {}", } else {
bh.hash(), debug!(LOGGER, "header head {} @ {} [{}]", bh.total_difficulty, bh.height, bh.hash());
bh.total_difficulty, }
bh.height,
);
Ok(Some(tip)) Ok(Some(tip))
} else { } else {
Ok(None) Ok(None)
@ -420,10 +411,10 @@ pub fn rewind_and_apply_fork(
debug!( debug!(
LOGGER, LOGGER,
"validate_block: forked_block: {} at {}", "rewind_and_apply_fork @ {} [{}]",
forked_block.header.hash(),
forked_block.header.height, forked_block.header.height,
); forked_block.header.hash(),
);
// rewind the sum trees up to the forking block // rewind the sum trees up to the forking block
ext.rewind(&forked_block)?; ext.rewind(&forked_block)?;

View file

@ -167,9 +167,9 @@ impl Miner {
); );
// look for a pow for at most attempt_time_per_block sec on the // look for a pow for at most attempt_time_per_block sec on the
// same block (to give a chance to new // same block (to give a chance to new
// transactions) and as long as the head hasn't changed // transactions) and as long as the head hasn't changed
// Will change this to something else at some point // Will change this to something else at some point
let deadline = time::get_time().sec + attempt_time_per_block as i64; let deadline = time::get_time().sec + attempt_time_per_block as i64;
// how often to output stats // how often to output stats

View file

@ -106,7 +106,14 @@ impl Handshake {
total_difficulty: shake.total_difficulty, total_difficulty: shake.total_difficulty,
}; };
debug!(LOGGER, "Connected to peer {:?}", peer_info); debug!(
LOGGER,
"Connected! Cumulative {} offered from {:?} {:?} {:?}",
peer_info.total_difficulty.into_num(),
peer_info.addr,
peer_info.user_agent,
peer_info.capabilities
);
// when more than one protocol version is supported, choosing should go here // when more than one protocol version is supported, choosing should go here
Ok((conn, ProtocolV1::new(), peer_info)) Ok((conn, ProtocolV1::new(), peer_info))
} }