Mirror of https://github.com/mimblewimble/grin.git (synced 2025-05-07 01:31:15 +03:00)
Console debug fixes (#861)
* move some debug! to trace!
* more informative debugs
* standardising on always showing chain tips as "cumulative difficulty @ height [hash]"
* make 2 debug outputs into a single
* "no peers" as warning (not info) to let it stand out more clearly
* move fn param (used only in this one debug line)
* clarify difficulty "units"
parent bb134758e7
commit a8fc82d8c1
13 changed files with 53 additions and 51 deletions
Changed directories: chain/src, grin/src, p2p/src, wallet/src
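For illustration only (not part of the diff): a minimal Rust sketch of the standardised "cumulative difficulty @ height [hash]" tip format described in the commit message. The Tip struct, field types and values below are hypothetical stand-ins; the real code reads Difficulty, u64 and Hash fields from the chain types and logs them through slog's LOGGER rather than returning a String.

// Hypothetical stand-in for a chain tip, holding only the three fields the
// new log lines read: cumulative difficulty, height and last block hash.
struct Tip {
    total_difficulty: u64, // the real type is Difficulty; into_num() yields this raw number
    height: u64,
    last_block_h: String, // the real type is Hash; a short hex string stands in here
}

// Render a tip in the standardised "cumulative difficulty @ height [hash]" form.
fn format_tip(tip: &Tip) -> String {
    format!("{} @ {} [{}]", tip.total_difficulty, tip.height, tip.last_block_h)
}

fn main() {
    // Arbitrary example values.
    let head = Tip {
        total_difficulty: 250_193,
        height: 61_407,
        last_block_h: "1b42ce7a".to_string(),
    };
    // Prints "Chain init: 250193 @ 61407 [1b42ce7a]", the same shape as the
    // new debug! line in Chain::init and the sync/mining log lines below.
    println!("Chain init: {}", format_tip(&head));
}

Keeping one shape for every tip line makes chain state easy to grep across init, sync and mining output, which complements the debug! to trace! demotions in the hunks below.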
@@ -204,7 +204,13 @@ impl Chain {
 		// Make sure sync_head is available for later use when needed.
 		store.reset_head()?;

-		debug!(LOGGER, "Chain init: {:?}", head);
+		debug!(
+			LOGGER,
+			"Chain init: {} @ {} [{}]",
+			head.total_difficulty.into_num(),
+			head.height,
+			head.last_block_h
+		);

 		Ok(Chain {
 			db_root: db_root,
@@ -370,7 +376,7 @@ impl Chain {

 	/// Check for orphans, once a block is successfully added
 	pub fn check_orphans(&self, mut last_block_hash: Hash) {
-		debug!(
+		trace!(
 			LOGGER,
 			"chain: check_orphans: # orphans {}",
 			self.orphans.len(),
@@ -97,7 +97,7 @@ pub fn process_block(b: &Block, mut ctx: BlockContext) -> Result<Option<Tip>, Er
 	// internal validation and saving operations
 	let result = txhashset::extending(&mut txhashset, |mut extension| {
 		validate_block(b, &mut ctx, &mut extension)?;
-		debug!(
+		trace!(
 			LOGGER,
 			"pipe: process_block: {} at {} is valid, save and append.",
 			b.hash(),
@@ -253,7 +253,7 @@ where
 {
 	let commit_index = trees.commit_index.clone();

-	debug!(LOGGER, "Starting new txhashset extension.");
+	trace!(LOGGER, "Starting new txhashset extension.");
 	let mut extension = Extension::new(trees, commit_index);
 	res = inner(&mut extension);

@@ -273,12 +273,12 @@ where
 		}
 		Ok(r) => {
 			if rollback {
-				debug!(LOGGER, "Rollbacking txhashset extension.");
+				debug!(LOGGER, "Rollbacking txhashset extension. sizes {:?}", sizes);
 				trees.output_pmmr_h.backend.discard();
 				trees.rproof_pmmr_h.backend.discard();
 				trees.kernel_pmmr_h.backend.discard();
 			} else {
-				debug!(LOGGER, "Committing txhashset extension.");
+				debug!(LOGGER, "Committing txhashset extension. sizes {:?}", sizes);
 				trees.output_pmmr_h.backend.sync()?;
 				trees.rproof_pmmr_h.backend.sync()?;
 				trees.kernel_pmmr_h.backend.sync()?;
@@ -287,7 +287,7 @@ where
 				trees.kernel_pmmr_h.last_pos = sizes.2;
 			}

-			debug!(LOGGER, "TxHashSet extension done.");
+			trace!(LOGGER, "TxHashSet extension done.");
 			Ok(r)
 		}
 	}
@@ -26,7 +26,6 @@ use adapters::PoolToChainAdapter;
 use core::consensus;
 use core::core;
 use core::core::Proof;
-use core::core::target::Difficulty;
 use core::core::{Block, BlockHeader, Transaction};
 use core::core::hash::{Hash, Hashed};
 use pow::{cuckoo, MiningWorker};
@@ -130,7 +129,6 @@ impl Miner {
 	pub fn inner_loop_async(
 		&self,
 		plugin_miner: &mut PluginMiner,
-		difficulty: Difficulty,
 		b: &mut Block,
 		cuckoo_size: u32,
 		head: &BlockHeader,
@@ -140,12 +138,13 @@ impl Miner {
 	) -> Option<Proof> {
 		debug!(
 			LOGGER,
-			"(Server ID: {}) Mining at Cuckoo{} for at most {} secs at height {} and difficulty {}.",
+			"(Server ID: {}) Mining Cuckoo{} for max {}s on {} @ {} [{}].",
 			self.debug_output_id,
 			cuckoo_size,
 			attempt_time_per_block,
+			b.header.total_difficulty,
 			b.header.height,
-			b.header.total_difficulty
+			b.header.hash()
 		);

 		// look for a pow for at most attempt_time_per_block sec on the
@@ -175,10 +174,10 @@ impl Miner {
 				let proof_diff = proof.clone().to_difficulty();
 				trace!(
 					LOGGER,
-					"Found cuckoo solution for nonce {} of difficulty {} (difficulty target {})",
+					"Found cuckoo solution! nonce {} gave difficulty {} (block diff {})",
 					s.get_nonce_as_u64(),
 					proof_diff.into_num(),
-					difficulty.into_num()
+					(b.header.total_difficulty.clone() - head.total_difficulty.clone()).into_num()
 				);
 				if proof_diff > (b.header.total_difficulty.clone() - head.total_difficulty.clone())
 				{
@@ -270,13 +269,14 @@ impl Miner {

 		debug!(
 			LOGGER,
-			"(Server ID: {}) Mining at Cuckoo{} for {} secs (will wait for last solution) \
-			 on block {} at difficulty {}.",
+			"(Server ID: {}) Mining Cuckoo{} for max {}s (will wait for last solution) \
+			 on {} @ {} [{}].",
 			self.debug_output_id,
 			cuckoo_size,
 			attempt_time_per_block,
-			latest_hash,
-			b.header.total_difficulty
+			b.header.total_difficulty,
+			b.header.height,
+			latest_hash
 		);
 		let mut iter_count = 0;

@@ -296,7 +296,7 @@ impl Miner {
 			let proof_diff = proof.clone().to_difficulty();
 			trace!(
 				LOGGER,
-				"Found cuckoo solution for nonce {} of difficulty {} (difficulty target {})",
+				"Found cuckoo solution for nonce {} of difficulty {} (cumulative diff {})",
 				b.header.nonce,
 				proof_diff.into_num(),
 				b.header.total_difficulty.into_num()
@@ -388,12 +388,13 @@ impl Miner {

 		debug!(
 			LOGGER,
-			"(Server ID: {}) Mining at Cuckoo{} for at most {} secs on block {} at difficulty {}.",
+			"(Server ID: {}) Mining Cuckoo{} for max {}s on {} @ {} [{}].",
 			self.debug_output_id,
 			cuckoo_size,
 			attempt_time_per_block,
-			latest_hash,
-			b.header.total_difficulty
+			b.header.total_difficulty,
+			b.header.height,
+			latest_hash
 		);
 		let mut iter_count = 0;

@@ -486,8 +487,7 @@ impl Miner {
 		}

 		loop {
-			debug!(LOGGER, "in miner loop...");
-			trace!(LOGGER, "key_id: {:?}", key_id);
+			trace!(LOGGER, "in miner loop. key_id: {:?}", key_id);

 			// get the latest chain state and build a block on top of it
 			let head = self.chain.head_header().unwrap();
@@ -526,7 +526,6 @@ impl Miner {
 			if use_async {
 				sol = self.inner_loop_async(
 					&mut p,
-					(b.header.total_difficulty.clone() - head.total_difficulty.clone()),
 					&mut b,
 					cuckoo_size,
 					&head,
@@ -575,7 +574,7 @@ impl Miner {
 					e
 				);
 			}
-			debug!(LOGGER, "resetting key_id in miner to None");
+			trace!(LOGGER, "resetting key_id in miner to None");
 			key_id = None;
 		} else {
 			debug!(
@@ -630,7 +629,7 @@ impl Miner {

 		debug!(
 			LOGGER,
-			"(Server ID: {}) Built new block with {} inputs and {} outputs, network difficulty: {}, block cumulative difficulty {}",
+			"(Server ID: {}) Built new block with {} inputs and {} outputs, network difficulty: {}, cumulative difficulty {}",
 			self.debug_output_id,
 			b.inputs.len(),
 			b.outputs.len(),
@@ -87,14 +87,6 @@ fn monitor_peers(
 	// regularly check if we need to acquire more peers and if so, gets
 	// them from db
 	let total_count = peers.all_peers().len();
-	debug!(
-		LOGGER,
-		"monitor_peers: {} most_work_peers, {} connected, {} total known",
-		peers.most_work_peers().len(),
-		peers.connected_peers().len(),
-		total_count,
-	);
-
 	let mut healthy_count = 0;
 	let mut banned_count = 0;
 	let mut defunct_count = 0;
@@ -120,7 +112,10 @@ fn monitor_peers(

 	debug!(
 		LOGGER,
-		"monitor_peers: all {} = {} healthy + {} banned + {} defunct",
+		"monitor_peers: {} connected ({} most_work). \
+		 all {} = {} healthy + {} banned + {} defunct",
+		peers.connected_peers().len(),
+		peers.most_work_peers().len(),
 		total_count,
 		healthy_count,
 		banned_count,
@@ -214,7 +209,7 @@ fn listen_for_addrs(
 	let connect_peer = p2p.connect(&addr);
 	match connect_peer {
 		Ok(p) => {
-			debug!(LOGGER, "connect_and_req: ok. attempting send_peer_request");
+			trace!(LOGGER, "connect_and_req: ok. attempting send_peer_request");
 			if let Ok(p) = p.try_read() {
 				let _ = p.send_peer_request(capab);
 			}
@@ -266,11 +266,13 @@ fn needs_syncing(
 		);

 		if peer.info.total_difficulty <= local_diff {
+			let ch = chain.head().unwrap();
 			info!(
 				LOGGER,
-				"synchronized at {:?} @ {:?}",
-				local_diff,
-				chain.head().unwrap().height
+				"synchronised at {} @ {} [{}]",
+				local_diff.into_num(),
+				ch.height,
+				ch.last_block_h
 			);
 			if !header_only {
 				let _ = chain.reset_head();
@@ -279,7 +281,7 @@ fn needs_syncing(
 			}
 		}
 	} else {
-		info!(LOGGER, "sync: no peers available, disabling sync");
+		warn!(LOGGER, "sync: no peers available, disabling sync");
 		return false;
 	}
 } else {
@@ -206,7 +206,7 @@ fn poll<H>(
 		// check the read end
 		if let Some(h) = try_break!(error_tx, read_header(conn)) {
 			let msg = Message::from_header(h, conn);
-			debug!(
+			trace!(
 				LOGGER,
 				"Received message header, type {:?}, len {}.",
 				msg.header.msg_type,
@@ -174,7 +174,7 @@ impl Handshake {
 		};

 		write_message(conn, shake, Type::Shake)?;
-		debug!(LOGGER, "Success handshake with {}.", peer_info.addr);
+		trace!(LOGGER, "Success handshake with {}.", peer_info.addr);

 		// when more than one protocol version is supported, choosing should go here
 		Ok(peer_info)
@@ -161,7 +161,7 @@ impl Peer {
 	/// if the remote peer is known to already have the block.
 	pub fn send_block(&self, b: &core::Block) -> Result<(), Error> {
 		if !self.tracking_adapter.has(b.hash()) {
-			debug!(LOGGER, "Send block {} to {}", b.hash(), self.info.addr);
+			trace!(LOGGER, "Send block {} to {}", b.hash(), self.info.addr);
 			self.connection.as_ref().unwrap().send(b, msg::Type::Block)
 		} else {
 			debug!(
@@ -176,7 +176,7 @@ impl Peer {

 	pub fn send_compact_block(&self, b: &core::CompactBlock) -> Result<(), Error> {
 		if !self.tracking_adapter.has(b.hash()) {
-			debug!(
+			trace!(
 				LOGGER,
 				"Send compact block {} to {}",
 				b.hash(),
@@ -205,7 +205,7 @@ impl Peer {
 				.unwrap()
 				.send(bh, msg::Type::Header)
 		} else {
-			debug!(
+			trace!(
 				LOGGER,
 				"Suppress header send {} to {} (already seen)",
 				bh.hash(),
@@ -331,7 +331,7 @@ impl Peers {
 			}
 		}
 	}
-	debug!(
+	trace!(
 		LOGGER,
 		"broadcast_header: {}, {} at {}, to {} peers, done.",
 		bh.hash(),
@@ -600,7 +600,7 @@ impl NetAdapter for Peers {

 	/// A list of peers has been received from one of our peers.
 	fn peer_addrs_received(&self, peer_addrs: Vec<SocketAddr>) {
-		debug!(LOGGER, "Received {} peer addrs, saving.", peer_addrs.len());
+		trace!(LOGGER, "Received {} peer addrs, saving.", peer_addrs.len());
 		for pa in peer_addrs {
 			if let Ok(e) = self.exists_peer(pa) {
 				if e {
@@ -75,7 +75,7 @@ impl MessageHandler for Protocol {

 			Type::GetBlock => {
 				let h: Hash = msg.body()?;
-				debug!(LOGGER, "handle_payload: GetBlock {}", h);
+				trace!(LOGGER, "handle_payload: GetBlock {}", h);

 				let bo = adapter.get_block(h);
 				if let Some(b) = bo {
@@ -88,7 +88,7 @@ impl MessageHandler for Protocol {
 				let b: core::Block = msg.body()?;
 				let bh = b.hash();

-				debug!(LOGGER, "handle_payload: Block {}", bh);
+				trace!(LOGGER, "handle_payload: Block {}", bh);

 				adapter.block_received(b, self.addr);
 				Ok(None)
@@ -130,7 +130,7 @@ impl Server {
 			return Ok(p);
 		}

-		debug!(LOGGER, "connect_peer: connecting to {}", addr);
+		trace!(LOGGER, "connect_peer: connecting to {}", addr);
 		match TcpStream::connect_timeout(addr, Duration::from_secs(10)) {
 			Ok(mut stream) => {
 				let addr = SocketAddr::new(self.config.host, self.config.port);
@@ -525,7 +525,7 @@ impl WalletData {
 		info!(LOGGER, "Acquiring wallet lock ...");

 		let action = || {
-			debug!(LOGGER, "Attempting to acquire wallet lock");
+			trace!(LOGGER, "making lock file for wallet lock");
 			OpenOptions::new()
 				.write(true)
 				.create_new(true)