Replace logging backend with log4rs and add log rotation (#1789)

* Replace logging backend with flexi-logger and add log rotation
* Change flexi_logger to log4rs
* Disable logging level filtering in Root logger
* Support different logging levels for file and stdout
* Don't log messages from modules other than Grin's own
* Fix formatting
* Place backed up compressed log copies into log file directory
* Increase default log file size to 16 MiB
* Add comment to config file on log_max_size option
eupn 2018-10-21 23:30:56 +03:00 committed by Ignotus Peverell
parent 0852b0c4cf
commit 1195071f5b
83 changed files with 582 additions and 897 deletions
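
As context for the changes below, here is a minimal, self-contained sketch of a log4rs setup matching the bullets above: no level filtering in the Root logger, separate threshold filters for the stdout and file appenders, and size-based rotation that keeps gzip-compressed backups next to the log file. File names, sizes and patterns are illustrative assumptions, not code from this commit, and the builder calls assume a log4rs 0.8-era API.

#[macro_use]
extern crate log;
extern crate log4rs;

use log::LevelFilter;
use log4rs::append::console::ConsoleAppender;
use log4rs::append::rolling_file::policy::compound::roll::fixed_window::FixedWindowRoller;
use log4rs::append::rolling_file::policy::compound::trigger::size::SizeTrigger;
use log4rs::append::rolling_file::policy::compound::CompoundPolicy;
use log4rs::append::rolling_file::RollingFileAppender;
use log4rs::config::{Appender, Config, Root};
use log4rs::encode::pattern::PatternEncoder;
use log4rs::filter::threshold::ThresholdFilter;

fn main() {
    // Stdout appender: terse output, filtered to Warning and above.
    let stdout = ConsoleAppender::builder()
        .encoder(Box::new(PatternEncoder::new("{d} {l} {m}{n}")))
        .build();

    // Rotate grin.log once it reaches 16 MiB, keeping compressed copies
    // (grin.log.1.gz, grin.log.2.gz, ...) in the same directory.
    let roller = FixedWindowRoller::builder()
        .build("grin.log.{}.gz", 32)
        .unwrap();
    let trigger = SizeTrigger::new(16 * 1024 * 1024);
    let policy = CompoundPolicy::new(Box::new(trigger), Box::new(roller));
    let file = RollingFileAppender::builder()
        .encoder(Box::new(PatternEncoder::new("{d} {l} [{M}] {m}{n}")))
        .build("grin.log", Box::new(policy))
        .unwrap();

    // The Root logger passes everything through (LevelFilter::Trace);
    // per-output filtering happens in the appenders via ThresholdFilter.
    let config = Config::builder()
        .appender(
            Appender::builder()
                .filter(Box::new(ThresholdFilter::new(LevelFilter::Warn)))
                .build("stdout", Box::new(stdout)),
        ).appender(
            Appender::builder()
                .filter(Box::new(ThresholdFilter::new(LevelFilter::Debug)))
                .build("file", Box::new(file)),
        ).build(
            Root::builder()
                .appender("stdout")
                .appender("file")
                .build(LevelFilter::Trace),
        ).unwrap();

    log4rs::init_config(config).unwrap();
    info!("logging initialised");
}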

View file

@ -24,7 +24,7 @@ humansize = "1.1.0"
daemonize = "0.3" daemonize = "0.3"
serde = "1" serde = "1"
serde_json = "1" serde_json = "1"
slog = { version = "~2.3", features = ["max_level_trace", "release_max_level_trace"] } log = "0.4"
term = "0.5" term = "0.5"
grin_api = { path = "./api" } grin_api = { path = "./api" }
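
Swapping the dependency also changes how verbosity is capped: slog relied on its max_level_trace / release_max_level_trace compile-time features, while the log facade exposes equivalent cargo features plus a runtime maximum level that the installed backend (log4rs here) controls. A small illustration, not code from this commit:

extern crate log;

use log::LevelFilter;

fn main() {
    // log4rs sets this from its own config when initialised; it can also be
    // adjusted directly. Records above the max level are skipped cheaply
    // before reaching any appender.
    log::set_max_level(LevelFilter::Trace);
    assert_eq!(log::max_level(), LevelFilter::Trace);
}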

View file

@ -15,7 +15,7 @@ ring = "0.13"
serde = "1" serde = "1"
serde_derive = "1" serde_derive = "1"
serde_json = "1" serde_json = "1"
slog = { version = "~2.3", features = ["max_level_trace", "release_max_level_trace"] } log = "0.4"
tokio = "0.1.7" tokio = "0.1.7"
tokio-core = "0.1.17" tokio-core = "0.1.17"
tokio-tcp = "0.1" tokio-tcp = "0.1"

View file

@ -37,7 +37,6 @@ use types::*;
use url::form_urlencoded; use url::form_urlencoded;
use util; use util;
use util::secp::pedersen::Commitment; use util::secp::pedersen::Commitment;
use util::LOGGER;
use web::*; use web::*;
// All handlers use `Weak` references instead of `Arc` to avoid cycles that // All handlers use `Weak` references instead of `Arc` to avoid cycles that
@ -206,12 +205,8 @@ impl OutputHandler {
} }
debug!( debug!(
LOGGER,
"outputs_block_batch: {}-{}, {:?}, {:?}", "outputs_block_batch: {}-{}, {:?}, {:?}",
start_height, start_height, end_height, commitments, include_rp,
end_height,
commitments,
include_rp,
); );
let mut return_vec = vec![]; let mut return_vec = vec![];
@ -745,7 +740,6 @@ impl PoolPushHandler {
identifier: "?.?.?.?".to_string(), identifier: "?.?.?.?".to_string(),
}; };
info!( info!(
LOGGER,
"Pushing transaction {} to pool (inputs: {}, outputs: {}, kernels: {})", "Pushing transaction {} to pool (inputs: {}, outputs: {}, kernels: {})",
tx.hash(), tx.hash(),
tx.inputs().len(), tx.inputs().len(),
@ -759,7 +753,7 @@ impl PoolPushHandler {
tx_pool tx_pool
.add_to_pool(source, tx, !fluff, &header) .add_to_pool(source, tx, !fluff, &header)
.map_err(|e| { .map_err(|e| {
error!(LOGGER, "update_pool: failed with error: {:?}", e); error!("update_pool: failed with error: {:?}", e);
ErrorKind::Internal(format!("Failed to update pool: {:?}", e)).into() ErrorKind::Internal(format!("Failed to update pool: {:?}", e)).into()
}) })
}), }),
@ -808,7 +802,7 @@ pub fn start_rest_apis(
router.add_middleware(basic_auth_middleware); router.add_middleware(basic_auth_middleware);
} }
info!(LOGGER, "Starting HTTP API server at {}.", addr); info!("Starting HTTP API server at {}.", addr);
let socket_addr: SocketAddr = addr.parse().expect("unable to parse socket address"); let socket_addr: SocketAddr = addr.parse().expect("unable to parse socket address");
apis.start(socket_addr, router, tls_config).is_ok() apis.start(socket_addr, router, tls_config).is_ok()
} }
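
The handler changes above follow the one mechanical pattern repeated across the whole diff: the log crate's macros take no logger handle, so the leading LOGGER argument disappears and many multi-line calls collapse onto one line. A minimal sketch of the before/after (address and message are illustrative):

#[macro_use]
extern crate log;

fn start_api(addr: &str) {
    // before (slog): info!(LOGGER, "Starting HTTP API server at {}.", addr);
    // after (log facade): the globally installed backend is implicit.
    info!("Starting HTTP API server at {}.", addr);
}

fn main() {
    // Without an initialised backend the macro is a no-op, so this runs as-is.
    start_api("127.0.0.1:3413");
}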

View file

@ -33,7 +33,7 @@ extern crate serde;
extern crate serde_derive; extern crate serde_derive;
extern crate serde_json; extern crate serde_json;
#[macro_use] #[macro_use]
extern crate slog; extern crate log;
extern crate futures; extern crate futures;
extern crate http; extern crate http;
extern crate hyper_rustls; extern crate hyper_rustls;

View file

@ -33,7 +33,6 @@ use std::sync::Arc;
use std::{io, thread}; use std::{io, thread};
use tokio_rustls::ServerConfigExt; use tokio_rustls::ServerConfigExt;
use tokio_tcp; use tokio_tcp;
use util::LOGGER;
/// Errors that can be returned by an ApiEndpoint implementation. /// Errors that can be returned by an ApiEndpoint implementation.
#[derive(Debug)] #[derive(Debug)]
@ -243,13 +242,10 @@ impl ApiServer {
// TODO re-enable stop after investigation // TODO re-enable stop after investigation
//let tx = mem::replace(&mut self.shutdown_sender, None).unwrap(); //let tx = mem::replace(&mut self.shutdown_sender, None).unwrap();
//tx.send(()).expect("Failed to stop API server"); //tx.send(()).expect("Failed to stop API server");
info!(LOGGER, "API server has been stoped"); info!("API server has been stoped");
true true
} else { } else {
error!( error!("Can't stop API server, it's not running or doesn't spport stop operation");
LOGGER,
"Can't stop API server, it's not running or doesn't spport stop operation"
);
false false
} }
} }
@ -263,7 +259,7 @@ impl Handler for LoggingMiddleware {
req: Request<Body>, req: Request<Body>,
mut handlers: Box<Iterator<Item = HandlerObj>>, mut handlers: Box<Iterator<Item = HandlerObj>>,
) -> ResponseFuture { ) -> ResponseFuture {
debug!(LOGGER, "REST call: {} {}", req.method(), req.uri().path()); debug!("REST call: {} {}", req.method(), req.uri().path());
handlers.next().unwrap().call(req, handlers) handlers.next().unwrap().call(req, handlers)
} }
} }

View file

@ -12,7 +12,7 @@ lmdb-zero = "0.4.4"
failure = "0.1" failure = "0.1"
failure_derive = "0.1" failure_derive = "0.1"
croaring = "0.3" croaring = "0.3"
slog = { version = "~2.3", features = ["max_level_trace", "release_max_level_trace"] } log = "0.4"
serde = "1" serde = "1"
serde_derive = "1" serde_derive = "1"
chrono = "0.4.4" chrono = "0.4.4"

View file

@ -38,7 +38,6 @@ use store;
use txhashset; use txhashset;
use types::{ChainAdapter, NoStatus, Options, Tip, TxHashSetRoots, TxHashsetWriteStatus}; use types::{ChainAdapter, NoStatus, Options, Tip, TxHashSetRoots, TxHashsetWriteStatus};
use util::secp::pedersen::{Commitment, RangeProof}; use util::secp::pedersen::{Commitment, RangeProof};
use util::LOGGER;
/// Orphan pool size is limited by MAX_ORPHAN_SIZE /// Orphan pool size is limited by MAX_ORPHAN_SIZE
pub const MAX_ORPHAN_SIZE: usize = 200; pub const MAX_ORPHAN_SIZE: usize = 200;
@ -184,7 +183,6 @@ impl Chain {
let head = store.head()?; let head = store.head()?;
debug!( debug!(
LOGGER,
"Chain init: {} @ {} [{}]", "Chain init: {} @ {} [{}]",
head.total_difficulty.to_num(), head.total_difficulty.to_num(),
head.height, head.height,
@ -261,7 +259,6 @@ impl Chain {
&self.orphans.add(orphan); &self.orphans.add(orphan);
debug!( debug!(
LOGGER,
"process_block: orphan: {:?}, # orphans {}{}", "process_block: orphan: {:?}, # orphans {}{}",
block_hash, block_hash,
self.orphans.len(), self.orphans.len(),
@ -275,7 +272,6 @@ impl Chain {
} }
ErrorKind::Unfit(ref msg) => { ErrorKind::Unfit(ref msg) => {
debug!( debug!(
LOGGER,
"Block {} at {} is unfit at this time: {}", "Block {} at {} is unfit at this time: {}",
b.hash(), b.hash(),
b.header.height, b.header.height,
@ -285,7 +281,6 @@ impl Chain {
} }
_ => { _ => {
info!( info!(
LOGGER,
"Rejected block {} at {}: {:?}", "Rejected block {} at {}: {:?}",
b.hash(), b.hash(),
b.header.height, b.header.height,
@ -360,7 +355,6 @@ impl Chain {
// Is there an orphan in our orphans that we can now process? // Is there an orphan in our orphans that we can now process?
loop { loop {
trace!( trace!(
LOGGER,
"check_orphans: at {}, # orphans {}", "check_orphans: at {}, # orphans {}",
height, height,
self.orphans.len(), self.orphans.len(),
@ -373,7 +367,6 @@ impl Chain {
let orphans_len = orphans.len(); let orphans_len = orphans.len();
for (i, orphan) in orphans.into_iter().enumerate() { for (i, orphan) in orphans.into_iter().enumerate() {
debug!( debug!(
LOGGER,
"check_orphans: get block {} at {}{}", "check_orphans: get block {} at {}{}",
orphan.block.hash(), orphan.block.hash(),
height, height,
@ -402,7 +395,6 @@ impl Chain {
if initial_height != height { if initial_height != height {
debug!( debug!(
LOGGER,
"check_orphans: {} blocks accepted since height {}, remaining # orphans {}", "check_orphans: {} blocks accepted since height {}, remaining # orphans {}",
height - initial_height, height - initial_height,
initial_height, initial_height,
@ -589,7 +581,6 @@ impl Chain {
txhashset: &txhashset::TxHashSet, txhashset: &txhashset::TxHashSet,
) -> Result<(), Error> { ) -> Result<(), Error> {
debug!( debug!(
LOGGER,
"chain: validate_kernel_history: rewinding and validating kernel history (readonly)" "chain: validate_kernel_history: rewinding and validating kernel history (readonly)"
); );
@ -606,8 +597,8 @@ impl Chain {
})?; })?;
debug!( debug!(
LOGGER, "chain: validate_kernel_history: validated kernel root on {} headers",
"chain: validate_kernel_history: validated kernel root on {} headers", count, count,
); );
Ok(()) Ok(())
@ -682,10 +673,7 @@ impl Chain {
self.validate_kernel_history(&header, &txhashset)?; self.validate_kernel_history(&header, &txhashset)?;
// all good, prepare a new batch and update all the required records // all good, prepare a new batch and update all the required records
debug!( debug!("chain: txhashset_write: rewinding a 2nd time (writeable)");
LOGGER,
"chain: txhashset_write: rewinding a 2nd time (writeable)"
);
let mut batch = self.store.batch()?; let mut batch = self.store.batch()?;
@ -709,10 +697,7 @@ impl Chain {
Ok(()) Ok(())
})?; })?;
debug!( debug!("chain: txhashset_write: finished validating and rebuilding");
LOGGER,
"chain: txhashset_write: finished validating and rebuilding"
);
status.on_save(); status.on_save();
@ -727,10 +712,7 @@ impl Chain {
// Commit all the changes to the db. // Commit all the changes to the db.
batch.commit()?; batch.commit()?;
debug!( debug!("chain: txhashset_write: finished committing the batch (head etc.)");
LOGGER,
"chain: txhashset_write: finished committing the batch (head etc.)"
);
// Replace the chain txhashset with the newly built one. // Replace the chain txhashset with the newly built one.
{ {
@ -738,10 +720,7 @@ impl Chain {
*txhashset_ref = txhashset; *txhashset_ref = txhashset;
} }
debug!( debug!("chain: txhashset_write: replaced our txhashset with the new one");
LOGGER,
"chain: txhashset_write: replaced our txhashset with the new one"
);
// Check for any orphan blocks and process them based on the new chain state. // Check for any orphan blocks and process them based on the new chain state.
self.check_orphans(header.height + 1); self.check_orphans(header.height + 1);
@ -763,14 +742,11 @@ impl Chain {
/// therefore be called judiciously. /// therefore be called judiciously.
pub fn compact(&self) -> Result<(), Error> { pub fn compact(&self) -> Result<(), Error> {
if self.archive_mode { if self.archive_mode {
debug!( debug!("Blockchain compaction disabled, node running in archive mode.");
LOGGER,
"Blockchain compaction disabled, node running in archive mode."
);
return Ok(()); return Ok(());
} }
debug!(LOGGER, "Starting blockchain compaction."); debug!("Starting blockchain compaction.");
// Compact the txhashset via the extension. // Compact the txhashset via the extension.
{ {
let mut txhashset = self.txhashset.write(); let mut txhashset = self.txhashset.write();
@ -785,7 +761,7 @@ impl Chain {
// Now check we can still successfully validate the chain state after // Now check we can still successfully validate the chain state after
// compacting, shouldn't be necessary once all of this is well-oiled // compacting, shouldn't be necessary once all of this is well-oiled
debug!(LOGGER, "Validating state after compaction."); debug!("Validating state after compaction.");
self.validate(true)?; self.validate(true)?;
// we need to be careful here in testing as 20 blocks is not that long // we need to be careful here in testing as 20 blocks is not that long
@ -798,7 +774,6 @@ impl Chain {
} }
debug!( debug!(
LOGGER,
"Compaction remove blocks older than {}.", "Compaction remove blocks older than {}.",
head.height - horizon head.height - horizon
); );
@ -831,7 +806,7 @@ impl Chain {
} }
} }
batch.commit()?; batch.commit()?;
debug!(LOGGER, "Compaction removed {} blocks, done.", count); debug!("Compaction removed {} blocks, done.", count);
Ok(()) Ok(())
} }
@ -1052,7 +1027,6 @@ fn setup_head(
if header.height > 0 && extension.batch.get_block_sums(&header.hash()).is_err() if header.height > 0 && extension.batch.get_block_sums(&header.hash()).is_err()
{ {
debug!( debug!(
LOGGER,
"chain: init: building (missing) block sums for {} @ {}", "chain: init: building (missing) block sums for {} @ {}",
header.height, header.height,
header.hash() header.hash()
@ -1073,7 +1047,6 @@ fn setup_head(
} }
debug!( debug!(
LOGGER,
"chain: init: rewinding and validating before we start... {} at {}", "chain: init: rewinding and validating before we start... {} at {}",
header.hash(), header.hash(),
header.height, header.height,
@ -1110,7 +1083,7 @@ fn setup_head(
// Save the block_sums to the db for use later. // Save the block_sums to the db for use later.
batch.save_block_sums(&genesis.hash(), &BlockSums::default())?; batch.save_block_sums(&genesis.hash(), &BlockSums::default())?;
info!(LOGGER, "chain: init: saved genesis: {:?}", genesis.hash()); info!("chain: init: saved genesis: {:?}", genesis.hash());
} }
Err(e) => return Err(ErrorKind::StoreErr(e, "chain init load head".to_owned()))?, Err(e) => return Err(ErrorKind::StoreErr(e, "chain init load head".to_owned()))?,
}; };

View file

@ -30,7 +30,7 @@ extern crate serde;
#[macro_use] #[macro_use]
extern crate serde_derive; extern crate serde_derive;
#[macro_use] #[macro_use]
extern crate slog; extern crate log;
extern crate chrono; extern crate chrono;
extern crate failure; extern crate failure;
#[macro_use] #[macro_use]

View file

@ -35,7 +35,6 @@ use grin_store;
use store; use store;
use txhashset; use txhashset;
use types::{Options, Tip}; use types::{Options, Tip};
use util::LOGGER;
/// Contextual information required to process a new block and either reject or /// Contextual information required to process a new block and either reject or
/// accept it. /// accept it.
@ -71,7 +70,6 @@ pub fn process_block(b: &Block, ctx: &mut BlockContext) -> Result<Option<Tip>, E
// spend resources reading the full block when its header is invalid // spend resources reading the full block when its header is invalid
debug!( debug!(
LOGGER,
"pipe: process_block {} at {} with {} inputs, {} outputs, {} kernels", "pipe: process_block {} at {} with {} inputs, {} outputs, {} kernels",
b.hash(), b.hash(),
b.header.height, b.header.height,
@ -168,7 +166,6 @@ pub fn process_block(b: &Block, ctx: &mut BlockContext) -> Result<Option<Tip>, E
})?; })?;
trace!( trace!(
LOGGER,
"pipe: process_block: {} at {} is valid, save and append.", "pipe: process_block: {} at {} is valid, save and append.",
b.hash(), b.hash(),
b.header.height, b.header.height,
@ -190,7 +187,6 @@ pub fn sync_block_headers(
) -> Result<Option<Tip>, Error> { ) -> Result<Option<Tip>, Error> {
if let Some(header) = headers.first() { if let Some(header) = headers.first() {
debug!( debug!(
LOGGER,
"pipe: sync_block_headers: {} headers from {} at {}", "pipe: sync_block_headers: {} headers from {} at {}",
headers.len(), headers.len(),
header.hash(), header.hash(),
@ -251,7 +247,6 @@ pub fn sync_block_headers(
/// it. /// it.
pub fn process_block_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), Error> { pub fn process_block_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), Error> {
debug!( debug!(
LOGGER,
"pipe: process_block_header: {} at {}", "pipe: process_block_header: {} at {}",
header.hash(), header.hash(),
header.height, header.height,
@ -356,8 +351,8 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), E
// check version, enforces scheduled hard fork // check version, enforces scheduled hard fork
if !consensus::valid_header_version(header.height, header.version) { if !consensus::valid_header_version(header.height, header.version) {
error!( error!(
LOGGER, "Invalid block header version received ({}), maybe update Grin?",
"Invalid block header version received ({}), maybe update Grin?", header.version header.version
); );
return Err(ErrorKind::InvalidBlockVersion(header.version).into()); return Err(ErrorKind::InvalidBlockVersion(header.version).into());
} }
@ -378,8 +373,8 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), E
let edge_bits = header.pow.edge_bits(); let edge_bits = header.pow.edge_bits();
if !(ctx.pow_verifier)(header, edge_bits).is_ok() { if !(ctx.pow_verifier)(header, edge_bits).is_ok() {
error!( error!(
LOGGER, "pipe: error validating header with cuckoo edge_bits {}",
"pipe: error validating header with cuckoo edge_bits {}", edge_bits edge_bits
); );
return Err(ErrorKind::InvalidPow.into()); return Err(ErrorKind::InvalidPow.into());
} }
@ -434,7 +429,6 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), E
let next_header_info = consensus::next_difficulty(header.height, diff_iter); let next_header_info = consensus::next_difficulty(header.height, diff_iter);
if target_difficulty != next_header_info.difficulty { if target_difficulty != next_header_info.difficulty {
info!( info!(
LOGGER,
"validate_header: header target difficulty {} != {}", "validate_header: header target difficulty {} != {}",
target_difficulty.to_num(), target_difficulty.to_num(),
next_header_info.difficulty.to_num() next_header_info.difficulty.to_num()
@ -548,8 +542,8 @@ fn update_head(b: &Block, ctx: &BlockContext) -> Result<Option<Tip>, Error> {
.map_err(|e| ErrorKind::StoreErr(e, "pipe save body".to_owned()))?; .map_err(|e| ErrorKind::StoreErr(e, "pipe save body".to_owned()))?;
debug!( debug!(
LOGGER, "pipe: head updated to {} at {}",
"pipe: head updated to {} at {}", tip.last_block_h, tip.height tip.last_block_h, tip.height
); );
Ok(Some(tip)) Ok(Some(tip))
@ -569,7 +563,7 @@ fn update_sync_head(bh: &BlockHeader, batch: &mut store::Batch) -> Result<(), Er
batch batch
.save_sync_head(&tip) .save_sync_head(&tip)
.map_err(|e| ErrorKind::StoreErr(e, "pipe save sync head".to_owned()))?; .map_err(|e| ErrorKind::StoreErr(e, "pipe save sync head".to_owned()))?;
debug!(LOGGER, "sync head {} @ {}", bh.hash(), bh.height); debug!("sync head {} @ {}", bh.hash(), bh.height);
Ok(()) Ok(())
} }
@ -583,8 +577,8 @@ fn update_header_head(bh: &BlockHeader, ctx: &mut BlockContext) -> Result<Option
.map_err(|e| ErrorKind::StoreErr(e, "pipe save header head".to_owned()))?; .map_err(|e| ErrorKind::StoreErr(e, "pipe save header head".to_owned()))?;
debug!( debug!(
LOGGER, "pipe: header_head updated to {} at {}",
"pipe: header_head updated to {} at {}", tip.last_block_h, tip.height tip.last_block_h, tip.height
); );
Ok(Some(tip)) Ok(Some(tip))
@ -616,7 +610,6 @@ pub fn rewind_and_apply_fork(b: &Block, ext: &mut txhashset::Extension) -> Resul
let forked_header = ext.batch.get_block_header(&current)?; let forked_header = ext.batch.get_block_header(&current)?;
trace!( trace!(
LOGGER,
"rewind_and_apply_fork @ {} [{}], was @ {} [{}]", "rewind_and_apply_fork @ {} [{}], was @ {} [{}]",
forked_header.height, forked_header.height,
forked_header.hash(), forked_header.hash(),
@ -627,11 +620,7 @@ pub fn rewind_and_apply_fork(b: &Block, ext: &mut txhashset::Extension) -> Resul
// Rewind the txhashset state back to the block where we forked from the most work chain. // Rewind the txhashset state back to the block where we forked from the most work chain.
ext.rewind(&forked_header)?; ext.rewind(&forked_header)?;
trace!( trace!("rewind_and_apply_fork: blocks on fork: {:?}", fork_hashes,);
LOGGER,
"rewind_and_apply_fork: blocks on fork: {:?}",
fork_hashes,
);
// Now re-apply all blocks on this fork. // Now re-apply all blocks on this fork.
for (_, h) in fork_hashes { for (_, h) in fork_hashes {

View file

@ -40,7 +40,7 @@ use grin_store::types::prune_noop;
use store::{Batch, ChainStore}; use store::{Batch, ChainStore};
use txhashset::{RewindableKernelView, UTXOView}; use txhashset::{RewindableKernelView, UTXOView};
use types::{Tip, TxHashSetRoots, TxHashsetWriteStatus}; use types::{Tip, TxHashSetRoots, TxHashsetWriteStatus};
use util::{file, secp_static, zip, LOGGER}; use util::{file, secp_static, zip};
const HEADERHASHSET_SUBDIR: &'static str = "header"; const HEADERHASHSET_SUBDIR: &'static str = "header";
const TXHASHSET_SUBDIR: &'static str = "txhashset"; const TXHASHSET_SUBDIR: &'static str = "txhashset";
@ -328,7 +328,7 @@ where
// we explicitly rewind the extension. // we explicitly rewind the extension.
let header = batch.head_header()?; let header = batch.head_header()?;
trace!(LOGGER, "Starting new txhashset (readonly) extension."); trace!("Starting new txhashset (readonly) extension.");
let res = { let res = {
let mut extension = Extension::new(trees, &batch, header); let mut extension = Extension::new(trees, &batch, header);
@ -340,14 +340,14 @@ where
inner(&mut extension) inner(&mut extension)
}; };
trace!(LOGGER, "Rollbacking txhashset (readonly) extension."); trace!("Rollbacking txhashset (readonly) extension.");
trees.header_pmmr_h.backend.discard(); trees.header_pmmr_h.backend.discard();
trees.output_pmmr_h.backend.discard(); trees.output_pmmr_h.backend.discard();
trees.rproof_pmmr_h.backend.discard(); trees.rproof_pmmr_h.backend.discard();
trees.kernel_pmmr_h.backend.discard(); trees.kernel_pmmr_h.backend.discard();
trace!(LOGGER, "TxHashSet (readonly) extension done."); trace!("TxHashSet (readonly) extension done.");
res res
} }
@ -423,7 +423,7 @@ where
// index saving can be undone // index saving can be undone
let child_batch = batch.child()?; let child_batch = batch.child()?;
{ {
trace!(LOGGER, "Starting new txhashset extension."); trace!("Starting new txhashset extension.");
// TODO - header_mmr may be out ahead via the header_head // TODO - header_mmr may be out ahead via the header_head
// TODO - do we need to handle this via an explicit rewind on the header_mmr? // TODO - do we need to handle this via an explicit rewind on the header_mmr?
@ -436,10 +436,7 @@ where
match res { match res {
Err(e) => { Err(e) => {
debug!( debug!("Error returned, discarding txhashset extension: {}", e);
LOGGER,
"Error returned, discarding txhashset extension: {}", e
);
trees.header_pmmr_h.backend.discard(); trees.header_pmmr_h.backend.discard();
trees.output_pmmr_h.backend.discard(); trees.output_pmmr_h.backend.discard();
trees.rproof_pmmr_h.backend.discard(); trees.rproof_pmmr_h.backend.discard();
@ -448,13 +445,13 @@ where
} }
Ok(r) => { Ok(r) => {
if rollback { if rollback {
trace!(LOGGER, "Rollbacking txhashset extension. sizes {:?}", sizes); trace!("Rollbacking txhashset extension. sizes {:?}", sizes);
trees.header_pmmr_h.backend.discard(); trees.header_pmmr_h.backend.discard();
trees.output_pmmr_h.backend.discard(); trees.output_pmmr_h.backend.discard();
trees.rproof_pmmr_h.backend.discard(); trees.rproof_pmmr_h.backend.discard();
trees.kernel_pmmr_h.backend.discard(); trees.kernel_pmmr_h.backend.discard();
} else { } else {
trace!(LOGGER, "Committing txhashset extension. sizes {:?}", sizes); trace!("Committing txhashset extension. sizes {:?}", sizes);
child_batch.commit()?; child_batch.commit()?;
trees.header_pmmr_h.backend.sync()?; trees.header_pmmr_h.backend.sync()?;
trees.output_pmmr_h.backend.sync()?; trees.output_pmmr_h.backend.sync()?;
@ -466,7 +463,7 @@ where
trees.kernel_pmmr_h.last_pos = sizes.3; trees.kernel_pmmr_h.last_pos = sizes.3;
} }
trace!(LOGGER, "TxHashSet extension done."); trace!("TxHashSet extension done.");
Ok(r) Ok(r)
} }
} }
@ -497,7 +494,7 @@ where
// index saving can be undone // index saving can be undone
let child_batch = batch.child()?; let child_batch = batch.child()?;
{ {
trace!(LOGGER, "Starting new txhashset sync_head extension."); trace!("Starting new txhashset sync_head extension.");
let pmmr = DBPMMR::at(&mut trees.sync_pmmr_h.backend, trees.sync_pmmr_h.last_pos); let pmmr = DBPMMR::at(&mut trees.sync_pmmr_h.backend, trees.sync_pmmr_h.last_pos);
let mut extension = HeaderExtension::new(pmmr, &child_batch, header); let mut extension = HeaderExtension::new(pmmr, &child_batch, header);
@ -510,31 +507,23 @@ where
match res { match res {
Err(e) => { Err(e) => {
debug!( debug!(
LOGGER, "Error returned, discarding txhashset sync_head extension: {}",
"Error returned, discarding txhashset sync_head extension: {}", e e
); );
trees.sync_pmmr_h.backend.discard(); trees.sync_pmmr_h.backend.discard();
Err(e) Err(e)
} }
Ok(r) => { Ok(r) => {
if rollback { if rollback {
trace!( trace!("Rollbacking txhashset sync_head extension. size {:?}", size);
LOGGER,
"Rollbacking txhashset sync_head extension. size {:?}",
size
);
trees.sync_pmmr_h.backend.discard(); trees.sync_pmmr_h.backend.discard();
} else { } else {
trace!( trace!("Committing txhashset sync_head extension. size {:?}", size);
LOGGER,
"Committing txhashset sync_head extension. size {:?}",
size
);
child_batch.commit()?; child_batch.commit()?;
trees.sync_pmmr_h.backend.sync()?; trees.sync_pmmr_h.backend.sync()?;
trees.sync_pmmr_h.last_pos = size; trees.sync_pmmr_h.last_pos = size;
} }
trace!(LOGGER, "TxHashSet sync_head extension done."); trace!("TxHashSet sync_head extension done.");
Ok(r) Ok(r)
} }
} }
@ -564,7 +553,7 @@ where
// index saving can be undone // index saving can be undone
let child_batch = batch.child()?; let child_batch = batch.child()?;
{ {
trace!(LOGGER, "Starting new txhashset header extension."); trace!("Starting new txhashset header extension.");
let pmmr = DBPMMR::at( let pmmr = DBPMMR::at(
&mut trees.header_pmmr_h.backend, &mut trees.header_pmmr_h.backend,
trees.header_pmmr_h.last_pos, trees.header_pmmr_h.last_pos,
@ -579,31 +568,23 @@ where
match res { match res {
Err(e) => { Err(e) => {
debug!( debug!(
LOGGER, "Error returned, discarding txhashset header extension: {}",
"Error returned, discarding txhashset header extension: {}", e e
); );
trees.header_pmmr_h.backend.discard(); trees.header_pmmr_h.backend.discard();
Err(e) Err(e)
} }
Ok(r) => { Ok(r) => {
if rollback { if rollback {
trace!( trace!("Rollbacking txhashset header extension. size {:?}", size);
LOGGER,
"Rollbacking txhashset header extension. size {:?}",
size
);
trees.header_pmmr_h.backend.discard(); trees.header_pmmr_h.backend.discard();
} else { } else {
trace!( trace!("Committing txhashset header extension. size {:?}", size);
LOGGER,
"Committing txhashset header extension. size {:?}",
size
);
child_batch.commit()?; child_batch.commit()?;
trees.header_pmmr_h.backend.sync()?; trees.header_pmmr_h.backend.sync()?;
trees.header_pmmr_h.last_pos = size; trees.header_pmmr_h.last_pos = size;
} }
trace!(LOGGER, "TxHashSet header extension done."); trace!("TxHashSet header extension done.");
Ok(r) Ok(r)
} }
} }
@ -654,7 +635,6 @@ impl<'a> HeaderExtension<'a> {
/// Note the close relationship between header height and insertion index. /// Note the close relationship between header height and insertion index.
pub fn rewind(&mut self, header: &BlockHeader) -> Result<(), Error> { pub fn rewind(&mut self, header: &BlockHeader) -> Result<(), Error> {
debug!( debug!(
LOGGER,
"Rewind header extension to {} at {}", "Rewind header extension to {} at {}",
header.hash(), header.hash(),
header.height header.height
@ -675,7 +655,7 @@ impl<'a> HeaderExtension<'a> {
/// Used when rebuilding the header MMR by reapplying all headers /// Used when rebuilding the header MMR by reapplying all headers
/// including the genesis block header. /// including the genesis block header.
pub fn truncate(&mut self) -> Result<(), Error> { pub fn truncate(&mut self) -> Result<(), Error> {
debug!(LOGGER, "Truncating header extension."); debug!("Truncating header extension.");
self.pmmr.rewind(0).map_err(&ErrorKind::TxHashSetErr)?; self.pmmr.rewind(0).map_err(&ErrorKind::TxHashSetErr)?;
Ok(()) Ok(())
} }
@ -689,7 +669,6 @@ impl<'a> HeaderExtension<'a> {
/// Requires *all* header hashes to be iterated over in ascending order. /// Requires *all* header hashes to be iterated over in ascending order.
pub fn rebuild(&mut self, head: &Tip, genesis: &BlockHeader) -> Result<(), Error> { pub fn rebuild(&mut self, head: &Tip, genesis: &BlockHeader) -> Result<(), Error> {
debug!( debug!(
LOGGER,
"About to rebuild header extension from {:?} to {:?}.", "About to rebuild header extension from {:?} to {:?}.",
genesis.hash(), genesis.hash(),
head.last_block_h, head.last_block_h,
@ -712,7 +691,6 @@ impl<'a> HeaderExtension<'a> {
if header_hashes.len() > 0 { if header_hashes.len() > 0 {
debug!( debug!(
LOGGER,
"Re-applying {} headers to extension, from {:?} to {:?}.", "Re-applying {} headers to extension, from {:?} to {:?}.",
header_hashes.len(), header_hashes.len(),
header_hashes.first().unwrap(), header_hashes.first().unwrap(),
@ -995,10 +973,7 @@ impl<'a> Extension<'a> {
/// We need the hash of each sibling pos from the pos up to the peak /// We need the hash of each sibling pos from the pos up to the peak
/// including the sibling leaf node which may have been removed. /// including the sibling leaf node which may have been removed.
pub fn merkle_proof(&self, output: &OutputIdentifier) -> Result<MerkleProof, Error> { pub fn merkle_proof(&self, output: &OutputIdentifier) -> Result<MerkleProof, Error> {
debug!( debug!("txhashset: merkle_proof: output: {:?}", output.commit,);
LOGGER,
"txhashset: merkle_proof: output: {:?}", output.commit,
);
// then calculate the Merkle Proof based on the known pos // then calculate the Merkle Proof based on the known pos
let pos = self.batch.get_output_pos(&output.commit)?; let pos = self.batch.get_output_pos(&output.commit)?;
let merkle_proof = self let merkle_proof = self
@ -1027,12 +1002,7 @@ impl<'a> Extension<'a> {
/// Rewinds the MMRs to the provided block, rewinding to the last output pos /// Rewinds the MMRs to the provided block, rewinding to the last output pos
/// and last kernel pos of that block. /// and last kernel pos of that block.
pub fn rewind(&mut self, header: &BlockHeader) -> Result<(), Error> { pub fn rewind(&mut self, header: &BlockHeader) -> Result<(), Error> {
debug!( debug!("Rewind to header {} at {}", header.hash(), header.height,);
LOGGER,
"Rewind to header {} at {}",
header.hash(),
header.height,
);
// We need to build bitmaps of added and removed output positions // We need to build bitmaps of added and removed output positions
// so we can correctly rewind all operations applied to the output MMR // so we can correctly rewind all operations applied to the output MMR
@ -1067,11 +1037,8 @@ impl<'a> Extension<'a> {
rewind_rm_pos: &Bitmap, rewind_rm_pos: &Bitmap,
) -> Result<(), Error> { ) -> Result<(), Error> {
debug!( debug!(
LOGGER,
"txhashset: rewind_to_pos: header {}, output {}, kernel {}", "txhashset: rewind_to_pos: header {}, output {}, kernel {}",
header_pos, header_pos, output_pos, kernel_pos,
output_pos,
kernel_pos,
); );
self.header_pmmr self.header_pmmr
@ -1191,7 +1158,6 @@ impl<'a> Extension<'a> {
} }
debug!( debug!(
LOGGER,
"txhashset: validated the header {}, output {}, rproof {}, kernel {} mmrs, took {}s", "txhashset: validated the header {}, output {}, rproof {}, kernel {} mmrs, took {}s",
self.header_pmmr.unpruned_size(), self.header_pmmr.unpruned_size(),
self.output_pmmr.unpruned_size(), self.output_pmmr.unpruned_size(),
@ -1270,22 +1236,22 @@ impl<'a> Extension<'a> {
/// Dumps the output MMR. /// Dumps the output MMR.
/// We use this after compacting for visual confirmation that it worked. /// We use this after compacting for visual confirmation that it worked.
pub fn dump_output_pmmr(&self) { pub fn dump_output_pmmr(&self) {
debug!(LOGGER, "-- outputs --"); debug!("-- outputs --");
self.output_pmmr.dump_from_file(false); self.output_pmmr.dump_from_file(false);
debug!(LOGGER, "--"); debug!("--");
self.output_pmmr.dump_stats(); self.output_pmmr.dump_stats();
debug!(LOGGER, "-- end of outputs --"); debug!("-- end of outputs --");
} }
/// Dumps the state of the 3 sum trees to stdout for debugging. Short /// Dumps the state of the 3 sum trees to stdout for debugging. Short
/// version only prints the Output tree. /// version only prints the Output tree.
pub fn dump(&self, short: bool) { pub fn dump(&self, short: bool) {
debug!(LOGGER, "-- outputs --"); debug!("-- outputs --");
self.output_pmmr.dump(short); self.output_pmmr.dump(short);
if !short { if !short {
debug!(LOGGER, "-- range proofs --"); debug!("-- range proofs --");
self.rproof_pmmr.dump(short); self.rproof_pmmr.dump(short);
debug!(LOGGER, "-- kernels --"); debug!("-- kernels --");
self.kernel_pmmr.dump(short); self.kernel_pmmr.dump(short);
} }
} }
@ -1318,7 +1284,6 @@ impl<'a> Extension<'a> {
} }
debug!( debug!(
LOGGER,
"txhashset: verified {} kernel signatures, pmmr size {}, took {}s", "txhashset: verified {} kernel signatures, pmmr size {}, took {}s",
kern_count, kern_count,
self.kernel_pmmr.unpruned_size(), self.kernel_pmmr.unpruned_size(),
@ -1353,8 +1318,8 @@ impl<'a> Extension<'a> {
commits.clear(); commits.clear();
proofs.clear(); proofs.clear();
debug!( debug!(
LOGGER, "txhashset: verify_rangeproofs: verified {} rangeproofs",
"txhashset: verify_rangeproofs: verified {} rangeproofs", proof_count, proof_count,
); );
} }
} }
@ -1370,13 +1335,12 @@ impl<'a> Extension<'a> {
commits.clear(); commits.clear();
proofs.clear(); proofs.clear();
debug!( debug!(
LOGGER, "txhashset: verify_rangeproofs: verified {} rangeproofs",
"txhashset: verify_rangeproofs: verified {} rangeproofs", proof_count, proof_count,
); );
} }
debug!( debug!(
LOGGER,
"txhashset: verified {} rangeproofs, pmmr size {}, took {}s", "txhashset: verified {} rangeproofs, pmmr size {}, took {}s",
proof_count, proof_count,
self.rproof_pmmr.unpruned_size(), self.rproof_pmmr.unpruned_size(),
@ -1452,10 +1416,7 @@ fn check_and_remove_files(txhashset_path: &PathBuf, header: &BlockHeader) -> Res
// Removing unexpected directories if needed // Removing unexpected directories if needed
if !dir_difference.is_empty() { if !dir_difference.is_empty() {
debug!( debug!("Unexpected folder(s) found in txhashset folder, removing.");
LOGGER,
"Unexpected folder(s) found in txhashset folder, removing."
);
for diff in dir_difference { for diff in dir_difference {
let diff_path = txhashset_path.join(diff); let diff_path = txhashset_path.join(diff);
file::delete(diff_path)?; file::delete(diff_path)?;
@ -1492,7 +1453,6 @@ fn check_and_remove_files(txhashset_path: &PathBuf, header: &BlockHeader) -> Res
.collect(); .collect();
if !difference.is_empty() { if !difference.is_empty() {
debug!( debug!(
LOGGER,
"Unexpected file(s) found in txhashset subfolder {:?}, removing.", "Unexpected file(s) found in txhashset subfolder {:?}, removing.",
&subdirectory_path &subdirectory_path
); );
@ -1520,10 +1480,8 @@ pub fn input_pos_to_rewind(
if head_header.height < block_header.height { if head_header.height < block_header.height {
debug!( debug!(
LOGGER,
"input_pos_to_rewind: {} < {}, nothing to rewind", "input_pos_to_rewind: {} < {}, nothing to rewind",
head_header.height, head_header.height, block_header.height
block_header.height
); );
return Ok(Bitmap::create()); return Ok(Bitmap::create());
} }

View file

@ -367,7 +367,7 @@ fn comments() -> HashMap<String, String> {
retval.insert( retval.insert(
"stdout_log_level".to_string(), "stdout_log_level".to_string(),
" "
#log level for stdout: Critical, Error, Warning, Info, Debug, Trace #log level for stdout: Error, Warning, Info, Debug, Trace
".to_string(), ".to_string(),
); );
@ -381,7 +381,7 @@ fn comments() -> HashMap<String, String> {
retval.insert( retval.insert(
"file_log_level".to_string(), "file_log_level".to_string(),
" "
#log level for file: Critical, Error, Warning, Info, Debug, Trace #log level for file: Error, Warning, Info, Debug, Trace
".to_string(), ".to_string(),
); );
@ -399,6 +399,14 @@ fn comments() -> HashMap<String, String> {
".to_string(), ".to_string(),
); );
retval.insert(
"log_max_size".to_string(),
"
#maximum log file size in bytes before performing log rotation
#comment it to disable log rotation
".to_string(),
);
retval retval
} }
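
The log_max_size comment added above documents an option that, per the commit message, defaults to 16 MiB and disables rotation when commented out. A hypothetical sketch of how such an option can be modelled on the Rust side; the field names and defaults are assumptions for illustration, not the actual grin config structs:

#[derive(Debug, Clone)]
pub struct LoggingConfig {
    pub stdout_log_level: String, // e.g. "Warning"
    pub file_log_level: String,   // e.g. "Info"
    pub log_file_path: String,    // rotated, compressed copies land in this directory
    /// Maximum log file size in bytes before rotation; `None` (the option
    /// commented out in grin-server.toml) disables rotation entirely.
    pub log_max_size: Option<u64>,
}

impl Default for LoggingConfig {
    fn default() -> LoggingConfig {
        LoggingConfig {
            stdout_log_level: "Warning".to_string(),
            file_log_level: "Info".to_string(),
            log_file_path: "grin.log".to_string(),
            log_max_size: Some(16 * 1024 * 1024), // 16 MiB default per the commit message
        }
    }
}

fn main() {
    println!("{:?}", LoggingConfig::default());
}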

View file

@ -20,7 +20,7 @@ rand = "0.5"
serde = "1" serde = "1"
serde_derive = "1" serde_derive = "1"
siphasher = "0.2" siphasher = "0.2"
slog = { version = "~2.3", features = ["max_level_trace", "release_max_level_trace"] } log = "0.4"
chrono = "0.4.4" chrono = "0.4.4"
grin_keychain = { path = "../keychain" } grin_keychain = { path = "../keychain" }

View file

@ -36,7 +36,7 @@ use global;
use keychain::{self, BlindingFactor}; use keychain::{self, BlindingFactor};
use pow::{Difficulty, Proof, ProofOfWork}; use pow::{Difficulty, Proof, ProofOfWork};
use ser::{self, PMMRable, Readable, Reader, Writeable, Writer}; use ser::{self, PMMRable, Readable, Reader, Writeable, Writer};
use util::{secp, static_secp_instance, LOGGER}; use util::{secp, static_secp_instance};
/// Errors thrown by Block validation /// Errors thrown by Block validation
#[derive(Debug, Clone, Eq, PartialEq, Fail)] #[derive(Debug, Clone, Eq, PartialEq, Fail)]
@ -425,12 +425,7 @@ impl Block {
/// Note: caller must validate the block themselves, we do not validate it /// Note: caller must validate the block themselves, we do not validate it
/// here. /// here.
pub fn hydrate_from(cb: CompactBlock, txs: Vec<Transaction>) -> Result<Block, Error> { pub fn hydrate_from(cb: CompactBlock, txs: Vec<Transaction>) -> Result<Block, Error> {
trace!( trace!("block: hydrate_from: {}, {} txs", cb.hash(), txs.len(),);
LOGGER,
"block: hydrate_from: {}, {} txs",
cb.hash(),
txs.len(),
);
let header = cb.header.clone(); let header = cb.header.clone();

View file

@ -22,7 +22,6 @@ use core::merkle_proof::MerkleProof;
use core::pmmr::{Backend, ReadonlyPMMR}; use core::pmmr::{Backend, ReadonlyPMMR};
use core::BlockHeader; use core::BlockHeader;
use ser::{PMMRIndexHashable, PMMRable}; use ser::{PMMRIndexHashable, PMMRable};
use util::LOGGER;
/// 64 bits all ones: 0b11111111...1 /// 64 bits all ones: 0b11111111...1
const ALL_ONES: u64 = u64::MAX; const ALL_ONES: u64 = u64::MAX;
@ -137,7 +136,7 @@ where
/// Build a Merkle proof for the element at the given position. /// Build a Merkle proof for the element at the given position.
pub fn merkle_proof(&self, pos: u64) -> Result<MerkleProof, String> { pub fn merkle_proof(&self, pos: u64) -> Result<MerkleProof, String> {
debug!(LOGGER, "merkle_proof {}, last_pos {}", pos, self.last_pos); debug!("merkle_proof {}, last_pos {}", pos, self.last_pos);
// check this pos is actually a leaf in the MMR // check this pos is actually a leaf in the MMR
if !is_leaf(pos) { if !is_leaf(pos) {
@ -384,14 +383,14 @@ where
None => hashes.push_str(&format!("{:>8} ", "??")), None => hashes.push_str(&format!("{:>8} ", "??")),
} }
} }
trace!(LOGGER, "{}", idx); trace!("{}", idx);
trace!(LOGGER, "{}", hashes); trace!("{}", hashes);
} }
} }
/// Prints PMMR statistics to the logs, used for debugging. /// Prints PMMR statistics to the logs, used for debugging.
pub fn dump_stats(&self) { pub fn dump_stats(&self) {
debug!(LOGGER, "pmmr: unpruned - {}", self.unpruned_size()); debug!("pmmr: unpruned - {}", self.unpruned_size());
self.backend.dump_stats(); self.backend.dump_stats();
} }
@ -418,8 +417,8 @@ where
None => hashes.push_str(&format!("{:>8} ", " .")), None => hashes.push_str(&format!("{:>8} ", " .")),
} }
} }
debug!(LOGGER, "{}", idx); debug!("{}", idx);
debug!(LOGGER, "{}", hashes); debug!("{}", hashes);
} }
} }
} }

View file

@ -19,7 +19,6 @@ use lru_cache::LruCache;
use core::hash::{Hash, Hashed}; use core::hash::{Hash, Hashed};
use core::{Output, TxKernel}; use core::{Output, TxKernel};
use util::LOGGER;
/// Verifier cache for caching expensive verification results. /// Verifier cache for caching expensive verification results.
/// Specifically the following - /// Specifically the following -
@ -72,7 +71,6 @@ impl VerifierCache for LruVerifierCache {
}).cloned() }).cloned()
.collect::<Vec<_>>(); .collect::<Vec<_>>();
debug!( debug!(
LOGGER,
"lru_verifier_cache: kernel sigs: {}, not cached (must verify): {}", "lru_verifier_cache: kernel sigs: {}, not cached (must verify): {}",
kernels.len(), kernels.len(),
res.len() res.len()
@ -91,7 +89,6 @@ impl VerifierCache for LruVerifierCache {
}).cloned() }).cloned()
.collect::<Vec<_>>(); .collect::<Vec<_>>();
debug!( debug!(
LOGGER,
"lru_verifier_cache: rangeproofs: {}, not cached (must verify): {}", "lru_verifier_cache: rangeproofs: {}, not cached (must verify): {}",
outputs.len(), outputs.len(),
res.len() res.len()

View file

@ -38,7 +38,7 @@ extern crate serde;
extern crate serde_derive; extern crate serde_derive;
extern crate siphasher; extern crate siphasher;
#[macro_use] #[macro_use]
extern crate slog; extern crate log;
extern crate chrono; extern crate chrono;
extern crate failure; extern crate failure;
#[macro_use] #[macro_use]

View file

@ -9,7 +9,7 @@ publish = false
byteorder = "1" byteorder = "1"
blake2-rfc = "0.2" blake2-rfc = "0.2"
rand = "0.5" rand = "0.5"
slog = { version = "~2.3", features = ["max_level_trace", "release_max_level_trace"] } log = "0.4"
serde = "1" serde = "1"
serde_derive = "1" serde_derive = "1"
serde_json = "1" serde_json = "1"

View file

@ -24,10 +24,10 @@ extern crate serde;
extern crate serde_derive; extern crate serde_derive;
extern crate digest; extern crate digest;
extern crate hmac; extern crate hmac;
extern crate log;
extern crate ripemd160; extern crate ripemd160;
extern crate serde_json; extern crate serde_json;
extern crate sha2; extern crate sha2;
extern crate slog;
extern crate uuid; extern crate uuid;
mod base58; mod base58;

View file

@ -15,7 +15,7 @@ num = "0.1"
rand = "0.5" rand = "0.5"
serde = "1" serde = "1"
serde_derive = "1" serde_derive = "1"
slog = { version = "~2.3", features = ["max_level_trace", "release_max_level_trace"] } log = "0.4"
chrono = { version = "0.4.4", features = ["serde"] } chrono = { version = "0.4.4", features = ["serde"] }
grin_core = { path = "../core" } grin_core = { path = "../core" }

View file

@ -31,7 +31,6 @@ use util::RwLock;
use core::ser; use core::ser;
use msg::{read_body, read_exact, read_header, write_all, write_to_buf, MsgHeader, Type}; use msg::{read_body, read_exact, read_header, write_all, write_to_buf, MsgHeader, Type};
use types::Error; use types::Error;
use util::LOGGER;
/// A trait to be implemented in order to receive messages from the /// A trait to be implemented in order to receive messages from the
/// connection. Allows providing an optional response. /// connection. Allows providing an optional response.
@ -234,7 +233,6 @@ fn poll<H>(
if let Some(h) = try_break!(error_tx, read_header(conn, None)) { if let Some(h) = try_break!(error_tx, read_header(conn, None)) {
let msg = Message::from_header(h, conn); let msg = Message::from_header(h, conn);
trace!( trace!(
LOGGER,
"Received message header, type {:?}, len {}.", "Received message header, type {:?}, len {}.",
msg.header.msg_type, msg.header.msg_type,
msg.header.msg_len msg.header.msg_len
@ -276,7 +274,6 @@ fn poll<H>(
// check the close channel // check the close channel
if let Ok(_) = close_rx.try_recv() { if let Ok(_) = close_rx.try_recv() {
debug!( debug!(
LOGGER,
"Connection close with {} initiated by us", "Connection close with {} initiated by us",
conn.peer_addr() conn.peer_addr()
.map(|a| a.to_string()) .map(|a| a.to_string())

View file

@ -25,7 +25,6 @@ use core::pow::Difficulty;
use msg::{read_message, write_message, Hand, Shake, SockAddr, Type, PROTOCOL_VERSION, USER_AGENT}; use msg::{read_message, write_message, Hand, Shake, SockAddr, Type, PROTOCOL_VERSION, USER_AGENT};
use peer::Peer; use peer::Peer;
use types::{Capabilities, Direction, Error, P2PConfig, PeerInfo, PeerLiveInfo}; use types::{Capabilities, Direction, Error, P2PConfig, PeerInfo, PeerLiveInfo};
use util::LOGGER;
const NONCES_CAP: usize = 100; const NONCES_CAP: usize = 100;
@ -115,7 +114,6 @@ impl Handshake {
} }
debug!( debug!(
LOGGER,
"Connected! Cumulative {} offered from {:?} {:?} {:?}", "Connected! Cumulative {} offered from {:?} {:?} {:?}",
shake.total_difficulty.to_num(), shake.total_difficulty.to_num(),
peer_info.addr, peer_info.addr,
@ -186,7 +184,7 @@ impl Handshake {
}; };
write_message(conn, shake, Type::Shake)?; write_message(conn, shake, Type::Shake)?;
trace!(LOGGER, "Success handshake with {}.", peer_info.addr); trace!("Success handshake with {}.", peer_info.addr);
// when more than one protocol version is supported, choosing should go here // when more than one protocol version is supported, choosing should go here
Ok(peer_info) Ok(peer_info)

View file

@ -37,7 +37,7 @@ extern crate serde;
#[macro_use] #[macro_use]
extern crate serde_derive; extern crate serde_derive;
#[macro_use] #[macro_use]
extern crate slog; extern crate log;
extern crate chrono; extern crate chrono;
mod conn; mod conn;

View file

@ -26,7 +26,6 @@ use core::pow::Difficulty;
use core::ser::{self, Readable, Reader, Writeable, Writer}; use core::ser::{self, Readable, Reader, Writeable, Writer};
use types::{Capabilities, Error, ReasonForBan, MAX_BLOCK_HEADERS, MAX_LOCATORS, MAX_PEER_ADDRS}; use types::{Capabilities, Error, ReasonForBan, MAX_BLOCK_HEADERS, MAX_LOCATORS, MAX_PEER_ADDRS};
use util::LOGGER;
/// Current latest version of the protocol /// Current latest version of the protocol
pub const PROTOCOL_VERSION: u32 = 1; pub const PROTOCOL_VERSION: u32 = 1;
@ -207,8 +206,8 @@ pub fn read_header(conn: &mut TcpStream, msg_type: Option<Type>) -> Result<MsgHe
// TODO 4x the limits for now to leave ourselves space to change things // TODO 4x the limits for now to leave ourselves space to change things
if header.msg_len > max_len * 4 { if header.msg_len > max_len * 4 {
error!( error!(
LOGGER, "Too large read {}, had {}, wanted {}.",
"Too large read {}, had {}, wanted {}.", header.msg_type as u8, max_len, header.msg_len header.msg_type as u8, max_len, header.msg_len
); );
return Err(Error::Serialization(ser::Error::TooLargeReadErr)); return Err(Error::Serialization(ser::Error::TooLargeReadErr));
} }

View file

@ -28,7 +28,6 @@ use protocol::Protocol;
use types::{ use types::{
Capabilities, ChainAdapter, Error, NetAdapter, P2PConfig, PeerInfo, ReasonForBan, TxHashSetRead, Capabilities, ChainAdapter, Error, NetAdapter, P2PConfig, PeerInfo, ReasonForBan, TxHashSetRead,
}; };
use util::LOGGER;
const MAX_TRACK_SIZE: usize = 30; const MAX_TRACK_SIZE: usize = 30;
@ -104,8 +103,8 @@ impl Peer {
if let Some(ref denied) = config.peers_deny { if let Some(ref denied) = config.peers_deny {
if denied.contains(&peer) { if denied.contains(&peer) {
debug!( debug!(
LOGGER, "checking peer allowed/denied: {:?} explicitly denied",
"checking peer allowed/denied: {:?} explicitly denied", peer_addr peer_addr
); );
return true; return true;
} }
@ -113,14 +112,14 @@ impl Peer {
if let Some(ref allowed) = config.peers_allow { if let Some(ref allowed) = config.peers_allow {
if allowed.contains(&peer) { if allowed.contains(&peer) {
debug!( debug!(
LOGGER, "checking peer allowed/denied: {:?} explicitly allowed",
"checking peer allowed/denied: {:?} explicitly allowed", peer_addr peer_addr
); );
return false; return false;
} else { } else {
debug!( debug!(
LOGGER, "checking peer allowed/denied: {:?} not explicitly allowed, denying",
"checking peer allowed/denied: {:?} not explicitly allowed, denying", peer_addr peer_addr
); );
return true; return true;
} }
@ -198,13 +197,10 @@ impl Peer {
.unwrap() .unwrap()
.send(ban_reason_msg, msg::Type::BanReason) .send(ban_reason_msg, msg::Type::BanReason)
{ {
Ok(_) => debug!( Ok(_) => debug!("Sent ban reason {:?} to {}", ban_reason, self.info.addr),
LOGGER,
"Sent ban reason {:?} to {}", ban_reason, self.info.addr
),
Err(e) => error!( Err(e) => error!(
LOGGER, "Could not send ban reason {:?} to {}: {:?}",
"Could not send ban reason {:?} to {}: {:?}", ban_reason, self.info.addr, e ban_reason, self.info.addr, e
), ),
}; };
} }
@ -213,7 +209,7 @@ impl Peer {
/// if the remote peer is known to already have the block. /// if the remote peer is known to already have the block.
pub fn send_block(&self, b: &core::Block) -> Result<bool, Error> { pub fn send_block(&self, b: &core::Block) -> Result<bool, Error> {
if !self.tracking_adapter.has(b.hash()) { if !self.tracking_adapter.has(b.hash()) {
trace!(LOGGER, "Send block {} to {}", b.hash(), self.info.addr); trace!("Send block {} to {}", b.hash(), self.info.addr);
self.connection self.connection
.as_ref() .as_ref()
.unwrap() .unwrap()
@ -221,7 +217,6 @@ impl Peer {
Ok(true) Ok(true)
} else { } else {
debug!( debug!(
LOGGER,
"Suppress block send {} to {} (already seen)", "Suppress block send {} to {} (already seen)",
b.hash(), b.hash(),
self.info.addr, self.info.addr,
@ -232,12 +227,7 @@ impl Peer {
pub fn send_compact_block(&self, b: &core::CompactBlock) -> Result<bool, Error> { pub fn send_compact_block(&self, b: &core::CompactBlock) -> Result<bool, Error> {
if !self.tracking_adapter.has(b.hash()) { if !self.tracking_adapter.has(b.hash()) {
trace!( trace!("Send compact block {} to {}", b.hash(), self.info.addr);
LOGGER,
"Send compact block {} to {}",
b.hash(),
self.info.addr
);
self.connection self.connection
.as_ref() .as_ref()
.unwrap() .unwrap()
@ -245,7 +235,6 @@ impl Peer {
Ok(true) Ok(true)
} else { } else {
debug!( debug!(
LOGGER,
"Suppress compact block send {} to {} (already seen)", "Suppress compact block send {} to {} (already seen)",
b.hash(), b.hash(),
self.info.addr, self.info.addr,
@ -256,7 +245,7 @@ impl Peer {
pub fn send_header(&self, bh: &core::BlockHeader) -> Result<bool, Error> { pub fn send_header(&self, bh: &core::BlockHeader) -> Result<bool, Error> {
if !self.tracking_adapter.has(bh.hash()) { if !self.tracking_adapter.has(bh.hash()) {
debug!(LOGGER, "Send header {} to {}", bh.hash(), self.info.addr); debug!("Send header {} to {}", bh.hash(), self.info.addr);
self.connection self.connection
.as_ref() .as_ref()
.unwrap() .unwrap()
@ -264,7 +253,6 @@ impl Peer {
Ok(true) Ok(true)
} else { } else {
debug!( debug!(
LOGGER,
"Suppress header send {} to {} (already seen)", "Suppress header send {} to {} (already seen)",
bh.hash(), bh.hash(),
self.info.addr, self.info.addr,
@ -277,7 +265,7 @@ impl Peer {
/// dropped if the remote peer is known to already have the transaction. /// dropped if the remote peer is known to already have the transaction.
pub fn send_transaction(&self, tx: &core::Transaction) -> Result<bool, Error> { pub fn send_transaction(&self, tx: &core::Transaction) -> Result<bool, Error> {
if !self.tracking_adapter.has(tx.hash()) { if !self.tracking_adapter.has(tx.hash()) {
debug!(LOGGER, "Send tx {} to {}", tx.hash(), self.info.addr); debug!("Send tx {} to {}", tx.hash(), self.info.addr);
self.connection self.connection
.as_ref() .as_ref()
.unwrap() .unwrap()
@ -285,7 +273,6 @@ impl Peer {
Ok(true) Ok(true)
} else { } else {
debug!( debug!(
LOGGER,
"Not sending tx {} to {} (already seen)", "Not sending tx {} to {} (already seen)",
tx.hash(), tx.hash(),
self.info.addr self.info.addr
@ -298,7 +285,7 @@ impl Peer {
/// Note: tracking adapter is ignored for stem transactions (while under /// Note: tracking adapter is ignored for stem transactions (while under
/// embargo). /// embargo).
pub fn send_stem_transaction(&self, tx: &core::Transaction) -> Result<(), Error> { pub fn send_stem_transaction(&self, tx: &core::Transaction) -> Result<(), Error> {
debug!(LOGGER, "Send (stem) tx {} to {}", tx.hash(), self.info.addr); debug!("Send (stem) tx {} to {}", tx.hash(), self.info.addr);
self.connection self.connection
.as_ref() .as_ref()
.unwrap() .unwrap()
@ -316,10 +303,7 @@ impl Peer {
/// Sends a request for a specific block by hash /// Sends a request for a specific block by hash
pub fn send_block_request(&self, h: Hash) -> Result<(), Error> { pub fn send_block_request(&self, h: Hash) -> Result<(), Error> {
debug!( debug!("Requesting block {} from peer {}.", h, self.info.addr);
LOGGER,
"Requesting block {} from peer {}.", h, self.info.addr
);
self.connection self.connection
.as_ref() .as_ref()
.unwrap() .unwrap()
@ -328,10 +312,7 @@ impl Peer {
/// Sends a request for a specific compact block by hash /// Sends a request for a specific compact block by hash
pub fn send_compact_block_request(&self, h: Hash) -> Result<(), Error> { pub fn send_compact_block_request(&self, h: Hash) -> Result<(), Error> {
debug!( debug!("Requesting compact block {} from {}", h, self.info.addr);
LOGGER,
"Requesting compact block {} from {}", h, self.info.addr
);
self.connection self.connection
.as_ref() .as_ref()
.unwrap() .unwrap()
@ -339,7 +320,7 @@ impl Peer {
} }
pub fn send_peer_request(&self, capab: Capabilities) -> Result<(), Error> { pub fn send_peer_request(&self, capab: Capabilities) -> Result<(), Error> {
debug!(LOGGER, "Asking {} for more peers.", self.info.addr); debug!("Asking {} for more peers.", self.info.addr);
self.connection.as_ref().unwrap().send( self.connection.as_ref().unwrap().send(
&GetPeerAddrs { &GetPeerAddrs {
capabilities: capab, capabilities: capab,
@ -350,8 +331,8 @@ impl Peer {
pub fn send_txhashset_request(&self, height: u64, hash: Hash) -> Result<(), Error> { pub fn send_txhashset_request(&self, height: u64, hash: Hash) -> Result<(), Error> {
debug!( debug!(
LOGGER, "Asking {} for txhashset archive at {} {}.",
"Asking {} for txhashset archive at {} {}.", self.info.addr, height, hash self.info.addr, height, hash
); );
self.connection.as_ref().unwrap().send( self.connection.as_ref().unwrap().send(
&TxHashSetRequest { hash, height }, &TxHashSetRequest { hash, height },
@ -378,8 +359,8 @@ impl Peer {
}; };
if need_stop { if need_stop {
debug!( debug!(
LOGGER, "Client {} corrupted, will disconnect ({:?}).",
"Client {} corrupted, will disconnect ({:?}).", self.info.addr, e self.info.addr, e
); );
self.stop(); self.stop();
} }
@ -396,7 +377,7 @@ impl Peer {
} }
}; };
if need_stop { if need_stop {
debug!(LOGGER, "Client {} connection lost: {:?}", self.info.addr, e); debug!("Client {} connection lost: {:?}", self.info.addr, e);
self.stop(); self.stop();
} }
false false

View file

@ -24,7 +24,6 @@ use chrono::prelude::*;
use core::core; use core::core;
use core::core::hash::{Hash, Hashed}; use core::core::hash::{Hash, Hashed};
use core::pow::Difficulty; use core::pow::Difficulty;
use util::LOGGER;
use peer::Peer; use peer::Peer;
use store::{PeerData, PeerStore, State}; use store::{PeerData, PeerStore, State};
@ -71,7 +70,7 @@ impl Peers {
}; };
addr = peer.info.addr.clone(); addr = peer.info.addr.clone();
} }
debug!(LOGGER, "Saving newly connected peer {}.", addr); debug!("Saving newly connected peer {}.", addr);
self.save_peer(&peer_data)?; self.save_peer(&peer_data)?;
{ {
@ -94,11 +93,11 @@ impl Peers {
.write() .write()
.insert(Utc::now().timestamp(), peer.clone()); .insert(Utc::now().timestamp(), peer.clone());
debug!( debug!(
LOGGER, "Successfully updated Dandelion relay to: {}",
"Successfully updated Dandelion relay to: {}", peer.info.addr peer.info.addr
); );
} }
None => debug!(LOGGER, "Could not update dandelion relay"), None => debug!("Could not update dandelion relay"),
}; };
} }
@ -238,11 +237,11 @@ impl Peers {
/// Ban a peer, disconnecting it if we're currently connected /// Ban a peer, disconnecting it if we're currently connected
pub fn ban_peer(&self, peer_addr: &SocketAddr, ban_reason: ReasonForBan) { pub fn ban_peer(&self, peer_addr: &SocketAddr, ban_reason: ReasonForBan) {
if let Err(e) = self.update_state(*peer_addr, State::Banned) { if let Err(e) = self.update_state(*peer_addr, State::Banned) {
error!(LOGGER, "Couldn't ban {}: {:?}", peer_addr, e); error!("Couldn't ban {}: {:?}", peer_addr, e);
} }
if let Some(peer) = self.get_connected_peer(peer_addr) { if let Some(peer) = self.get_connected_peer(peer_addr) {
debug!(LOGGER, "Banning peer {}", peer_addr); debug!("Banning peer {}", peer_addr);
// setting peer status will get it removed at the next clean_peer // setting peer status will get it removed at the next clean_peer
peer.send_ban_reason(ban_reason); peer.send_ban_reason(ban_reason);
peer.set_banned(); peer.set_banned();
@ -256,13 +255,13 @@ impl Peers {
Ok(_) => { Ok(_) => {
if self.is_banned(*peer_addr) { if self.is_banned(*peer_addr) {
if let Err(e) = self.update_state(*peer_addr, State::Healthy) { if let Err(e) = self.update_state(*peer_addr, State::Healthy) {
error!(LOGGER, "Couldn't unban {}: {:?}", peer_addr, e); error!("Couldn't unban {}: {:?}", peer_addr, e);
} }
} else { } else {
error!(LOGGER, "Couldn't unban {}: peer is not banned", peer_addr); error!("Couldn't unban {}: peer is not banned", peer_addr);
} }
} }
Err(e) => error!(LOGGER, "Couldn't unban {}: {:?}", peer_addr, e), Err(e) => error!("Couldn't unban {}: {:?}", peer_addr, e),
}; };
} }
@ -278,7 +277,7 @@ impl Peers {
match inner(&p) { match inner(&p) {
Ok(true) => count += 1, Ok(true) => count += 1,
Ok(false) => (), Ok(false) => (),
Err(e) => debug!(LOGGER, "Error sending {} to peer: {:?}", obj_name, e), Err(e) => debug!("Error sending {} to peer: {:?}", obj_name, e),
} }
if count >= num_peers { if count >= num_peers {
@ -297,7 +296,6 @@ impl Peers {
let num_peers = self.config.peer_max_count(); let num_peers = self.config.peer_max_count();
let count = self.broadcast("compact block", num_peers, |p| p.send_compact_block(b)); let count = self.broadcast("compact block", num_peers, |p| p.send_compact_block(b));
debug!( debug!(
LOGGER,
"broadcast_compact_block: {}, {} at {}, to {} peers, done.", "broadcast_compact_block: {}, {} at {}, to {} peers, done.",
b.hash(), b.hash(),
b.header.pow.total_difficulty, b.header.pow.total_difficulty,
@ -315,7 +313,6 @@ impl Peers {
let num_peers = self.config.peer_min_preferred_count(); let num_peers = self.config.peer_min_preferred_count();
let count = self.broadcast("header", num_peers, |p| p.send_header(bh)); let count = self.broadcast("header", num_peers, |p| p.send_header(bh));
debug!( debug!(
LOGGER,
"broadcast_header: {}, {} at {}, to {} peers, done.", "broadcast_header: {}, {} at {}, to {} peers, done.",
bh.hash(), bh.hash(),
bh.pow.total_difficulty, bh.pow.total_difficulty,
@ -328,7 +325,7 @@ impl Peers {
pub fn broadcast_stem_transaction(&self, tx: &core::Transaction) -> Result<(), Error> { pub fn broadcast_stem_transaction(&self, tx: &core::Transaction) -> Result<(), Error> {
let dandelion_relay = self.get_dandelion_relay(); let dandelion_relay = self.get_dandelion_relay();
if dandelion_relay.is_empty() { if dandelion_relay.is_empty() {
debug!(LOGGER, "No dandelion relay, updating."); debug!("No dandelion relay, updating.");
self.update_dandelion_relay(); self.update_dandelion_relay();
} }
// If still return an error, let the caller handle this as they see fit. // If still return an error, let the caller handle this as they see fit.
@ -339,10 +336,7 @@ impl Peers {
for relay in dandelion_relay.values() { for relay in dandelion_relay.values() {
if relay.is_connected() { if relay.is_connected() {
if let Err(e) = relay.send_stem_transaction(tx) { if let Err(e) = relay.send_stem_transaction(tx) {
debug!( debug!("Error sending stem transaction to peer relay: {:?}", e);
LOGGER,
"Error sending stem transaction to peer relay: {:?}", e
);
} }
} }
} }
@ -358,7 +352,6 @@ impl Peers {
let num_peers = self.config.peer_min_preferred_count(); let num_peers = self.config.peer_min_preferred_count();
let count = self.broadcast("transaction", num_peers, |p| p.send_transaction(tx)); let count = self.broadcast("transaction", num_peers, |p| p.send_transaction(tx));
trace!( trace!(
LOGGER,
"broadcast_transaction: {}, to {} peers, done.", "broadcast_transaction: {}, to {} peers, done.",
tx.hash(), tx.hash(),
count, count,
@ -417,15 +410,15 @@ impl Peers {
// build a list of peers to be cleaned up // build a list of peers to be cleaned up
for peer in self.peers.read().values() { for peer in self.peers.read().values() {
if peer.is_banned() { if peer.is_banned() {
debug!(LOGGER, "clean_peers {:?}, peer banned", peer.info.addr); debug!("clean_peers {:?}, peer banned", peer.info.addr);
rm.push(peer.clone()); rm.push(peer.clone());
} else if !peer.is_connected() { } else if !peer.is_connected() {
debug!(LOGGER, "clean_peers {:?}, not connected", peer.info.addr); debug!("clean_peers {:?}, not connected", peer.info.addr);
rm.push(peer.clone()); rm.push(peer.clone());
} else { } else {
let (stuck, diff) = peer.is_stuck(); let (stuck, diff) = peer.is_stuck();
if stuck && diff < self.adapter.total_difficulty() { if stuck && diff < self.adapter.total_difficulty() {
debug!(LOGGER, "clean_peers {:?}, stuck peer", peer.info.addr); debug!("clean_peers {:?}, stuck peer", peer.info.addr);
peer.stop(); peer.stop();
let _ = self.update_state(peer.info.addr, State::Defunct); let _ = self.update_state(peer.info.addr, State::Defunct);
rm.push(peer.clone()); rm.push(peer.clone());
@ -497,8 +490,8 @@ impl ChainAdapter for Peers {
// if the peer sent us a block that's intrinsically bad // if the peer sent us a block that's intrinsically bad
// they are either mistaken or malevolent, both of which require a ban // they are either mistaken or malevolent, both of which require a ban
debug!( debug!(
LOGGER, "Received a bad block {} from {}, the peer will be banned",
"Received a bad block {} from {}, the peer will be banned", hash, peer_addr hash, peer_addr
); );
self.ban_peer(&peer_addr, ReasonForBan::BadBlock); self.ban_peer(&peer_addr, ReasonForBan::BadBlock);
false false
@ -513,10 +506,8 @@ impl ChainAdapter for Peers {
// if the peer sent us a block that's intrinsically bad // if the peer sent us a block that's intrinsically bad
// they are either mistaken or malevolent, both of which require a ban // they are either mistaken or malevolent, both of which require a ban
debug!( debug!(
LOGGER,
"Received a bad compact block {} from {}, the peer will be banned", "Received a bad compact block {} from {}, the peer will be banned",
hash, hash, &peer_addr
&peer_addr
); );
self.ban_peer(&peer_addr, ReasonForBan::BadCompactBlock); self.ban_peer(&peer_addr, ReasonForBan::BadCompactBlock);
false false
@ -566,8 +557,8 @@ impl ChainAdapter for Peers {
fn txhashset_write(&self, h: Hash, txhashset_data: File, peer_addr: SocketAddr) -> bool { fn txhashset_write(&self, h: Hash, txhashset_data: File, peer_addr: SocketAddr) -> bool {
if !self.adapter.txhashset_write(h, txhashset_data, peer_addr) { if !self.adapter.txhashset_write(h, txhashset_data, peer_addr) {
debug!( debug!(
LOGGER, "Received a bad txhashset data from {}, the peer will be banned",
"Received a bad txhashset data from {}, the peer will be banned", &peer_addr &peer_addr
); );
self.ban_peer(&peer_addr, ReasonForBan::BadTxHashSet); self.ban_peer(&peer_addr, ReasonForBan::BadTxHashSet);
false false
@ -592,17 +583,13 @@ impl NetAdapter for Peers {
/// addresses. /// addresses.
fn find_peer_addrs(&self, capab: Capabilities) -> Vec<SocketAddr> { fn find_peer_addrs(&self, capab: Capabilities) -> Vec<SocketAddr> {
let peers = self.find_peers(State::Healthy, capab, MAX_PEER_ADDRS as usize); let peers = self.find_peers(State::Healthy, capab, MAX_PEER_ADDRS as usize);
trace!( trace!("find_peer_addrs: {} healthy peers picked", peers.len());
LOGGER,
"find_peer_addrs: {} healthy peers picked",
peers.len()
);
map_vec!(peers, |p| p.addr) map_vec!(peers, |p| p.addr)
} }
/// A list of peers has been received from one of our peers. /// A list of peers has been received from one of our peers.
fn peer_addrs_received(&self, peer_addrs: Vec<SocketAddr>) { fn peer_addrs_received(&self, peer_addrs: Vec<SocketAddr>) {
trace!(LOGGER, "Received {} peer addrs, saving.", peer_addrs.len()); trace!("Received {} peer addrs, saving.", peer_addrs.len());
for pa in peer_addrs { for pa in peer_addrs {
if let Ok(e) = self.exists_peer(pa) { if let Ok(e) = self.exists_peer(pa) {
if e { if e {
@ -618,7 +605,7 @@ impl NetAdapter for Peers {
ban_reason: ReasonForBan::None, ban_reason: ReasonForBan::None,
}; };
if let Err(e) = self.save_peer(&peer) { if let Err(e) = self.save_peer(&peer) {
error!(LOGGER, "Could not save received peer address: {:?}", e); error!("Could not save received peer address: {:?}", e);
} }
} }
} }
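
For context, the backend these call sites now rely on is configured once at startup; that initialisation is not part of this excerpt. The following is only a rough sketch of a log4rs setup with the features this commit describes (console output plus a size-rotated file appender), assuming a log4rs 0.8-era module layout; the file name, module target, and retention count are illustrative, not taken from the repository:

```rust
#[macro_use]
extern crate log;
extern crate log4rs;

use log::LevelFilter;
use log4rs::append::console::ConsoleAppender;
use log4rs::append::rolling_file::policy::compound::roll::fixed_window::FixedWindowRoller;
use log4rs::append::rolling_file::policy::compound::trigger::size::SizeTrigger;
use log4rs::append::rolling_file::policy::compound::CompoundPolicy;
use log4rs::append::rolling_file::RollingFileAppender;
use log4rs::config::{Appender, Config, Logger, Root};

fn init_logging() {
    let stdout = ConsoleAppender::builder().build();

    // Roll the file once it reaches 16 MiB, keeping gzipped backups next to it
    // (compression needs log4rs' gzip feature); the retention count is illustrative.
    let roller = FixedWindowRoller::builder()
        .build("grin.log.{}.gz", 32)
        .expect("bad roller pattern");
    let trigger = SizeTrigger::new(16 * 1024 * 1024);
    let policy = CompoundPolicy::new(Box::new(trigger), Box::new(roller));
    let file = RollingFileAppender::builder()
        .build("grin.log", Box::new(policy))
        .expect("cannot open log file");

    let config = Config::builder()
        .appender(Appender::builder().build("stdout", Box::new(stdout)))
        .appender(Appender::builder().build("file", Box::new(file)))
        // Allow debug output from a Grin module tree while other crates stay at the root level.
        .logger(Logger::builder().build("grin_p2p", LevelFilter::Debug))
        .build(
            Root::builder()
                .appender("stdout")
                .appender("file")
                .build(LevelFilter::Info),
        ).expect("invalid logging config");

    log4rs::init_config(config).expect("logger already installed");
}

fn main() {
    init_logging();
    info!("logging initialised");
}
```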

View file

@ -30,7 +30,6 @@ use msg::{
TxHashSetArchive, TxHashSetRequest, Type, TxHashSetArchive, TxHashSetRequest, Type,
}; };
use types::{Error, NetAdapter}; use types::{Error, NetAdapter};
use util::LOGGER;
pub struct Protocol { pub struct Protocol {
adapter: Arc<NetAdapter>, adapter: Arc<NetAdapter>,
@ -52,10 +51,8 @@ impl MessageHandler for Protocol {
// banned peers up correctly? // banned peers up correctly?
if adapter.is_banned(self.addr.clone()) { if adapter.is_banned(self.addr.clone()) {
debug!( debug!(
LOGGER,
"handler: consume: peer {:?} banned, received: {:?}, dropping.", "handler: consume: peer {:?} banned, received: {:?}, dropping.",
self.addr, self.addr, msg.header.msg_type,
msg.header.msg_type,
); );
return Ok(None); return Ok(None);
} }
@ -82,14 +79,14 @@ impl MessageHandler for Protocol {
Type::BanReason => { Type::BanReason => {
let ban_reason: BanReason = msg.body()?; let ban_reason: BanReason = msg.body()?;
error!(LOGGER, "handle_payload: BanReason {:?}", ban_reason); error!("handle_payload: BanReason {:?}", ban_reason);
Ok(None) Ok(None)
} }
Type::Transaction => { Type::Transaction => {
debug!( debug!(
LOGGER, "handle_payload: received tx: msg_len: {}",
"handle_payload: received tx: msg_len: {}", msg.header.msg_len msg.header.msg_len
); );
let tx: core::Transaction = msg.body()?; let tx: core::Transaction = msg.body()?;
adapter.transaction_received(tx, false); adapter.transaction_received(tx, false);
@ -98,8 +95,8 @@ impl MessageHandler for Protocol {
Type::StemTransaction => { Type::StemTransaction => {
debug!( debug!(
LOGGER, "handle_payload: received stem tx: msg_len: {}",
"handle_payload: received stem tx: msg_len: {}", msg.header.msg_len msg.header.msg_len
); );
let tx: core::Transaction = msg.body()?; let tx: core::Transaction = msg.body()?;
adapter.transaction_received(tx, true); adapter.transaction_received(tx, true);
@ -109,7 +106,6 @@ impl MessageHandler for Protocol {
Type::GetBlock => { Type::GetBlock => {
let h: Hash = msg.body()?; let h: Hash = msg.body()?;
trace!( trace!(
LOGGER,
"handle_payload: Getblock: {}, msg_len: {}", "handle_payload: Getblock: {}, msg_len: {}",
h, h,
msg.header.msg_len, msg.header.msg_len,
@ -124,8 +120,8 @@ impl MessageHandler for Protocol {
Type::Block => { Type::Block => {
debug!( debug!(
LOGGER, "handle_payload: received block: msg_len: {}",
"handle_payload: received block: msg_len: {}", msg.header.msg_len msg.header.msg_len
); );
let b: core::Block = msg.body()?; let b: core::Block = msg.body()?;
@ -145,8 +141,8 @@ impl MessageHandler for Protocol {
Type::CompactBlock => { Type::CompactBlock => {
debug!( debug!(
LOGGER, "handle_payload: received compact block: msg_len: {}",
"handle_payload: received compact block: msg_len: {}", msg.header.msg_len msg.header.msg_len
); );
let b: core::CompactBlock = msg.body()?; let b: core::CompactBlock = msg.body()?;
@ -218,8 +214,8 @@ impl MessageHandler for Protocol {
Type::TxHashSetRequest => { Type::TxHashSetRequest => {
let sm_req: TxHashSetRequest = msg.body()?; let sm_req: TxHashSetRequest = msg.body()?;
debug!( debug!(
LOGGER, "handle_payload: txhashset req for {} at {}",
"handle_payload: txhashset req for {} at {}", sm_req.hash, sm_req.height sm_req.hash, sm_req.height
); );
let txhashset = self.adapter.txhashset_read(sm_req.hash); let txhashset = self.adapter.txhashset_read(sm_req.hash);
@ -244,15 +240,11 @@ impl MessageHandler for Protocol {
Type::TxHashSetArchive => { Type::TxHashSetArchive => {
let sm_arch: TxHashSetArchive = msg.body()?; let sm_arch: TxHashSetArchive = msg.body()?;
debug!( debug!(
LOGGER,
"handle_payload: txhashset archive for {} at {}. size={}", "handle_payload: txhashset archive for {} at {}. size={}",
sm_arch.hash, sm_arch.hash, sm_arch.height, sm_arch.bytes,
sm_arch.height,
sm_arch.bytes,
); );
if !self.adapter.txhashset_receive_ready() { if !self.adapter.txhashset_receive_ready() {
error!( error!(
LOGGER,
"handle_payload: txhashset archive received but SyncStatus not on TxHashsetDownload", "handle_payload: txhashset archive received but SyncStatus not on TxHashsetDownload",
); );
return Err(Error::BadMessage); return Err(Error::BadMessage);
@ -284,14 +276,13 @@ impl MessageHandler for Protocol {
if let Err(e) = save_txhashset_to_file(tmp.clone()) { if let Err(e) = save_txhashset_to_file(tmp.clone()) {
error!( error!(
LOGGER, "handle_payload: txhashset archive save to file fail. err={:?}",
"handle_payload: txhashset archive save to file fail. err={:?}", e e
); );
return Err(e); return Err(e);
} }
trace!( trace!(
LOGGER,
"handle_payload: txhashset archive save to file {:?} success", "handle_payload: txhashset archive save to file {:?} success",
tmp, tmp,
); );
@ -302,18 +293,15 @@ impl MessageHandler for Protocol {
.txhashset_write(sm_arch.hash, tmp_zip, self.addr); .txhashset_write(sm_arch.hash, tmp_zip, self.addr);
debug!( debug!(
LOGGER,
"handle_payload: txhashset archive for {} at {}, DONE. Data Ok: {}", "handle_payload: txhashset archive for {} at {}, DONE. Data Ok: {}",
sm_arch.hash, sm_arch.hash, sm_arch.height, res
sm_arch.height,
res
); );
Ok(None) Ok(None)
} }
_ => { _ => {
debug!(LOGGER, "unknown message type {:?}", msg.header.msg_type); debug!("unknown message type {:?}", msg.header.msg_type);
Ok(None) Ok(None)
} }
} }
@ -341,12 +329,8 @@ fn headers_header_size(conn: &mut TcpStream, msg_len: u64) -> Result<u64, Error>
let max_size = min_size + 6; let max_size = min_size + 6;
if average_header_size < min_size as u64 || average_header_size > max_size as u64 { if average_header_size < min_size as u64 || average_header_size > max_size as u64 {
debug!( debug!(
LOGGER,
"headers_header_size - size of Vec: {}, average_header_size: {}, min: {}, max: {}", "headers_header_size - size of Vec: {}, average_header_size: {}, min: {}, max: {}",
total_headers, total_headers, average_header_size, min_size, max_size,
average_header_size,
min_size,
max_size,
); );
return Err(Error::Connection(io::Error::new( return Err(Error::Connection(io::Error::new(
io::ErrorKind::InvalidData, io::ErrorKind::InvalidData,
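
One practical consequence of moving these handlers onto the `log` facade, relevant to restricting output to Grin-related modules: every record carries a target (the enclosing module path by default), which the backend can match on. A small standalone sketch, not code from this repository:

```rust
#[macro_use]
extern crate log;

fn handle_payload(msg_len: u64) {
    // The default target is the enclosing module path (e.g. "grin_p2p::protocol"),
    // so a backend can keep Grin's own modules and drop everything else.
    debug!("handle_payload: received tx: msg_len: {}", msg_len);

    // An explicit target overrides the module path when finer routing is wanted.
    debug!(target: "grin_p2p::protocol::txhashset", "archive request received");
}

fn main() {
    // With no logger installed these calls are no-ops; wire up log4rs or any
    // other `log` backend to see the output.
    handle_payload(512);
}
```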

View file

@ -30,7 +30,6 @@ use peer::Peer;
use peers::Peers; use peers::Peers;
use store::PeerStore; use store::PeerStore;
use types::{Capabilities, ChainAdapter, Error, NetAdapter, P2PConfig, TxHashSetRead}; use types::{Capabilities, ChainAdapter, Error, NetAdapter, P2PConfig, TxHashSetRead};
use util::LOGGER;
/// P2P server implementation, handling bootstrapping to find and connect to /// P2P server implementation, handling bootstrapping to find and connect to
/// peers, receiving connections from other peers and keep track of all of them. /// peers, receiving connections from other peers and keep track of all of them.
@ -64,17 +63,14 @@ impl Server {
// Check that we have block 1 // Check that we have block 1
match block_1_hash { match block_1_hash {
Some(hash) => match adapter.get_block(hash) { Some(hash) => match adapter.get_block(hash) {
Some(_) => debug!(LOGGER, "Full block 1 found, archive capabilities confirmed"), Some(_) => debug!("Full block 1 found, archive capabilities confirmed"),
None => { None => {
debug!( debug!("Full block 1 not found, archive capabilities disabled");
LOGGER,
"Full block 1 not found, archive capabilities disabled"
);
capab.remove(Capabilities::FULL_HIST); capab.remove(Capabilities::FULL_HIST);
} }
}, },
None => { None => {
debug!(LOGGER, "Block 1 not found, archive capabilities disabled"); debug!("Block 1 not found, archive capabilities disabled");
capab.remove(Capabilities::FULL_HIST); capab.remove(Capabilities::FULL_HIST);
} }
} }
@ -102,12 +98,7 @@ impl Server {
Ok((stream, peer_addr)) => { Ok((stream, peer_addr)) => {
if !self.check_banned(&stream) { if !self.check_banned(&stream) {
if let Err(e) = self.handle_new_peer(stream) { if let Err(e) = self.handle_new_peer(stream) {
warn!( warn!("Error accepting peer {}: {:?}", peer_addr.to_string(), e);
LOGGER,
"Error accepting peer {}: {:?}",
peer_addr.to_string(),
e
);
} }
} }
} }
@ -115,7 +106,7 @@ impl Server {
// nothing to do, will retry in next iteration // nothing to do, will retry in next iteration
} }
Err(e) => { Err(e) => {
warn!(LOGGER, "Couldn't establish new client connection: {:?}", e); warn!("Couldn't establish new client connection: {:?}", e);
} }
} }
if self.stop.load(Ordering::Relaxed) { if self.stop.load(Ordering::Relaxed) {
@ -130,10 +121,7 @@ impl Server {
/// we're already connected to the provided address. /// we're already connected to the provided address.
pub fn connect(&self, addr: &SocketAddr) -> Result<Arc<Peer>, Error> { pub fn connect(&self, addr: &SocketAddr) -> Result<Arc<Peer>, Error> {
if Peer::is_denied(&self.config, &addr) { if Peer::is_denied(&self.config, &addr) {
debug!( debug!("connect_peer: peer {} denied, not connecting.", addr);
LOGGER,
"connect_peer: peer {} denied, not connecting.", addr
);
return Err(Error::ConnectionClose); return Err(Error::ConnectionClose);
} }
@ -148,12 +136,11 @@ impl Server {
if let Some(p) = self.peers.get_connected_peer(addr) { if let Some(p) = self.peers.get_connected_peer(addr) {
// if we're already connected to the addr, just return the peer // if we're already connected to the addr, just return the peer
trace!(LOGGER, "connect_peer: already connected {}", addr); trace!("connect_peer: already connected {}", addr);
return Ok(p); return Ok(p);
} }
trace!( trace!(
LOGGER,
"connect_peer: on {}:{}. connecting to {}", "connect_peer: on {}:{}. connecting to {}",
self.config.host, self.config.host,
self.config.port, self.config.port,
@ -179,12 +166,8 @@ impl Server {
} }
Err(e) => { Err(e) => {
debug!( debug!(
LOGGER,
"connect_peer: on {}:{}. Could not connect to {}: {:?}", "connect_peer: on {}:{}. Could not connect to {}: {:?}",
self.config.host, self.config.host, self.config.port, addr, e
self.config.port,
addr,
e
); );
Err(Error::Connection(e)) Err(Error::Connection(e))
} }
@ -211,9 +194,9 @@ impl Server {
// peer has been banned, go away! // peer has been banned, go away!
if let Ok(peer_addr) = stream.peer_addr() { if let Ok(peer_addr) = stream.peer_addr() {
if self.peers.is_banned(peer_addr) { if self.peers.is_banned(peer_addr) {
debug!(LOGGER, "Peer {} banned, refusing connection.", peer_addr); debug!("Peer {} banned, refusing connection.", peer_addr);
if let Err(e) = stream.shutdown(Shutdown::Both) { if let Err(e) = stream.shutdown(Shutdown::Both) {
debug!(LOGGER, "Error shutting down conn: {:?}", e); debug!("Error shutting down conn: {:?}", e);
} }
return true; return true;
} }
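
Because call sites like the ones above now depend only on the `log` facade, any type implementing `log::Log` can receive these messages, which is handy in tests where the full log4rs setup is unnecessary. A minimal illustrative sketch, not part of the Grin codebase:

```rust
#[macro_use]
extern crate log;

use log::{Level, LevelFilter, Log, Metadata, Record};

/// Trivial backend that prints every enabled record to stdout.
struct PrintLogger;

impl Log for PrintLogger {
    fn enabled(&self, metadata: &Metadata) -> bool {
        metadata.level() <= Level::Debug
    }

    fn log(&self, record: &Record) {
        if self.enabled(record.metadata()) {
            println!("{} [{}] {}", record.level(), record.target(), record.args());
        }
    }

    fn flush(&self) {}
}

static PRINT_LOGGER: PrintLogger = PrintLogger;

fn main() {
    // set_logger takes a &'static Log; set_max_level gates the macros globally.
    log::set_logger(&PRINT_LOGGER).expect("logger already installed");
    log::set_max_level(LevelFilter::Debug);

    debug!("Peer {} banned, refusing connection.", "10.0.0.1:3414");
}
```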

View file

@ -26,7 +26,6 @@ use core::ser::{self, Readable, Reader, Writeable, Writer};
use grin_store::{self, option_to_not_found, to_key, Error}; use grin_store::{self, option_to_not_found, to_key, Error};
use msg::SockAddr; use msg::SockAddr;
use types::{Capabilities, ReasonForBan}; use types::{Capabilities, ReasonForBan};
use util::LOGGER;
const STORE_SUBPATH: &'static str = "peers"; const STORE_SUBPATH: &'static str = "peers";
@ -111,7 +110,7 @@ impl PeerStore {
} }
pub fn save_peer(&self, p: &PeerData) -> Result<(), Error> { pub fn save_peer(&self, p: &PeerData) -> Result<(), Error> {
debug!(LOGGER, "save_peer: {:?} marked {:?}", p.addr, p.flags); debug!("save_peer: {:?} marked {:?}", p.addr, p.flags);
let batch = self.db.batch()?; let batch = self.db.batch()?;
batch.put_ser(&peer_key(p.addr)[..], p)?; batch.put_ser(&peer_key(p.addr)[..], p)?;

View file

@ -10,7 +10,7 @@ blake2-rfc = "0.2"
rand = "0.5" rand = "0.5"
serde = "1" serde = "1"
serde_derive = "1" serde_derive = "1"
slog = { version = "~2.3", features = ["max_level_trace", "release_max_level_trace"] } log = "0.4"
chrono = "0.4.4" chrono = "0.4.4"
grin_core = { path = "../core" } grin_core = { path = "../core" }

View file

@ -30,7 +30,7 @@ extern crate serde;
#[macro_use] // Needed for Serialize/Deserialize. The compiler complaining here is a bug. #[macro_use] // Needed for Serialize/Deserialize. The compiler complaining here is a bug.
extern crate serde_derive; extern crate serde_derive;
#[macro_use] #[macro_use]
extern crate slog; extern crate log;
extern crate chrono; extern crate chrono;
mod pool; mod pool;

View file

@ -26,7 +26,6 @@ use core::core::transaction;
use core::core::verifier_cache::VerifierCache; use core::core::verifier_cache::VerifierCache;
use core::core::{Block, BlockHeader, BlockSums, Committed, Transaction, TxKernel}; use core::core::{Block, BlockHeader, BlockSums, Committed, Transaction, TxKernel};
use types::{BlockChain, PoolEntry, PoolEntryState, PoolError}; use types::{BlockChain, PoolEntry, PoolEntryState, PoolError};
use util::LOGGER;
// max weight leaving minimum space for a coinbase // max weight leaving minimum space for a coinbase
const MAX_MINEABLE_WEIGHT: usize = const MAX_MINEABLE_WEIGHT: usize =
@ -192,7 +191,6 @@ impl Pool {
header: &BlockHeader, header: &BlockHeader,
) -> Result<(), PoolError> { ) -> Result<(), PoolError> {
debug!( debug!(
LOGGER,
"pool [{}]: add_to_pool: {}, {:?}, inputs: {}, outputs: {}, kernels: {} (at block {})", "pool [{}]: add_to_pool: {}, {:?}, inputs: {}, outputs: {}, kernels: {} (at block {})",
self.name, self.name,
entry.tx.hash(), entry.tx.hash(),

View file

@ -13,8 +13,8 @@ hyper-staticfile = "0.3"
itertools = "0.7" itertools = "0.7"
lmdb-zero = "0.4.4" lmdb-zero = "0.4.4"
rand = "0.5" rand = "0.5"
slog = { version = "~2.3", features = ["max_level_trace", "release_max_level_trace"] }
serde = "1" serde = "1"
log = "0.4"
serde_derive = "1" serde_derive = "1"
serde_json = "1" serde_json = "1"
chrono = "0.4.4" chrono = "0.4.4"

View file

@ -35,7 +35,7 @@ use p2p;
use pool; use pool;
use rand::prelude::*; use rand::prelude::*;
use store; use store;
use util::{OneTime, LOGGER}; use util::OneTime;
/// Implementation of the NetAdapter for the . Gets notified when new /// Implementation of the NetAdapter for the . Gets notified when new
/// blocks and transactions are received and forwards to the chain and pool /// blocks and transactions are received and forwards to the chain and pool
@ -74,7 +74,6 @@ impl p2p::ChainAdapter for NetToChainAdapter {
let header = self.chain().head_header().unwrap(); let header = self.chain().head_header().unwrap();
debug!( debug!(
LOGGER,
"Received tx {}, inputs: {}, outputs: {}, kernels: {}, going to process.", "Received tx {}, inputs: {}, outputs: {}, kernels: {}, going to process.",
tx_hash, tx_hash,
tx.inputs().len(), tx.inputs().len(),
@ -88,13 +87,12 @@ impl p2p::ChainAdapter for NetToChainAdapter {
}; };
if let Err(e) = res { if let Err(e) = res {
debug!(LOGGER, "Transaction {} rejected: {:?}", tx_hash, e); debug!("Transaction {} rejected: {:?}", tx_hash, e);
} }
} }
fn block_received(&self, b: core::Block, addr: SocketAddr) -> bool { fn block_received(&self, b: core::Block, addr: SocketAddr) -> bool {
debug!( debug!(
LOGGER,
"Received block {} at {} from {}, inputs: {}, outputs: {}, kernels: {}, going to process.", "Received block {} at {} from {}, inputs: {}, outputs: {}, kernels: {}, going to process.",
b.hash(), b.hash(),
b.header.height, b.header.height,
@ -109,7 +107,6 @@ impl p2p::ChainAdapter for NetToChainAdapter {
fn compact_block_received(&self, cb: core::CompactBlock, addr: SocketAddr) -> bool { fn compact_block_received(&self, cb: core::CompactBlock, addr: SocketAddr) -> bool {
let bhash = cb.hash(); let bhash = cb.hash();
debug!( debug!(
LOGGER,
"Received compact_block {} at {} from {}, outputs: {}, kernels: {}, kern_ids: {}, going to process.", "Received compact_block {} at {} from {}, outputs: {}, kernels: {}, kern_ids: {}, going to process.",
bhash, bhash,
cb.header.height, cb.header.height,
@ -125,7 +122,7 @@ impl p2p::ChainAdapter for NetToChainAdapter {
match core::Block::hydrate_from(cb, vec![]) { match core::Block::hydrate_from(cb, vec![]) {
Ok(block) => self.process_block(block, addr), Ok(block) => self.process_block(block, addr),
Err(e) => { Err(e) => {
debug!(LOGGER, "Invalid hydrated block {}: {}", cb_hash, e); debug!("Invalid hydrated block {}: {}", cb_hash, e);
return false; return false;
} }
} }
@ -135,7 +132,7 @@ impl p2p::ChainAdapter for NetToChainAdapter {
.chain() .chain()
.process_block_header(&cb.header, self.chain_opts()) .process_block_header(&cb.header, self.chain_opts())
{ {
debug!(LOGGER, "Invalid compact block header {}: {}", cb_hash, e); debug!("Invalid compact block header {}: {}", cb_hash, e);
return !e.is_bad_data(); return !e.is_bad_data();
} }
@ -145,7 +142,6 @@ impl p2p::ChainAdapter for NetToChainAdapter {
}; };
debug!( debug!(
LOGGER,
"adapter: txs from tx pool - {}, (unknown kern_ids: {})", "adapter: txs from tx pool - {}, (unknown kern_ids: {})",
txs.len(), txs.len(),
missing_short_ids.len(), missing_short_ids.len(),
@ -159,7 +155,7 @@ impl p2p::ChainAdapter for NetToChainAdapter {
let block = match core::Block::hydrate_from(cb.clone(), txs) { let block = match core::Block::hydrate_from(cb.clone(), txs) {
Ok(block) => block, Ok(block) => block,
Err(e) => { Err(e) => {
debug!(LOGGER, "Invalid hydrated block {}: {}", cb.hash(), e); debug!("Invalid hydrated block {}: {}", cb.hash(), e);
return false; return false;
} }
}; };
@ -169,29 +165,22 @@ impl p2p::ChainAdapter for NetToChainAdapter {
.validate(&prev.total_kernel_offset, self.verifier_cache.clone()) .validate(&prev.total_kernel_offset, self.verifier_cache.clone())
.is_ok() .is_ok()
{ {
debug!(LOGGER, "adapter: successfully hydrated block from tx pool!"); debug!("adapter: successfully hydrated block from tx pool!");
self.process_block(block, addr) self.process_block(block, addr)
} else { } else {
if self.sync_state.status() == SyncStatus::NoSync { if self.sync_state.status() == SyncStatus::NoSync {
debug!( debug!("adapter: block invalid after hydration, requesting full block");
LOGGER,
"adapter: block invalid after hydration, requesting full block"
);
self.request_block(&cb.header, &addr); self.request_block(&cb.header, &addr);
true true
} else { } else {
debug!( debug!(
LOGGER,
"adapter: block invalid after hydration, ignoring it, cause still syncing" "adapter: block invalid after hydration, ignoring it, cause still syncing"
); );
true true
} }
} }
} else { } else {
debug!( debug!("adapter: failed to retrieve previous block header (still syncing?)");
LOGGER,
"adapter: failed to retrieve previous block header (still syncing?)"
);
true true
} }
} }
@ -200,8 +189,8 @@ impl p2p::ChainAdapter for NetToChainAdapter {
fn header_received(&self, bh: core::BlockHeader, addr: SocketAddr) -> bool { fn header_received(&self, bh: core::BlockHeader, addr: SocketAddr) -> bool {
let bhash = bh.hash(); let bhash = bh.hash();
debug!( debug!(
LOGGER, "Received block header {} at {} from {}, going to process.",
"Received block header {} at {} from {}, going to process.", bhash, bh.height, addr, bhash, bh.height, addr,
); );
// pushing the new block header through the header chain pipeline // pushing the new block header through the header chain pipeline
@ -209,16 +198,11 @@ impl p2p::ChainAdapter for NetToChainAdapter {
let res = self.chain().process_block_header(&bh, self.chain_opts()); let res = self.chain().process_block_header(&bh, self.chain_opts());
if let &Err(ref e) = &res { if let &Err(ref e) = &res {
debug!( debug!("Block header {} refused by chain: {:?}", bhash, e.kind());
LOGGER,
"Block header {} refused by chain: {:?}",
bhash,
e.kind()
);
if e.is_bad_data() { if e.is_bad_data() {
debug!( debug!(
LOGGER, "header_received: {} is a bad header, resetting header head",
"header_received: {} is a bad header, resetting header head", bhash bhash
); );
let _ = self.chain().reset_head(); let _ = self.chain().reset_head();
return false; return false;
@ -239,7 +223,6 @@ impl p2p::ChainAdapter for NetToChainAdapter {
fn headers_received(&self, bhs: Vec<core::BlockHeader>, addr: SocketAddr) -> bool { fn headers_received(&self, bhs: Vec<core::BlockHeader>, addr: SocketAddr) -> bool {
info!( info!(
LOGGER,
"Received block headers {:?} from {}", "Received block headers {:?} from {}",
bhs.iter().map(|x| x.hash()).collect::<Vec<_>>(), bhs.iter().map(|x| x.hash()).collect::<Vec<_>>(),
addr, addr,
@ -252,7 +235,7 @@ impl p2p::ChainAdapter for NetToChainAdapter {
// try to add headers to our header chain // try to add headers to our header chain
let res = self.chain().sync_block_headers(&bhs, self.chain_opts()); let res = self.chain().sync_block_headers(&bhs, self.chain_opts());
if let &Err(ref e) = &res { if let &Err(ref e) = &res {
debug!(LOGGER, "Block headers refused by chain: {:?}", e); debug!("Block headers refused by chain: {:?}", e);
if e.is_bad_data() { if e.is_bad_data() {
return false; return false;
@ -262,14 +245,14 @@ impl p2p::ChainAdapter for NetToChainAdapter {
} }
fn locate_headers(&self, locator: Vec<Hash>) -> Vec<core::BlockHeader> { fn locate_headers(&self, locator: Vec<Hash>) -> Vec<core::BlockHeader> {
debug!(LOGGER, "locate_headers: {:?}", locator,); debug!("locate_headers: {:?}", locator,);
let header = match self.find_common_header(locator) { let header = match self.find_common_header(locator) {
Some(header) => header, Some(header) => header,
None => return vec![], None => return vec![],
}; };
debug!(LOGGER, "locate_headers: common header: {:?}", header.hash(),); debug!("locate_headers: common header: {:?}", header.hash(),);
// looks like we know one, getting as many following headers as allowed // looks like we know one, getting as many following headers as allowed
let hh = header.height; let hh = header.height;
@ -281,18 +264,14 @@ impl p2p::ChainAdapter for NetToChainAdapter {
Err(e) => match e.kind() { Err(e) => match e.kind() {
chain::ErrorKind::StoreErr(store::Error::NotFoundErr(_), _) => break, chain::ErrorKind::StoreErr(store::Error::NotFoundErr(_), _) => break,
_ => { _ => {
error!(LOGGER, "Could not build header locator: {:?}", e); error!("Could not build header locator: {:?}", e);
return vec![]; return vec![];
} }
}, },
} }
} }
debug!( debug!("locate_headers: returning headers: {}", headers.len(),);
LOGGER,
"locate_headers: returning headers: {}",
headers.len(),
);
headers headers
} }
@ -317,10 +296,7 @@ impl p2p::ChainAdapter for NetToChainAdapter {
reader: read, reader: read,
}), }),
Err(e) => { Err(e) => {
warn!( warn!("Couldn't produce txhashset data for block {}: {:?}", h, e);
LOGGER,
"Couldn't produce txhashset data for block {}: {:?}", h, e
);
None None
} }
} }
@ -367,12 +343,12 @@ impl p2p::ChainAdapter for NetToChainAdapter {
.chain() .chain()
.txhashset_write(h, txhashset_data, self.sync_state.as_ref()) .txhashset_write(h, txhashset_data, self.sync_state.as_ref())
{ {
error!(LOGGER, "Failed to save txhashset archive: {}", e); error!("Failed to save txhashset archive: {}", e);
let is_good_data = !e.is_bad_data(); let is_good_data = !e.is_bad_data();
self.sync_state.set_sync_error(types::Error::Chain(e)); self.sync_state.set_sync_error(types::Error::Chain(e));
is_good_data is_good_data
} else { } else {
info!(LOGGER, "Received valid txhashset data for {}.", h); info!("Received valid txhashset data for {}.", h);
true true
} }
} }
@ -447,7 +423,7 @@ impl NetToChainAdapter {
self.find_common_header(locator[1..].to_vec()) self.find_common_header(locator[1..].to_vec())
} }
_ => { _ => {
error!(LOGGER, "Could not build header locator: {:?}", e); error!("Could not build header locator: {:?}", e);
None None
} }
}, },
@ -479,8 +455,8 @@ impl NetToChainAdapter {
} }
Err(ref e) if e.is_bad_data() => { Err(ref e) if e.is_bad_data() => {
debug!( debug!(
LOGGER, "adapter: process_block: {} is a bad block, resetting head",
"adapter: process_block: {} is a bad block, resetting head", bhash bhash
); );
let _ = self.chain().reset_head(); let _ = self.chain().reset_head();
@ -495,14 +471,13 @@ impl NetToChainAdapter {
chain::ErrorKind::Orphan => { chain::ErrorKind::Orphan => {
// make sure we did not miss the parent block // make sure we did not miss the parent block
if !self.chain().is_orphan(&prev_hash) && !self.sync_state.is_syncing() { if !self.chain().is_orphan(&prev_hash) && !self.sync_state.is_syncing() {
debug!(LOGGER, "adapter: process_block: received an orphan block, checking the parent: {:}", prev_hash); debug!("adapter: process_block: received an orphan block, checking the parent: {:}", prev_hash);
self.request_block_by_hash(prev_hash, &addr) self.request_block_by_hash(prev_hash, &addr)
} }
true true
} }
_ => { _ => {
debug!( debug!(
LOGGER,
"adapter: process_block: block {} refused by chain: {}", "adapter: process_block: block {} refused by chain: {}",
bhash, bhash,
e.kind() e.kind()
@ -527,8 +502,8 @@ impl NetToChainAdapter {
let now = Instant::now(); let now = Instant::now();
debug!( debug!(
LOGGER, "adapter: process_block: ***** validating full chain state at {}",
"adapter: process_block: ***** validating full chain state at {}", bhash, bhash,
); );
self.chain() self.chain()
@ -536,7 +511,6 @@ impl NetToChainAdapter {
.expect("chain validation failed, hard stop"); .expect("chain validation failed, hard stop");
debug!( debug!(
LOGGER,
"adapter: process_block: ***** done validating full chain state, took {}s", "adapter: process_block: ***** done validating full chain state, took {}s",
now.elapsed().as_secs(), now.elapsed().as_secs(),
); );
@ -558,7 +532,7 @@ impl NetToChainAdapter {
.name("compactor".to_string()) .name("compactor".to_string())
.spawn(move || { .spawn(move || {
if let Err(e) = chain.compact() { if let Err(e) = chain.compact() {
error!(LOGGER, "Could not compact chain: {:?}", e); error!("Could not compact chain: {:?}", e);
} }
}); });
} }
@ -592,23 +566,19 @@ impl NetToChainAdapter {
match self.chain().block_exists(h) { match self.chain().block_exists(h) {
Ok(false) => match self.peers().get_connected_peer(addr) { Ok(false) => match self.peers().get_connected_peer(addr) {
None => debug!( None => debug!(
LOGGER,
"send_block_request_to_peer: can't send request to peer {:?}, not connected", "send_block_request_to_peer: can't send request to peer {:?}, not connected",
addr addr
), ),
Some(peer) => { Some(peer) => {
if let Err(e) = f(&peer, h) { if let Err(e) = f(&peer, h) {
error!(LOGGER, "send_block_request_to_peer: failed: {:?}", e) error!("send_block_request_to_peer: failed: {:?}", e)
} }
} }
}, },
Ok(true) => debug!( Ok(true) => debug!("send_block_request_to_peer: block {} already known", h),
LOGGER,
"send_block_request_to_peer: block {} already known", h
),
Err(e) => error!( Err(e) => error!(
LOGGER, "send_block_request_to_peer: failed to check block exists: {:?}",
"send_block_request_to_peer: failed to check block exists: {:?}", e e
), ),
} }
} }
@ -639,11 +609,10 @@ impl ChainAdapter for ChainToPoolAndNetAdapter {
return; return;
} }
debug!(LOGGER, "adapter: block_accepted: {:?}", b.hash()); debug!("adapter: block_accepted: {:?}", b.hash());
if let Err(e) = self.tx_pool.write().reconcile_block(b) { if let Err(e) = self.tx_pool.write().reconcile_block(b) {
error!( error!(
LOGGER,
"Pool could not update itself at block {}: {:?}", "Pool could not update itself at block {}: {:?}",
b.hash(), b.hash(),
e, e,
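
A side note on call sites like the `headers_received` message in the hunks above, which build a vector of hashes purely for the log line: with the `log` facade the arguments are still evaluated even when the level is filtered out, so expensive formatting can be guarded with `log_enabled!`. A hedged sketch of the idiom, standalone and with simplified types:

```rust
#[macro_use]
extern crate log;

use log::Level;

fn headers_received(header_heights: &[u64]) {
    // Build the summary only when debug output is actually enabled,
    // keeping the hot path cheap when the level is filtered out.
    if log_enabled!(Level::Debug) {
        let summary: Vec<u64> = header_heights.iter().cloned().collect();
        debug!("Received block headers {:?}", summary);
    }
}

fn main() {
    headers_received(&[100, 101, 102]);
}
```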

View file

@ -25,7 +25,6 @@ use core::{core, pow};
use p2p; use p2p;
use pool; use pool;
use store; use store;
use util::LOGGER;
use wallet; use wallet;
/// Error type wrapping underlying module errors. /// Error type wrapping underlying module errors.
@ -314,10 +313,7 @@ impl SyncState {
let mut status = self.current.write(); let mut status = self.current.write();
debug!( debug!("sync_state: sync_status: {:?} -> {:?}", *status, new_status,);
LOGGER,
"sync_state: sync_status: {:?} -> {:?}", *status, new_status,
);
*status = new_status; *status = new_status;
} }

View file

@ -24,7 +24,6 @@ use core::core::hash::Hashed;
use core::core::transaction; use core::core::transaction;
use core::core::verifier_cache::VerifierCache; use core::core::verifier_cache::VerifierCache;
use pool::{DandelionConfig, PoolEntryState, PoolError, TransactionPool, TxSource}; use pool::{DandelionConfig, PoolEntryState, PoolError, TransactionPool, TxSource};
use util::LOGGER;
/// A process to monitor transactions in the stempool. /// A process to monitor transactions in the stempool.
/// With Dandelion, transaction can be broadcasted in stem or fluff phase. /// With Dandelion, transaction can be broadcasted in stem or fluff phase.
@ -40,7 +39,7 @@ pub fn monitor_transactions(
verifier_cache: Arc<RwLock<VerifierCache>>, verifier_cache: Arc<RwLock<VerifierCache>>,
stop: Arc<AtomicBool>, stop: Arc<AtomicBool>,
) { ) {
debug!(LOGGER, "Started Dandelion transaction monitor."); debug!("Started Dandelion transaction monitor.");
let _ = thread::Builder::new() let _ = thread::Builder::new()
.name("dandelion".to_string()) .name("dandelion".to_string())
@ -58,26 +57,26 @@ pub fn monitor_transactions(
// Aggregate them up to give a single (valid) aggregated tx and propagate it // Aggregate them up to give a single (valid) aggregated tx and propagate it
// to the next Dandelion relay along the stem. // to the next Dandelion relay along the stem.
if process_stem_phase(tx_pool.clone(), verifier_cache.clone()).is_err() { if process_stem_phase(tx_pool.clone(), verifier_cache.clone()).is_err() {
error!(LOGGER, "dand_mon: Problem with stem phase."); error!("dand_mon: Problem with stem phase.");
} }
// Step 2: find all "ToFluff" entries in stempool from last run. // Step 2: find all "ToFluff" entries in stempool from last run.
// Aggregate them up to give a single (valid) aggregated tx and (re)add it // Aggregate them up to give a single (valid) aggregated tx and (re)add it
// to our pool with stem=false (which will then broadcast it). // to our pool with stem=false (which will then broadcast it).
if process_fluff_phase(tx_pool.clone(), verifier_cache.clone()).is_err() { if process_fluff_phase(tx_pool.clone(), verifier_cache.clone()).is_err() {
error!(LOGGER, "dand_mon: Problem with fluff phase."); error!("dand_mon: Problem with fluff phase.");
} }
// Step 3: now find all "Fresh" entries in stempool since last run. // Step 3: now find all "Fresh" entries in stempool since last run.
// Coin flip for each (90/10) and label them as either "ToStem" or "ToFluff". // Coin flip for each (90/10) and label them as either "ToStem" or "ToFluff".
// We will process these in the next run (waiting patience secs). // We will process these in the next run (waiting patience secs).
if process_fresh_entries(dandelion_config.clone(), tx_pool.clone()).is_err() { if process_fresh_entries(dandelion_config.clone(), tx_pool.clone()).is_err() {
error!(LOGGER, "dand_mon: Problem processing fresh pool entries."); error!("dand_mon: Problem processing fresh pool entries.");
} }
// Step 4: now find all expired entries based on embargo timer. // Step 4: now find all expired entries based on embargo timer.
if process_expired_entries(dandelion_config.clone(), tx_pool.clone()).is_err() { if process_expired_entries(dandelion_config.clone(), tx_pool.clone()).is_err() {
error!(LOGGER, "dand_mon: Problem processing fresh pool entries."); error!("dand_mon: Problem processing fresh pool entries.");
} }
} }
}); });
@ -103,21 +102,14 @@ fn process_stem_phase(
.transition_to_state(&stem_txs, PoolEntryState::Stemmed); .transition_to_state(&stem_txs, PoolEntryState::Stemmed);
if stem_txs.len() > 0 { if stem_txs.len() > 0 {
debug!( debug!("dand_mon: Found {} txs for stemming.", stem_txs.len());
LOGGER,
"dand_mon: Found {} txs for stemming.",
stem_txs.len()
);
let agg_tx = transaction::aggregate(stem_txs)?; let agg_tx = transaction::aggregate(stem_txs)?;
agg_tx.validate(verifier_cache.clone())?; agg_tx.validate(verifier_cache.clone())?;
let res = tx_pool.adapter.stem_tx_accepted(&agg_tx); let res = tx_pool.adapter.stem_tx_accepted(&agg_tx);
if res.is_err() { if res.is_err() {
debug!( debug!("dand_mon: Unable to propagate stem tx. No relay, fluffing instead.");
LOGGER,
"dand_mon: Unable to propagate stem tx. No relay, fluffing instead."
);
let src = TxSource { let src = TxSource {
debug_name: "no_relay".to_string(), debug_name: "no_relay".to_string(),
@ -150,11 +142,7 @@ fn process_fluff_phase(
.transition_to_state(&stem_txs, PoolEntryState::Fluffed); .transition_to_state(&stem_txs, PoolEntryState::Fluffed);
if stem_txs.len() > 0 { if stem_txs.len() > 0 {
debug!( debug!("dand_mon: Found {} txs for fluffing.", stem_txs.len());
LOGGER,
"dand_mon: Found {} txs for fluffing.",
stem_txs.len()
);
let agg_tx = transaction::aggregate(stem_txs)?; let agg_tx = transaction::aggregate(stem_txs)?;
agg_tx.validate(verifier_cache.clone())?; agg_tx.validate(verifier_cache.clone())?;
@ -186,7 +174,6 @@ fn process_fresh_entries(
if fresh_entries.len() > 0 { if fresh_entries.len() > 0 {
debug!( debug!(
LOGGER,
"dand_mon: Found {} fresh entries in stempool.", "dand_mon: Found {} fresh entries in stempool.",
fresh_entries.len() fresh_entries.len()
); );
@ -220,21 +207,13 @@ fn process_expired_entries(
.iter() .iter()
.filter(|x| x.tx_at.timestamp() < cutoff) .filter(|x| x.tx_at.timestamp() < cutoff)
{ {
debug!( debug!("dand_mon: Embargo timer expired for {:?}", entry.tx.hash());
LOGGER,
"dand_mon: Embargo timer expired for {:?}",
entry.tx.hash()
);
expired_entries.push(entry.clone()); expired_entries.push(entry.clone());
} }
} }
if expired_entries.len() > 0 { if expired_entries.len() > 0 {
debug!( debug!("dand_mon: Found {} expired txs.", expired_entries.len());
LOGGER,
"dand_mon: Found {} expired txs.",
expired_entries.len()
);
{ {
let mut tx_pool = tx_pool.write(); let mut tx_pool = tx_pool.write();
@ -246,11 +225,8 @@ fn process_expired_entries(
identifier: "?.?.?.?".to_string(), identifier: "?.?.?.?".to_string(),
}; };
match tx_pool.add_to_pool(src, entry.tx, false, &header) { match tx_pool.add_to_pool(src, entry.tx, false, &header) {
Ok(_) => debug!( Ok(_) => debug!("dand_mon: embargo expired, fluffed tx successfully."),
LOGGER, Err(e) => debug!("dand_mon: Failed to fluff expired tx - {:?}", e),
"dand_mon: embargo expired, fluffed tx successfully."
),
Err(e) => debug!(LOGGER, "dand_mon: Failed to fluff expired tx - {:?}", e),
}; };
} }
} }
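
The monitor above logs a fixed string when a phase fails; since the facade macros accept ordinary formatting arguments, the underlying error can be carried along where that is wanted. A small illustrative sketch in which the function and error type are hypothetical, not the pool's real API:

```rust
#[macro_use]
extern crate log;

#[derive(Debug)]
enum PhaseError {
    Aggregation(String),
}

fn process_stem_phase() -> Result<(), PhaseError> {
    Err(PhaseError::Aggregation("no stem transactions".to_string()))
}

fn run_monitor_iteration() {
    // Binding the error lets the log line say why the phase failed.
    if let Err(e) = process_stem_phase() {
        error!("dand_mon: Problem with stem phase: {:?}", e);
    }
}

fn main() {
    run_monitor_iteration();
}
```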

View file

@ -27,7 +27,6 @@ use std::{cmp, io, str, thread, time};
use p2p; use p2p;
use p2p::ChainAdapter; use p2p::ChainAdapter;
use pool::DandelionConfig; use pool::DandelionConfig;
use util::LOGGER;
// DNS Seeds with contact email associated // DNS Seeds with contact email associated
const DNS_SEEDS: &'static [&'static str] = &[ const DNS_SEEDS: &'static [&'static str] = &[
@ -119,8 +118,8 @@ fn monitor_peers(
if interval >= config.ban_window() { if interval >= config.ban_window() {
peers.unban_peer(&x.addr); peers.unban_peer(&x.addr);
debug!( debug!(
LOGGER, "monitor_peers: unbanned {} after {} seconds",
"monitor_peers: unbanned {} after {} seconds", x.addr, interval x.addr, interval
); );
} else { } else {
banned_count += 1; banned_count += 1;
@ -132,7 +131,6 @@ fn monitor_peers(
} }
debug!( debug!(
LOGGER,
"monitor_peers: on {}:{}, {} connected ({} most_work). \ "monitor_peers: on {}:{}, {} connected ({} most_work). \
all {} = {} healthy + {} banned + {} defunct", all {} = {} healthy + {} banned + {} defunct",
config.host, config.host,
@ -158,8 +156,8 @@ fn monitor_peers(
let mut connected_peers: Vec<SocketAddr> = vec![]; let mut connected_peers: Vec<SocketAddr> = vec![];
for p in peers.connected_peers() { for p in peers.connected_peers() {
debug!( debug!(
LOGGER, "monitor_peers: {}:{} ask {} for more peers",
"monitor_peers: {}:{} ask {} for more peers", config.host, config.port, p.info.addr, config.host, config.port, p.info.addr,
); );
let _ = p.send_peer_request(capabilities); let _ = p.send_peer_request(capabilities);
connected_peers.push(p.info.addr) connected_peers.push(p.info.addr)
@ -178,7 +176,7 @@ fn monitor_peers(
} }
} }
} }
None => debug!(LOGGER, "monitor_peers: no preferred peers"), None => debug!("monitor_peers: no preferred peers"),
} }
// take a random defunct peer and mark it healthy: over a long period any // take a random defunct peer and mark it healthy: over a long period any
@ -197,8 +195,8 @@ fn monitor_peers(
); );
for p in new_peers.iter().filter(|p| !peers.is_known(&p.addr)) { for p in new_peers.iter().filter(|p| !peers.is_known(&p.addr)) {
debug!( debug!(
LOGGER, "monitor_peers: on {}:{}, queue to soon try {}",
"monitor_peers: on {}:{}, queue to soon try {}", config.host, config.port, p.addr, config.host, config.port, p.addr,
); );
tx.send(p.addr).unwrap(); tx.send(p.addr).unwrap();
} }
@ -208,13 +206,13 @@ fn update_dandelion_relay(peers: Arc<p2p::Peers>, dandelion_config: DandelionCon
// Dandelion Relay Updater // Dandelion Relay Updater
let dandelion_relay = peers.get_dandelion_relay(); let dandelion_relay = peers.get_dandelion_relay();
if dandelion_relay.is_empty() { if dandelion_relay.is_empty() {
debug!(LOGGER, "monitor_peers: no dandelion relay updating"); debug!("monitor_peers: no dandelion relay updating");
peers.update_dandelion_relay(); peers.update_dandelion_relay();
} else { } else {
for last_added in dandelion_relay.keys() { for last_added in dandelion_relay.keys() {
let dandelion_interval = Utc::now().timestamp() - last_added; let dandelion_interval = Utc::now().timestamp() - last_added;
if dandelion_interval >= dandelion_config.relay_secs.unwrap() as i64 { if dandelion_interval >= dandelion_config.relay_secs.unwrap() as i64 {
debug!(LOGGER, "monitor_peers: updating expired dandelion relay"); debug!("monitor_peers: updating expired dandelion relay");
peers.update_dandelion_relay(); peers.update_dandelion_relay();
} }
} }
@ -242,11 +240,11 @@ fn connect_to_seeds_and_preferred_peers(
// If we have preferred peers add them to the connection // If we have preferred peers add them to the connection
match peers_preferred_list { match peers_preferred_list {
Some(mut peers_preferred) => peer_addrs.append(&mut peers_preferred), Some(mut peers_preferred) => peer_addrs.append(&mut peers_preferred),
None => debug!(LOGGER, "No preferred peers"), None => debug!("No preferred peers"),
}; };
if peer_addrs.len() == 0 { if peer_addrs.len() == 0 {
warn!(LOGGER, "No seeds were retrieved."); warn!("No seeds were retrieved.");
} }
// connect to this first set of addresses // connect to this first set of addresses
@ -311,7 +309,7 @@ pub fn dns_seeds() -> Box<Fn() -> Vec<SocketAddr> + Send> {
let mut addresses: Vec<SocketAddr> = vec![]; let mut addresses: Vec<SocketAddr> = vec![];
for dns_seed in DNS_SEEDS { for dns_seed in DNS_SEEDS {
let temp_addresses = addresses.clone(); let temp_addresses = addresses.clone();
debug!(LOGGER, "Retrieving seed nodes from dns {}", dns_seed); debug!("Retrieving seed nodes from dns {}", dns_seed);
match (dns_seed.to_owned(), 0).to_socket_addrs() { match (dns_seed.to_owned(), 0).to_socket_addrs() {
Ok(addrs) => addresses.append( Ok(addrs) => addresses.append(
&mut (addrs &mut (addrs
@ -321,13 +319,10 @@ pub fn dns_seeds() -> Box<Fn() -> Vec<SocketAddr> + Send> {
}).filter(|addr| !temp_addresses.contains(addr)) }).filter(|addr| !temp_addresses.contains(addr))
.collect()), .collect()),
), ),
Err(e) => debug!( Err(e) => debug!("Failed to resolve seed {:?} got error {:?}", dns_seed, e),
LOGGER,
"Failed to resolve seed {:?} got error {:?}", dns_seed, e
),
} }
} }
debug!(LOGGER, "Retrieved seed addresses: {:?}", addresses); debug!("Retrieved seed addresses: {:?}", addresses);
addresses addresses
}) })
} }

View file

@ -39,7 +39,6 @@ use p2p;
use pool; use pool;
use store; use store;
use util::file::get_first_line; use util::file::get_first_line;
use util::LOGGER;
/// Grin server holding internal structures. /// Grin server holding internal structures.
pub struct Server { pub struct Server {
@ -156,7 +155,7 @@ impl Server {
global::ChainTypes::Mainnet => genesis::genesis_testnet2(), //TODO: Fix, obviously global::ChainTypes::Mainnet => genesis::genesis_testnet2(), //TODO: Fix, obviously
}; };
info!(LOGGER, "Starting server, genesis block: {}", genesis.hash()); info!("Starting server, genesis block: {}", genesis.hash());
let db_env = Arc::new(store::new_env(config.db_root.clone())); let db_env = Arc::new(store::new_env(config.db_root.clone()));
let shared_chain = Arc::new(chain::Chain::init( let shared_chain = Arc::new(chain::Chain::init(
@ -205,10 +204,7 @@ impl Server {
if config.p2p_config.seeding_type.clone() != p2p::Seeding::Programmatic { if config.p2p_config.seeding_type.clone() != p2p::Seeding::Programmatic {
let seeder = match config.p2p_config.seeding_type.clone() { let seeder = match config.p2p_config.seeding_type.clone() {
p2p::Seeding::None => { p2p::Seeding::None => {
warn!( warn!("No seed configured, will stay solo until connected to");
LOGGER,
"No seed configured, will stay solo until connected to"
);
seed::predefined_seeds(vec![]) seed::predefined_seeds(vec![])
} }
p2p::Seeding::List => { p2p::Seeding::List => {
@ -255,7 +251,7 @@ impl Server {
.name("p2p-server".to_string()) .name("p2p-server".to_string())
.spawn(move || p2p_inner.listen()); .spawn(move || p2p_inner.listen());
info!(LOGGER, "Starting rest apis at: {}", &config.api_http_addr); info!("Starting rest apis at: {}", &config.api_http_addr);
let api_secret = get_first_line(config.api_secret_path.clone()); let api_secret = get_first_line(config.api_secret_path.clone());
api::start_rest_apis( api::start_rest_apis(
config.api_http_addr.clone(), config.api_http_addr.clone(),
@ -266,10 +262,7 @@ impl Server {
None, None,
); );
info!( info!("Starting dandelion monitor: {}", &config.api_http_addr);
LOGGER,
"Starting dandelion monitor: {}", &config.api_http_addr
);
dandelion_monitor::monitor_transactions( dandelion_monitor::monitor_transactions(
config.dandelion_config.clone(), config.dandelion_config.clone(),
tx_pool.clone(), tx_pool.clone(),
@ -277,7 +270,7 @@ impl Server {
stop.clone(), stop.clone(),
); );
warn!(LOGGER, "Grin server started."); warn!("Grin server started.");
Ok(Server { Ok(Server {
config, config,
p2p: p2p_server, p2p: p2p_server,
@ -336,7 +329,7 @@ impl Server {
/// internal miner, and should only be used for automated testing. Burns /// internal miner, and should only be used for automated testing. Burns
/// reward if wallet_listener_url is 'None' /// reward if wallet_listener_url is 'None'
pub fn start_test_miner(&self, wallet_listener_url: Option<String>, stop: Arc<AtomicBool>) { pub fn start_test_miner(&self, wallet_listener_url: Option<String>, stop: Arc<AtomicBool>) {
info!(LOGGER, "start_test_miner - start",); info!("start_test_miner - start",);
let sync_state = self.sync_state.clone(); let sync_state = self.sync_state.clone();
let config_wallet_url = match wallet_listener_url.clone() { let config_wallet_url = match wallet_listener_url.clone() {
Some(u) => u, Some(u) => u,
@ -467,6 +460,6 @@ impl Server {
/// Stops the test miner without stopping the p2p layer /// Stops the test miner without stopping the p2p layer
pub fn stop_test_miner(&self, stop: Arc<AtomicBool>) { pub fn stop_test_miner(&self, stop: Arc<AtomicBool>) {
stop.store(true, Ordering::Relaxed); stop.store(true, Ordering::Relaxed);
info!(LOGGER, "stop_test_miner - stop",); info!("stop_test_miner - stop",);
} }
} }
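
The server startup messages above go through the same facade; with log4rs as the backend, stdout and the log file can run at different levels by attaching a threshold filter to one appender. Another rough sketch under the same log4rs 0.8-era API assumption as earlier, with illustrative names:

```rust
#[macro_use]
extern crate log;
extern crate log4rs;

use log::LevelFilter;
use log4rs::append::console::ConsoleAppender;
use log4rs::append::file::FileAppender;
use log4rs::config::{Appender, Config, Root};
use log4rs::filter::threshold::ThresholdFilter;

fn init_split_levels() {
    let stdout = ConsoleAppender::builder().build();
    let file = FileAppender::builder()
        .build("grin.log")
        .expect("cannot open log file");

    let config = Config::builder()
        // Only warnings and errors reach the terminal...
        .appender(
            Appender::builder()
                .filter(Box::new(ThresholdFilter::new(LevelFilter::Warn)))
                .build("stdout", Box::new(stdout)),
        )
        // ...while the file keeps everything the root level lets through.
        .appender(Appender::builder().build("file", Box::new(file)))
        .build(
            Root::builder()
                .appender("stdout")
                .appender("file")
                .build(LevelFilter::Debug),
        ).expect("invalid logging config");

    log4rs::init_config(config).expect("logger already installed");
}

fn main() {
    init_split_levels();
    warn!("Grin server started."); // terminal and file
    debug!("Starting rest apis");  // file only
}
```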

View file

@ -22,7 +22,6 @@ use common::types::{SyncState, SyncStatus};
use core::core::hash::{Hash, Hashed, ZERO_HASH}; use core::core::hash::{Hash, Hashed, ZERO_HASH};
use core::global; use core::global;
use p2p; use p2p;
use util::LOGGER;
pub struct BodySync { pub struct BodySync {
chain: Arc<chain::Chain>, chain: Arc<chain::Chain>,
@ -94,7 +93,6 @@ impl BodySync {
self.reset(); self.reset();
debug!( debug!(
LOGGER,
"body_sync: body_head - {}, {}, header_head - {}, {}, sync_head - {}, {}", "body_sync: body_head - {}, {}, header_head - {}, {}, sync_head - {}, {}",
body_head.last_block_h, body_head.last_block_h,
body_head.height, body_head.height,
@ -148,7 +146,6 @@ impl BodySync {
if hashes_to_get.len() > 0 { if hashes_to_get.len() > 0 {
debug!( debug!(
LOGGER,
"block_sync: {}/{} requesting blocks {:?} from {} peers", "block_sync: {}/{} requesting blocks {:?} from {} peers",
body_head.height, body_head.height,
header_head.height, header_head.height,
@ -161,7 +158,7 @@ impl BodySync {
for hash in hashes_to_get.clone() { for hash in hashes_to_get.clone() {
if let Some(peer) = peers_iter.next() { if let Some(peer) = peers_iter.next() {
if let Err(e) = peer.send_block_request(*hash) { if let Err(e) = peer.send_block_request(*hash) {
debug!(LOGGER, "Skipped request to {}: {:?}", peer.info.addr, e); debug!("Skipped request to {}: {:?}", peer.info.addr, e);
} else { } else {
self.body_sync_hashes.push(hash.clone()); self.body_sync_hashes.push(hash.clone());
} }
@ -199,7 +196,6 @@ impl BodySync {
.filter(|x| !self.chain.get_block(*x).is_ok() && !self.chain.is_orphan(*x)) .filter(|x| !self.chain.get_block(*x).is_ok() && !self.chain.is_orphan(*x))
.collect::<Vec<_>>(); .collect::<Vec<_>>();
debug!( debug!(
LOGGER,
"body_sync: {}/{} blocks received, and no more in 200ms", "body_sync: {}/{} blocks received, and no more in 200ms",
self.body_sync_hashes.len() - hashes_not_get.len(), self.body_sync_hashes.len() - hashes_not_get.len(),
self.body_sync_hashes.len(), self.body_sync_hashes.len(),
@ -210,7 +206,6 @@ impl BodySync {
None => { None => {
if Utc::now() - self.sync_start_ts > Duration::seconds(5) { if Utc::now() - self.sync_start_ts > Duration::seconds(5) {
debug!( debug!(
LOGGER,
"body_sync: 0/{} blocks received in 5s", "body_sync: 0/{} blocks received in 5s",
self.body_sync_hashes.len(), self.body_sync_hashes.len(),
); );

View file

@ -20,7 +20,6 @@ use chain;
use common::types::{Error, SyncState, SyncStatus}; use common::types::{Error, SyncState, SyncStatus};
use core::core::hash::{Hash, Hashed}; use core::core::hash::{Hash, Hashed};
use p2p::{self, Peer}; use p2p::{self, Peer};
use util::LOGGER;
pub struct HeaderSync { pub struct HeaderSync {
sync_state: Arc<SyncState>, sync_state: Arc<SyncState>,
@ -60,7 +59,6 @@ impl HeaderSync {
// but ONLY on initial transition to HeaderSync state. // but ONLY on initial transition to HeaderSync state.
let sync_head = self.chain.get_sync_head().unwrap(); let sync_head = self.chain.get_sync_head().unwrap();
debug!( debug!(
LOGGER,
"sync: initial transition to HeaderSync. sync_head: {} at {}, reset to: {} at {}", "sync: initial transition to HeaderSync. sync_head: {} at {}, reset to: {} at {}",
sync_head.hash(), sync_head.hash(),
sync_head.height, sync_head.height,
@ -141,8 +139,8 @@ impl HeaderSync {
fn request_headers(&mut self, peer: &Peer) { fn request_headers(&mut self, peer: &Peer) {
if let Ok(locator) = self.get_locator() { if let Ok(locator) = self.get_locator() {
debug!( debug!(
LOGGER, "sync: request_headers: asking {} for headers, {:?}",
"sync: request_headers: asking {} for headers, {:?}", peer.info.addr, locator, peer.info.addr, locator,
); );
let _ = peer.send_header_request(locator); let _ = peer.send_header_request(locator);
@ -165,7 +163,7 @@ impl HeaderSync {
self.history_locators.clear(); self.history_locators.clear();
} }
debug!(LOGGER, "sync: locator heights : {:?}", heights); debug!("sync: locator heights : {:?}", heights);
let mut locator: Vec<Hash> = vec![]; let mut locator: Vec<Hash> = vec![];
let mut current = self.chain.get_block_header(&tip.last_block_h); let mut current = self.chain.get_block_header(&tip.last_block_h);
@ -237,7 +235,7 @@ impl HeaderSync {
} }
} }
debug!(LOGGER, "sync: locator heights': {:?}", new_heights); debug!("sync: locator heights': {:?}", new_heights);
// shrink history_locators properly // shrink history_locators properly
if heights.len() > 1 { if heights.len() > 1 {
@ -258,14 +256,13 @@ impl HeaderSync {
} }
} }
debug!( debug!(
LOGGER,
"sync: history locators: len={}, shrunk={}", "sync: history locators: len={}, shrunk={}",
self.history_locators.len(), self.history_locators.len(),
shrunk_size shrunk_size
); );
} }
debug!(LOGGER, "sync: locator: {:?}", locator); debug!("sync: locator: {:?}", locator);
Ok(locator) Ok(locator)
} }

View file

@ -21,7 +21,6 @@ use common::types::{Error, SyncState, SyncStatus};
use core::core::hash::Hashed; use core::core::hash::Hashed;
use core::global; use core::global;
use p2p::{self, Peer}; use p2p::{self, Peer};
use util::LOGGER;
/// Fast sync has 3 "states": /// Fast sync has 3 "states":
/// * syncing headers /// * syncing headers
@ -77,10 +76,7 @@ impl StateSync {
{ {
let clone = self.sync_state.sync_error(); let clone = self.sync_state.sync_error();
if let Some(ref sync_error) = *clone.read() { if let Some(ref sync_error) = *clone.read() {
error!( error!("fast_sync: error = {:?}. restart fast sync", sync_error);
LOGGER,
"fast_sync: error = {:?}. restart fast sync", sync_error
);
sync_need_restart = true; sync_need_restart = true;
} }
drop(clone); drop(clone);
@ -92,8 +88,8 @@ impl StateSync {
if !peer.is_connected() { if !peer.is_connected() {
sync_need_restart = true; sync_need_restart = true;
info!( info!(
LOGGER, "fast_sync: peer connection lost: {:?}. restart",
"fast_sync: peer connection lost: {:?}. restart", peer.info.addr, peer.info.addr,
); );
} }
} }
@ -110,10 +106,7 @@ impl StateSync {
if let SyncStatus::TxHashsetDownload { .. } = self.sync_state.status() { if let SyncStatus::TxHashsetDownload { .. } = self.sync_state.status() {
if download_timeout { if download_timeout {
error!( error!("fast_sync: TxHashsetDownload status timeout in 10 minutes!");
LOGGER,
"fast_sync: TxHashsetDownload status timeout in 10 minutes!"
);
self.sync_state self.sync_state
.set_sync_error(Error::P2P(p2p::Error::Timeout)); .set_sync_error(Error::P2P(p2p::Error::Timeout));
} }
@ -168,7 +161,6 @@ impl StateSync {
} }
let bhash = txhashset_head.hash(); let bhash = txhashset_head.hash();
debug!( debug!(
LOGGER,
"fast_sync: before txhashset request, header head: {} / {}, txhashset_head: {} / {}", "fast_sync: before txhashset request, header head: {} / {}, txhashset_head: {} / {}",
header_head.height, header_head.height,
header_head.last_block_h, header_head.last_block_h,
@ -176,7 +168,7 @@ impl StateSync {
bhash bhash
); );
if let Err(e) = peer.send_txhashset_request(txhashset_head.height, bhash) { if let Err(e) = peer.send_txhashset_request(txhashset_head.height, bhash) {
error!(LOGGER, "fast_sync: send_txhashset_request err! {:?}", e); error!("fast_sync: send_txhashset_request err! {:?}", e);
return Err(e); return Err(e);
} }
return Ok(peer.clone()); return Ok(peer.clone());

View file

@ -24,7 +24,6 @@ use grin::sync::body_sync::BodySync;
use grin::sync::header_sync::HeaderSync; use grin::sync::header_sync::HeaderSync;
use grin::sync::state_sync::StateSync; use grin::sync::state_sync::StateSync;
use p2p::{self, Peers}; use p2p::{self, Peers};
use util::LOGGER;
pub fn run_sync( pub fn run_sync(
sync_state: Arc<SyncState>, sync_state: Arc<SyncState>,
@ -164,7 +163,6 @@ fn needs_syncing(
if peer.info.total_difficulty() <= local_diff { if peer.info.total_difficulty() <= local_diff {
let ch = chain.head().unwrap(); let ch = chain.head().unwrap();
info!( info!(
LOGGER,
"synchronized at {} @ {} [{}]", "synchronized at {} @ {} [{}]",
local_diff.to_num(), local_diff.to_num(),
ch.height, ch.height,
@ -175,7 +173,7 @@ fn needs_syncing(
return (false, most_work_height); return (false, most_work_height);
} }
} else { } else {
warn!(LOGGER, "sync: no peers available, disabling sync"); warn!("sync: no peers available, disabling sync");
return (false, 0); return (false, 0);
} }
} else { } else {
@ -192,7 +190,6 @@ fn needs_syncing(
let peer_diff = peer.info.total_difficulty(); let peer_diff = peer.info.total_difficulty();
if peer_diff > local_diff.clone() + threshold.clone() { if peer_diff > local_diff.clone() + threshold.clone() {
info!( info!(
LOGGER,
"sync: total_difficulty {}, peer_difficulty {}, threshold {} (last 5 blocks), enabling sync", "sync: total_difficulty {}, peer_difficulty {}, threshold {} (last 5 blocks), enabling sync",
local_diff, local_diff,
peer_diff, peer_diff,

View file

@ -35,7 +35,7 @@ extern crate serde;
extern crate serde_derive; extern crate serde_derive;
extern crate serde_json; extern crate serde_json;
#[macro_use] #[macro_use]
extern crate slog; extern crate log;
extern crate chrono; extern crate chrono;
extern crate grin_api as api; extern crate grin_api as api;

View file

@ -28,7 +28,7 @@ use core::core::verifier_cache::VerifierCache;
use core::{consensus, core, ser}; use core::{consensus, core, ser};
use keychain::{ExtKeychain, Identifier, Keychain}; use keychain::{ExtKeychain, Identifier, Keychain};
use pool; use pool;
use util::{self, LOGGER}; use util;
use wallet::{self, BlockFees}; use wallet::{self, BlockFees};
// Ensure a block suitable for mining is built and returned // Ensure a block suitable for mining is built and returned
@ -55,24 +55,22 @@ pub fn get_block(
self::Error::Chain(c) => match c.kind() { self::Error::Chain(c) => match c.kind() {
chain::ErrorKind::DuplicateCommitment(_) => { chain::ErrorKind::DuplicateCommitment(_) => {
debug!( debug!(
LOGGER,
"Duplicate commit for potential coinbase detected. Trying next derivation." "Duplicate commit for potential coinbase detected. Trying next derivation."
); );
} }
_ => { _ => {
error!(LOGGER, "Chain Error: {}", c); error!("Chain Error: {}", c);
} }
}, },
self::Error::Wallet(_) => { self::Error::Wallet(_) => {
error!( error!(
LOGGER,
"Error building new block: Can't connect to wallet listener at {:?}; will retry", "Error building new block: Can't connect to wallet listener at {:?}; will retry",
wallet_listener_url.as_ref().unwrap() wallet_listener_url.as_ref().unwrap()
); );
thread::sleep(Duration::from_secs(wallet_retry_interval)); thread::sleep(Duration::from_secs(wallet_retry_interval));
} }
ae => { ae => {
warn!(LOGGER, "Error building new block: {:?}. Retrying.", ae); warn!("Error building new block: {:?}. Retrying.", ae);
} }
} }
thread::sleep(Duration::from_millis(100)); thread::sleep(Duration::from_millis(100));
@ -134,7 +132,6 @@ fn build_block(
let b_difficulty = (b.header.total_difficulty() - head.total_difficulty()).to_num(); let b_difficulty = (b.header.total_difficulty() - head.total_difficulty()).to_num();
debug!( debug!(
LOGGER,
"Built new block with {} inputs and {} outputs, network difficulty: {}, cumulative difficulty {}", "Built new block with {} inputs and {} outputs, network difficulty: {}, cumulative difficulty {}",
b.inputs().len(), b.inputs().len(),
b.outputs().len(), b.outputs().len(),
@ -159,10 +156,7 @@ fn build_block(
//Some other issue, possibly duplicate kernel //Some other issue, possibly duplicate kernel
_ => { _ => {
error!( error!("Error setting txhashset root to build a block: {:?}", e);
LOGGER,
"Error setting txhashset root to build a block: {:?}", e
);
Err(Error::Chain( Err(Error::Chain(
chain::ErrorKind::Other(format!("{:?}", e)).into(), chain::ErrorKind::Other(format!("{:?}", e)).into(),
)) ))
@ -176,7 +170,7 @@ fn build_block(
/// Probably only want to do this when testing. /// Probably only want to do this when testing.
/// ///
fn burn_reward(block_fees: BlockFees) -> Result<(core::Output, core::TxKernel, BlockFees), Error> { fn burn_reward(block_fees: BlockFees) -> Result<(core::Output, core::TxKernel, BlockFees), Error> {
warn!(LOGGER, "Burning block fees: {:?}", block_fees); warn!("Burning block fees: {:?}", block_fees);
let keychain = ExtKeychain::from_random_seed().unwrap(); let keychain = ExtKeychain::from_random_seed().unwrap();
let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0); let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let (out, kernel) = let (out, kernel) =
@ -209,7 +203,7 @@ fn get_coinbase(
..block_fees ..block_fees
}; };
debug!(LOGGER, "get_coinbase: {:?}", block_fees); debug!("get_coinbase: {:?}", block_fees);
return Ok((output, kernel, block_fees)); return Ok((output, kernel, block_fees));
} }
} }

View file

@ -35,7 +35,7 @@ use core::{pow, ser};
use keychain; use keychain;
use mining::mine_block; use mining::mine_block;
use pool; use pool;
use util::{self, LOGGER}; use util;
// ---------------------------------------- // ----------------------------------------
// http://www.jsonrpc.org/specification // http://www.jsonrpc.org/specification
@ -114,7 +114,6 @@ fn accept_workers(
match stream { match stream {
Ok(stream) => { Ok(stream) => {
warn!( warn!(
LOGGER,
"(Server ID: {}) New connection: {}", "(Server ID: {}) New connection: {}",
id, id,
stream.peer_addr().unwrap() stream.peer_addr().unwrap()
@ -135,10 +134,7 @@ fn accept_workers(
worker_id = worker_id + 1; worker_id = worker_id + 1;
} }
Err(e) => { Err(e) => {
warn!( warn!("(Server ID: {}) Error accepting connection: {:?}", id, e);
LOGGER,
"(Server ID: {}) Error accepting connection: {:?}", id, e
);
} }
} }
} }
@ -185,8 +181,8 @@ impl Worker {
} }
Err(e) => { Err(e) => {
warn!( warn!(
LOGGER, "(Server ID: {}) Error in connection with stratum client: {}",
"(Server ID: {}) Error in connection with stratum client: {}", self.id, e self.id, e
); );
self.error = true; self.error = true;
return None; return None;
@ -206,16 +202,16 @@ impl Worker {
Ok(_) => {} Ok(_) => {}
Err(e) => { Err(e) => {
warn!( warn!(
LOGGER, "(Server ID: {}) Error in connection with stratum client: {}",
"(Server ID: {}) Error in connection with stratum client: {}", self.id, e self.id, e
); );
self.error = true; self.error = true;
} }
}, },
Err(e) => { Err(e) => {
warn!( warn!(
LOGGER, "(Server ID: {}) Error in connection with stratum client: {}",
"(Server ID: {}) Error in connection with stratum client: {}", self.id, e self.id, e
); );
self.error = true; self.error = true;
return; return;
@ -296,7 +292,6 @@ impl StratumServer {
Err(e) => { Err(e) => {
// not a valid JSON RpcRequest - disconnect the worker // not a valid JSON RpcRequest - disconnect the worker
warn!( warn!(
LOGGER,
"(Server ID: {}) Failed to parse JSONRpc: {} - {:?}", "(Server ID: {}) Failed to parse JSONRpc: {} - {:?}",
self.id, self.id,
e.description(), e.description(),
@ -409,11 +404,8 @@ impl StratumServer {
let job_template = self.build_block_template(); let job_template = self.build_block_template();
let response = serde_json::to_value(&job_template).unwrap(); let response = serde_json::to_value(&job_template).unwrap();
debug!( debug!(
LOGGER,
"(Server ID: {}) sending block {} with id {} to single worker", "(Server ID: {}) sending block {} with id {} to single worker",
self.id, self.id, job_template.height, job_template.job_id,
job_template.height,
job_template.job_id,
); );
return Ok(response); return Ok(response);
} }
@ -452,8 +444,8 @@ impl StratumServer {
if params.height != self.current_block_versions.last().unwrap().header.height { if params.height != self.current_block_versions.last().unwrap().header.height {
// Return error status // Return error status
error!( error!(
LOGGER, "(Server ID: {}) Share at height {} submitted too late",
"(Server ID: {}) Share at height {} submitted too late", self.id, params.height, self.id, params.height,
); );
worker_stats.num_stale += 1; worker_stats.num_stale += 1;
let e = RpcError { let e = RpcError {
@ -467,11 +459,8 @@ impl StratumServer {
if b.is_none() { if b.is_none() {
// Return error status // Return error status
error!( error!(
LOGGER,
"(Server ID: {}) Failed to validate solution at height {}: invalid job_id {}", "(Server ID: {}) Failed to validate solution at height {}: invalid job_id {}",
self.id, self.id, params.height, params.job_id,
params.height,
params.job_id,
); );
worker_stats.num_rejected += 1; worker_stats.num_rejected += 1;
let e = RpcError { let e = RpcError {
@ -491,11 +480,8 @@ impl StratumServer {
if share_difficulty < self.minimum_share_difficulty { if share_difficulty < self.minimum_share_difficulty {
// Return error status // Return error status
error!( error!(
LOGGER,
"(Server ID: {}) Share rejected due to low difficulty: {}/{}", "(Server ID: {}) Share rejected due to low difficulty: {}/{}",
self.id, self.id, share_difficulty, self.minimum_share_difficulty,
share_difficulty,
self.minimum_share_difficulty,
); );
worker_stats.num_rejected += 1; worker_stats.num_rejected += 1;
let e = RpcError { let e = RpcError {
@ -511,7 +497,6 @@ impl StratumServer {
if let Err(e) = res { if let Err(e) = res {
// Return error status // Return error status
error!( error!(
LOGGER,
"(Server ID: {}) Failed to validate solution at height {}: {}: {}", "(Server ID: {}) Failed to validate solution at height {}: {}: {}",
self.id, self.id,
params.height, params.height,
@ -528,15 +513,14 @@ impl StratumServer {
share_is_block = true; share_is_block = true;
// Log message to make it obvious we found a block // Log message to make it obvious we found a block
warn!( warn!(
LOGGER, "(Server ID: {}) Solution Found for block {} - Yay!!!",
"(Server ID: {}) Solution Found for block {} - Yay!!!", self.id, params.height self.id, params.height
); );
} else { } else {
// Do some validation but don't submit // Do some validation but don't submit
if !pow::verify_size(&b.header, b.header.pow.proof.edge_bits).is_ok() { if !pow::verify_size(&b.header, b.header.pow.proof.edge_bits).is_ok() {
// Return error status // Return error status
error!( error!(
LOGGER,
"(Server ID: {}) Failed to validate share at height {} with nonce {} using job_id {}", "(Server ID: {}) Failed to validate share at height {} with nonce {} using job_id {}",
self.id, self.id,
params.height, params.height,
@ -557,7 +541,6 @@ impl StratumServer {
Some(login) => login.clone(), Some(login) => login.clone(),
}; };
info!( info!(
LOGGER,
"(Server ID: {}) Got share for block: hash {}, height {}, nonce {}, difficulty {}/{}, submitted by {}", "(Server ID: {}) Got share for block: hash {}, height {}, nonce {}, difficulty {}/{}, submitted by {}",
self.id, self.id,
b.hash(), b.hash(),
@ -588,11 +571,9 @@ impl StratumServer {
for num in start..workers_l.len() { for num in start..workers_l.len() {
if workers_l[num].error == true { if workers_l[num].error == true {
warn!( warn!(
LOGGER, "(Server ID: {}) Dropping worker: {}",
"(Server ID: {}) Dropping worker: {}", self.id, workers_l[num].id
self.id, );
workers_l[num].id;
);
// Update worker stats // Update worker stats
let mut stratum_stats = stratum_stats.write(); let mut stratum_stats = stratum_stats.write();
let worker_stats_id = stratum_stats let worker_stats_id = stratum_stats
@ -631,11 +612,8 @@ impl StratumServer {
}; };
let job_request_json = serde_json::to_string(&job_request).unwrap(); let job_request_json = serde_json::to_string(&job_request).unwrap();
debug!( debug!(
LOGGER,
"(Server ID: {}) sending block {} with id {} to stratum clients", "(Server ID: {}) sending block {} with id {} to stratum clients",
self.id, self.id, job_template.height, job_template.job_id,
job_template.height,
job_template.job_id,
); );
// Push the new block to all connected clients // Push the new block to all connected clients
// NOTE: We do not give a unique nonce (should we?) so miners need // NOTE: We do not give a unique nonce (should we?) so miners need
@ -659,11 +637,8 @@ impl StratumServer {
sync_state: Arc<SyncState>, sync_state: Arc<SyncState>,
) { ) {
info!( info!(
LOGGER,
"(Server ID: {}) Starting stratum server with edge_bits = {}, proof_size = {}", "(Server ID: {}) Starting stratum server with edge_bits = {}, proof_size = {}",
self.id, self.id, edge_bits, proof_size
edge_bits,
proof_size
); );
self.sync_state = sync_state; self.sync_state = sync_state;
@ -698,7 +673,6 @@ impl StratumServer {
} }
warn!( warn!(
LOGGER,
"Stratum server started on {}", "Stratum server started on {}",
self.config.stratum_server_addr.clone().unwrap() self.config.stratum_server_addr.clone().unwrap()
); );

View file

@ -31,7 +31,6 @@ use core::global;
use core::pow::PoWContext; use core::pow::PoWContext;
use mining::mine_block; use mining::mine_block;
use pool; use pool;
use util::LOGGER;
pub struct Miner { pub struct Miner {
config: StratumServerConfig, config: StratumServerConfig,
@ -85,7 +84,6 @@ impl Miner {
let deadline = Utc::now().timestamp() + attempt_time_per_block as i64; let deadline = Utc::now().timestamp() + attempt_time_per_block as i64;
debug!( debug!(
LOGGER,
"(Server ID: {}) Mining Cuckoo{} for max {}s on {} @ {} [{}].", "(Server ID: {}) Mining Cuckoo{} for max {}s on {} @ {} [{}].",
self.debug_output_id, self.debug_output_id,
global::min_edge_bits(), global::min_edge_bits(),
@ -116,10 +114,8 @@ impl Miner {
} }
debug!( debug!(
LOGGER,
"(Server ID: {}) No solution found after {} iterations, continuing...", "(Server ID: {}) No solution found after {} iterations, continuing...",
self.debug_output_id, self.debug_output_id, iter_count
iter_count
); );
false false
} }
@ -128,8 +124,8 @@ impl Miner {
/// chain anytime required and looking for PoW solution. /// chain anytime required and looking for PoW solution.
pub fn run_loop(&self, wallet_listener_url: Option<String>) { pub fn run_loop(&self, wallet_listener_url: Option<String>) {
info!( info!(
LOGGER, "(Server ID: {}) Starting test miner loop.",
"(Server ID: {}) Starting test miner loop.", self.debug_output_id self.debug_output_id
); );
// iteration, we keep the returned derivation to provide it back when // iteration, we keep the returned derivation to provide it back when
@ -137,7 +133,7 @@ impl Miner {
let mut key_id = None; let mut key_id = None;
while !self.stop.load(Ordering::Relaxed) { while !self.stop.load(Ordering::Relaxed) {
trace!(LOGGER, "in miner loop. key_id: {:?}", key_id); trace!("in miner loop. key_id: {:?}", key_id);
// get the latest chain state and build a block on top of it // get the latest chain state and build a block on top of it
let head = self.chain.head_header().unwrap(); let head = self.chain.head_header().unwrap();
@ -161,7 +157,6 @@ impl Miner {
// we found a solution, push our block through the chain processing pipeline // we found a solution, push our block through the chain processing pipeline
if sol { if sol {
info!( info!(
LOGGER,
"(Server ID: {}) Found valid proof of work, adding block {}.", "(Server ID: {}) Found valid proof of work, adding block {}.",
self.debug_output_id, self.debug_output_id,
b.hash() b.hash()
@ -169,26 +164,21 @@ impl Miner {
let res = self.chain.process_block(b, chain::Options::MINE); let res = self.chain.process_block(b, chain::Options::MINE);
if let Err(e) = res { if let Err(e) = res {
error!( error!(
LOGGER,
"(Server ID: {}) Error validating mined block: {:?}", "(Server ID: {}) Error validating mined block: {:?}",
self.debug_output_id, self.debug_output_id, e
e
); );
} }
trace!(LOGGER, "resetting key_id in miner to None"); trace!("resetting key_id in miner to None");
key_id = None; key_id = None;
} else { } else {
debug!( debug!(
LOGGER, "setting pubkey in miner to pubkey from block_fees - {:?}",
"setting pubkey in miner to pubkey from block_fees - {:?}", block_fees block_fees
); );
key_id = block_fees.key_id(); key_id = block_fees.key_id();
} }
} }
info!( info!("(Server ID: {}) test miner exit.", self.debug_output_id);
LOGGER,
"(Server ID: {}) test miner exit.", self.debug_output_id
);
} }
} }

View file

@ -25,8 +25,6 @@ use std::env;
use std::io::Error; use std::io::Error;
use std::thread; use std::thread;
use util::LOGGER;
/// Future returned from `MainService`. /// Future returned from `MainService`.
enum MainFuture { enum MainFuture {
Root, Root,
@ -94,10 +92,7 @@ pub fn start_webwallet_server() {
let server = Server::bind(&addr) let server = Server::bind(&addr)
.serve(|| future::ok::<_, Error>(MainService::new())) .serve(|| future::ok::<_, Error>(MainService::new()))
.map_err(|e| eprintln!("server error: {}", e)); .map_err(|e| eprintln!("server error: {}", e));
warn!( warn!("Grin Web-Wallet Application is running at http://{}/", addr);
LOGGER,
"Grin Web-Wallet Application is running at http://{}/", addr
);
rt::run(server); rt::run(server);
}); });
} }

View file

@ -13,7 +13,7 @@
// limitations under the License. // limitations under the License.
#[macro_use] #[macro_use]
extern crate slog; extern crate log;
extern crate grin_api as api; extern crate grin_api as api;
extern crate grin_chain as chain; extern crate grin_chain as chain;
@ -33,12 +33,13 @@ use util::Mutex;
use core::global::{self, ChainTypes}; use core::global::{self, ChainTypes};
use framework::{LocalServerContainer, LocalServerContainerConfig}; use framework::{LocalServerContainer, LocalServerContainerConfig};
use util::{init_test_logger, LOGGER}; use util::init_test_logger;
#[test] #[test]
fn simple_server_wallet() { fn simple_server_wallet() {
init_test_logger(); init_test_logger();
info!(LOGGER, "starting simple_server_wallet"); info!("starting simple_server_wallet");
let test_name_dir = "test_servers";
core::global::set_mining_mode(core::global::ChainTypes::AutomatedTesting); core::global::set_mining_mode(core::global::ChainTypes::AutomatedTesting);
// Run a separate coinbase wallet for coinbase transactions // Run a separate coinbase wallet for coinbase transactions
@ -82,11 +83,11 @@ fn simple_server_wallet() {
let base_addr = server_config.base_addr; let base_addr = server_config.base_addr;
let api_server_port = server_config.api_server_port; let api_server_port = server_config.api_server_port;
warn!(LOGGER, "Testing chain handler"); warn!("Testing chain handler");
let tip = get_tip(&base_addr, api_server_port); let tip = get_tip(&base_addr, api_server_port);
assert!(tip.is_ok()); assert!(tip.is_ok());
warn!(LOGGER, "Testing status handler"); warn!("Testing status handler");
let status = get_status(&base_addr, api_server_port); let status = get_status(&base_addr, api_server_port);
assert!(status.is_ok()); assert!(status.is_ok());
@ -97,7 +98,7 @@ fn simple_server_wallet() {
current_tip = get_tip(&base_addr, api_server_port).unwrap(); current_tip = get_tip(&base_addr, api_server_port).unwrap();
} }
warn!(LOGGER, "Testing block handler"); warn!("Testing block handler");
let last_block_by_height = get_block_by_height(&base_addr, api_server_port, current_tip.height); let last_block_by_height = get_block_by_height(&base_addr, api_server_port, current_tip.height);
assert!(last_block_by_height.is_ok()); assert!(last_block_by_height.is_ok());
let last_block_by_height_compact = let last_block_by_height_compact =
@ -111,7 +112,7 @@ fn simple_server_wallet() {
get_block_by_hash_compact(&base_addr, api_server_port, &block_hash); get_block_by_hash_compact(&base_addr, api_server_port, &block_hash);
assert!(last_block_by_hash_compact.is_ok()); assert!(last_block_by_hash_compact.is_ok());
warn!(LOGGER, "Testing chain output handler"); warn!("Testing chain output handler");
let start_height = 0; let start_height = 0;
let end_height = current_tip.height; let end_height = current_tip.height;
let outputs_by_height = let outputs_by_height =
@ -123,7 +124,7 @@ fn simple_server_wallet() {
let outputs_by_ids2 = get_outputs_by_ids2(&base_addr, api_server_port, ids.clone()); let outputs_by_ids2 = get_outputs_by_ids2(&base_addr, api_server_port, ids.clone());
assert!(outputs_by_ids2.is_ok()); assert!(outputs_by_ids2.is_ok());
warn!(LOGGER, "Testing txhashset handler"); warn!("Testing txhashset handler");
let roots = get_txhashset_roots(&base_addr, api_server_port); let roots = get_txhashset_roots(&base_addr, api_server_port);
assert!(roots.is_ok()); assert!(roots.is_ok());
let last_10_outputs = get_txhashset_lastoutputs(&base_addr, api_server_port, 0); let last_10_outputs = get_txhashset_lastoutputs(&base_addr, api_server_port, 0);
@ -147,7 +148,7 @@ fn simple_server_wallet() {
#[test] #[test]
fn test_p2p() { fn test_p2p() {
init_test_logger(); init_test_logger();
info!(LOGGER, "starting test_p2p"); info!("starting test_p2p");
global::set_mining_mode(ChainTypes::AutomatedTesting); global::set_mining_mode(ChainTypes::AutomatedTesting);
let test_name_dir = "test_servers"; let test_name_dir = "test_servers";
@ -188,7 +189,7 @@ fn test_p2p() {
thread::sleep(time::Duration::from_millis(2000)); thread::sleep(time::Duration::from_millis(2000));
// Starting tests // Starting tests
warn!(LOGGER, "Starting P2P Tests"); warn!("Starting P2P Tests");
let base_addr = server_config_one.base_addr; let base_addr = server_config_one.base_addr;
let api_server_port = server_config_one.api_server_port; let api_server_port = server_config_one.api_server_port;

View file

@ -13,7 +13,7 @@
// limitations under the License. // limitations under the License.
#[macro_use] #[macro_use]
extern crate slog; extern crate log;
extern crate grin_api as api; extern crate grin_api as api;
extern crate grin_chain as chain; extern crate grin_chain as chain;
@ -31,8 +31,6 @@ use std::sync::Arc;
use std::{thread, time}; use std::{thread, time};
use util::Mutex; use util::Mutex;
use util::LOGGER;
/// Start 1 node mining, 1 non mining node and two wallets. /// Start 1 node mining, 1 non mining node and two wallets.
/// Then send a transaction from one wallet to another and propagate it a stem /// Then send a transaction from one wallet to another and propagate it a stem
/// transaction but without stem relay and check if the transaction is still /// transaction but without stem relay and check if the transaction is still
@ -136,7 +134,7 @@ fn test_dandelion_timeout() {
LocalServerContainer::get_wallet_info(&coinbase_wallet_config, &coinbase_seed); LocalServerContainer::get_wallet_info(&coinbase_wallet_config, &coinbase_seed);
} }
warn!(LOGGER, "Sending 50 Grins to recipient wallet"); warn!("Sending 50 Grins to recipient wallet");
// Sending stem transaction // Sending stem transaction
LocalServerContainer::send_amount_to( LocalServerContainer::send_amount_to(

View file

@ -21,7 +21,7 @@ extern crate grin_servers as servers;
extern crate grin_util as util; extern crate grin_util as util;
extern crate grin_wallet as wallet; extern crate grin_wallet as wallet;
#[macro_use] #[macro_use]
extern crate slog; extern crate log;
mod framework; mod framework;
@ -34,7 +34,6 @@ use util::Mutex;
use core::core::hash::Hashed; use core::core::hash::Hashed;
use core::global::{self, ChainTypes}; use core::global::{self, ChainTypes};
use util::LOGGER;
use wallet::controller; use wallet::controller;
use wallet::libtx::slate::Slate; use wallet::libtx::slate::Slate;
use wallet::libwallet::types::{WalletBackend, WalletInst}; use wallet::libwallet::types::{WalletBackend, WalletInst};
@ -243,7 +242,7 @@ fn simulate_block_propagation() {
thread::sleep(time::Duration::from_millis(1_000)); thread::sleep(time::Duration::from_millis(1_000));
time_spent += 1; time_spent += 1;
if time_spent >= 30 { if time_spent >= 30 {
info!(LOGGER, "simulate_block_propagation - fail on timeout",); info!("simulate_block_propagation - fail on timeout",);
break; break;
} }
@ -285,7 +284,6 @@ fn simulate_full_sync() {
// Get the current header from s1. // Get the current header from s1.
let s1_header = s1.chain.head_header().unwrap(); let s1_header = s1.chain.head_header().unwrap();
info!( info!(
LOGGER,
"simulate_full_sync - s1 header head: {} at {}", "simulate_full_sync - s1 header head: {} at {}",
s1_header.hash(), s1_header.hash(),
s1_header.height s1_header.height
@ -298,7 +296,6 @@ fn simulate_full_sync() {
time_spent += 1; time_spent += 1;
if time_spent >= 30 { if time_spent >= 30 {
info!( info!(
LOGGER,
"sync fail. s2.head().height: {}, s1_header.height: {}", "sync fail. s2.head().height: {}, s1_header.height: {}",
s2.head().height, s2.head().height,
s1_header.height s1_header.height
@ -356,7 +353,6 @@ fn simulate_fast_sync() {
total_wait += 1; total_wait += 1;
if total_wait >= 30 { if total_wait >= 30 {
error!( error!(
LOGGER,
"simulate_fast_sync test fail on timeout! s2 height: {}, s1 height: {}", "simulate_fast_sync test fail on timeout! s2 height: {}, s1 height: {}",
s2.head().height, s2.head().height,
s1_header.height, s1_header.height,

View file

@ -23,7 +23,7 @@ extern crate grin_wallet as wallet;
extern crate bufstream; extern crate bufstream;
extern crate serde_json; extern crate serde_json;
#[macro_use] #[macro_use]
extern crate slog; extern crate log;
mod framework; mod framework;
@ -38,7 +38,6 @@ use std::sync::Arc;
use std::{thread, time}; use std::{thread, time};
use core::global::{self, ChainTypes}; use core::global::{self, ChainTypes};
use util::LOGGER;
use framework::{config, stratum_config}; use framework::{config, stratum_config};
@ -77,7 +76,7 @@ fn basic_stratum_server() {
} }
// As this stream falls out of scope it will be disconnected // As this stream falls out of scope it will be disconnected
} }
info!(LOGGER, "stratum server connected"); info!("stratum server connected");
// Create a few new worker connections // Create a few new worker connections
let mut workers = vec![]; let mut workers = vec![];
@ -89,7 +88,7 @@ fn basic_stratum_server() {
workers.push(stream); workers.push(stream);
} }
assert!(workers.len() == 5); assert!(workers.len() == 5);
info!(LOGGER, "workers length verification ok"); info!("workers length verification ok");
// Simulate a worker lost connection // Simulate a worker lost connection
workers.remove(4); workers.remove(4);
@ -118,7 +117,7 @@ fn basic_stratum_server() {
assert!(false); assert!(false);
} }
} }
info!(LOGGER, "a few stratum JSONRpc commands verification ok"); info!("a few stratum JSONRpc commands verification ok");
// keepalive - expected "ok" result // keepalive - expected "ok" result
let mut response = String::new(); let mut response = String::new();
@ -129,7 +128,7 @@ fn basic_stratum_server() {
thread::sleep(time::Duration::from_secs(1)); // Wait for the server to reply thread::sleep(time::Duration::from_secs(1)); // Wait for the server to reply
let _st = workers[2].read_line(&mut response); let _st = workers[2].read_line(&mut response);
assert_eq!(response.as_str(), ok_resp); assert_eq!(response.as_str(), ok_resp);
info!(LOGGER, "keepalive test ok"); info!("keepalive test ok");
// "doesnotexist" - error expected // "doesnotexist" - error expected
let mut response = String::new(); let mut response = String::new();
@ -140,7 +139,7 @@ fn basic_stratum_server() {
thread::sleep(time::Duration::from_secs(1)); // Wait for the server to reply thread::sleep(time::Duration::from_secs(1)); // Wait for the server to reply
let _st = workers[3].read_line(&mut response); let _st = workers[3].read_line(&mut response);
assert_eq!(response.as_str(), ok_resp); assert_eq!(response.as_str(), ok_resp);
info!(LOGGER, "worker doesnotexist test ok"); info!("worker doesnotexist test ok");
// Verify stratum server and worker stats // Verify stratum server and worker stats
let stats = s.get_server_stats().unwrap(); let stats = s.get_server_stats().unwrap();
@ -148,18 +147,18 @@ fn basic_stratum_server() {
assert_eq!(stats.stratum_stats.num_workers, 4); // 5 - 1 = 4 assert_eq!(stats.stratum_stats.num_workers, 4); // 5 - 1 = 4
assert_eq!(stats.stratum_stats.worker_stats[5].is_connected, false); // worker was removed assert_eq!(stats.stratum_stats.worker_stats[5].is_connected, false); // worker was removed
assert_eq!(stats.stratum_stats.worker_stats[1].is_connected, true); assert_eq!(stats.stratum_stats.worker_stats[1].is_connected, true);
info!(LOGGER, "stratum server and worker stats verification ok"); info!("stratum server and worker stats verification ok");
// Start mining blocks // Start mining blocks
let stop = Arc::new(AtomicBool::new(false)); let stop = Arc::new(AtomicBool::new(false));
s.start_test_miner(None, stop.clone()); s.start_test_miner(None, stop.clone());
info!(LOGGER, "test miner started"); info!("test miner started");
// This test is supposed to complete in 3 seconds, // This test is supposed to complete in 3 seconds,
// so let's set a 10s timeout to avoid waiting indefinitely in Travis-CI. // so let's set a 10s timeout to avoid waiting indefinitely in Travis-CI.
let _handler = thread::spawn(|| { let _handler = thread::spawn(|| {
thread::sleep(time::Duration::from_secs(10)); thread::sleep(time::Duration::from_secs(10));
error!(LOGGER, "basic_stratum_server test fail on timeout!"); error!("basic_stratum_server test fail on timeout!");
thread::sleep(time::Duration::from_millis(100)); thread::sleep(time::Duration::from_millis(100));
process::exit(1); process::exit(1);
}); });
@ -177,12 +176,12 @@ fn basic_stratum_server() {
let _st = workers[2].read_line(&mut jobtemplate); let _st = workers[2].read_line(&mut jobtemplate);
let job_template: Value = serde_json::from_str(&jobtemplate).unwrap(); let job_template: Value = serde_json::from_str(&jobtemplate).unwrap();
assert_eq!(job_template["method"], expected); assert_eq!(job_template["method"], expected);
info!(LOGGER, "blocks broadcasting to workers test ok"); info!("blocks broadcasting to workers test ok");
// Verify stratum server and worker stats // Verify stratum server and worker stats
let stats = s.get_server_stats().unwrap(); let stats = s.get_server_stats().unwrap();
assert_eq!(stats.stratum_stats.num_workers, 3); // 5 - 2 = 3 assert_eq!(stats.stratum_stats.num_workers, 3); // 5 - 2 = 3
assert_eq!(stats.stratum_stats.worker_stats[2].is_connected, false); // worker was removed assert_eq!(stats.stratum_stats.worker_stats[2].is_connected, false); // worker was removed
assert_ne!(stats.stratum_stats.block_height, 1); assert_ne!(stats.stratum_stats.block_height, 1);
info!(LOGGER, "basic_stratum_server test done and ok."); info!("basic_stratum_server test done and ok.");
} }

View file

@ -13,7 +13,7 @@
// limitations under the License. // limitations under the License.
#[macro_use] #[macro_use]
extern crate slog; extern crate log;
extern crate grin_api as api; extern crate grin_api as api;
extern crate grin_chain as chain; extern crate grin_chain as chain;
@ -31,8 +31,6 @@ use std::sync::Arc;
use std::{thread, time}; use std::{thread, time};
use util::Mutex; use util::Mutex;
use util::LOGGER;
/// Start 1 node mining and two wallets, then send a few /// Start 1 node mining and two wallets, then send a few
/// transactions from one to the other /// transactions from one to the other
#[ignore] #[ignore]
@ -105,7 +103,7 @@ fn basic_wallet_transactions() {
coinbase_info = coinbase_info =
LocalServerContainer::get_wallet_info(&coinbase_wallet_config, &coinbase_seed); LocalServerContainer::get_wallet_info(&coinbase_wallet_config, &coinbase_seed);
} }
warn!(LOGGER, "Sending 50 Grins to recipient wallet"); warn!("Sending 50 Grins to recipient wallet");
LocalServerContainer::send_amount_to( LocalServerContainer::send_amount_to(
&coinbase_wallet_config, &coinbase_wallet_config,
"50.00", "50.00",
@ -125,10 +123,7 @@ fn basic_wallet_transactions() {
println!("Recipient wallet info: {:?}", recipient_info); println!("Recipient wallet info: {:?}", recipient_info);
assert!(recipient_info.amount_currently_spendable == 50000000000); assert!(recipient_info.amount_currently_spendable == 50000000000);
warn!( warn!("Sending many small transactions to recipient wallet");
LOGGER,
"Sending many small transactions to recipient wallet"
);
for _i in 0..10 { for _i in 0..10 {
LocalServerContainer::send_amount_to( LocalServerContainer::send_amount_to(
&coinbase_wallet_config, &coinbase_wallet_config,

View file

@ -29,7 +29,6 @@ use core::global;
use p2p::Seeding; use p2p::Seeding;
use servers; use servers;
use tui::ui; use tui::ui;
use util::LOGGER;
/// wrap below to allow UI to clean up on stop /// wrap below to allow UI to clean up on stop
fn start_server(config: servers::ServerConfig) { fn start_server(config: servers::ServerConfig) {
@ -37,9 +36,9 @@ fn start_server(config: servers::ServerConfig) {
// Just kill process for now, otherwise the process // Just kill process for now, otherwise the process
// hangs around until sigint because the API server // hangs around until sigint because the API server
// currently has no shutdown facility // currently has no shutdown facility
warn!(LOGGER, "Shutting down..."); warn!("Shutting down...");
thread::sleep(Duration::from_millis(1000)); thread::sleep(Duration::from_millis(1000));
warn!(LOGGER, "Shutdown complete."); warn!("Shutdown complete.");
exit(0); exit(0);
} }
@ -47,7 +46,7 @@ fn start_server_tui(config: servers::ServerConfig) {
// Run the UI controller.. here for now for simplicity to access // Run the UI controller.. here for now for simplicity to access
// everything it might need // everything it might need
if config.run_tui.is_some() && config.run_tui.unwrap() { if config.run_tui.is_some() && config.run_tui.unwrap() {
warn!(LOGGER, "Starting GRIN in UI mode..."); warn!("Starting GRIN in UI mode...");
servers::Server::start(config, |serv: Arc<servers::Server>| { servers::Server::start(config, |serv: Arc<servers::Server>| {
let running = Arc::new(AtomicBool::new(true)); let running = Arc::new(AtomicBool::new(true));
let _ = thread::Builder::new() let _ = thread::Builder::new()
@ -60,7 +59,7 @@ fn start_server_tui(config: servers::ServerConfig) {
}); });
}).unwrap(); }).unwrap();
} else { } else {
warn!(LOGGER, "Starting GRIN w/o UI..."); warn!("Starting GRIN w/o UI...");
servers::Server::start(config, |serv: Arc<servers::Server>| { servers::Server::start(config, |serv: Arc<servers::Server>| {
let running = Arc::new(AtomicBool::new(true)); let running = Arc::new(AtomicBool::new(true));
let r = running.clone(); let r = running.clone();
@ -70,7 +69,7 @@ fn start_server_tui(config: servers::ServerConfig) {
while running.load(Ordering::SeqCst) { while running.load(Ordering::SeqCst) {
thread::sleep(Duration::from_secs(1)); thread::sleep(Duration::from_secs(1));
} }
warn!(LOGGER, "Received SIGINT (Ctrl+C) or SIGTERM (kill)."); warn!("Received SIGINT (Ctrl+C) or SIGTERM (kill).");
serv.stop(); serv.stop();
}).unwrap(); }).unwrap();
} }
@ -170,8 +169,8 @@ pub fn server_command(server_args: Option<&ArgMatches>, mut global_config: Globa
} }
}); });
match daemonize.start() { match daemonize.start() {
Ok(_) => info!(LOGGER, "Grin server successfully started."), Ok(_) => info!("Grin server successfully started."),
Err(e) => error!(LOGGER, "Error starting: {}", e), Err(e) => error!("Error starting: {}", e),
} }
} }
("stop", _) => println!("TODO. Just 'kill $pid' for now. Maybe /tmp/grin.pid is $pid"), ("stop", _) => println!("TODO. Just 'kill $pid' for now. Maybe /tmp/grin.pid is $pid"),

View file

@ -35,7 +35,6 @@ use grin_wallet::{
use keychain; use keychain;
use servers::start_webwallet_server; use servers::start_webwallet_server;
use util::file::get_first_line; use util::file::get_first_line;
use util::LOGGER;
pub fn _init_wallet_seed(wallet_config: WalletConfig) { pub fn _init_wallet_seed(wallet_config: WalletConfig) {
if let Err(_) = WalletSeed::from_file(&wallet_config) { if let Err(_) = WalletSeed::from_file(&wallet_config) {
@ -73,7 +72,7 @@ pub fn instantiate_wallet(
println!("Error starting wallet: {}", e); println!("Error starting wallet: {}", e);
process::exit(0); process::exit(0);
}); });
info!(LOGGER, "Using LMDB Backend for wallet"); info!("Using LMDB Backend for wallet");
Box::new(db_wallet) Box::new(db_wallet)
} }
@ -107,7 +106,7 @@ pub fn wallet_command(wallet_args: &ArgMatches, config: GlobalWalletConfig) {
// Generate the initial wallet seed if we are running "wallet init". // Generate the initial wallet seed if we are running "wallet init".
if let ("init", Some(_)) = wallet_args.subcommand() { if let ("init", Some(_)) = wallet_args.subcommand() {
WalletSeed::init_file(&wallet_config).expect("Failed to init wallet seed file."); WalletSeed::init_file(&wallet_config).expect("Failed to init wallet seed file.");
info!(LOGGER, "Wallet seed file created"); info!("Wallet seed file created");
let client = let client =
HTTPWalletClient::new(&wallet_config.check_node_api_http_addr, node_api_secret); HTTPWalletClient::new(&wallet_config.check_node_api_http_addr, node_api_secret);
let _: LMDBBackend<HTTPWalletClient, keychain::ExtKeychain> = let _: LMDBBackend<HTTPWalletClient, keychain::ExtKeychain> =
@ -117,7 +116,7 @@ pub fn wallet_command(wallet_args: &ArgMatches, config: GlobalWalletConfig) {
e, wallet_config e, wallet_config
); );
}); });
info!(LOGGER, "Wallet database backend created"); info!("Wallet database backend created");
// give logging thread a moment to catch up // give logging thread a moment to catch up
thread::sleep(Duration::from_millis(200)); thread::sleep(Duration::from_millis(200));
// we are done here with creating the wallet, so just return // we are done here with creating the wallet, so just return
@ -268,7 +267,6 @@ pub fn wallet_command(wallet_args: &ArgMatches, config: GlobalWalletConfig) {
let slate = match result { let slate = match result {
Ok(s) => { Ok(s) => {
info!( info!(
LOGGER,
"Tx created: {} grin to {} (strategy '{}')", "Tx created: {} grin to {} (strategy '{}')",
core::amount_to_hr_string(amount, false), core::amount_to_hr_string(amount, false),
dest, dest,
@ -277,7 +275,7 @@ pub fn wallet_command(wallet_args: &ArgMatches, config: GlobalWalletConfig) {
s s
} }
Err(e) => { Err(e) => {
error!(LOGGER, "Tx not created: {:?}", e); error!("Tx not created: {:?}", e);
match e.kind() { match e.kind() {
// user errors, don't backtrace // user errors, don't backtrace
libwallet::ErrorKind::NotEnoughFunds { .. } => {} libwallet::ErrorKind::NotEnoughFunds { .. } => {}
@ -285,7 +283,7 @@ pub fn wallet_command(wallet_args: &ArgMatches, config: GlobalWalletConfig) {
libwallet::ErrorKind::FeeExceedsAmount { .. } => {} libwallet::ErrorKind::FeeExceedsAmount { .. } => {}
_ => { _ => {
// otherwise give full dump // otherwise give full dump
error!(LOGGER, "Backtrace: {}", e.backtrace().unwrap()); error!("Backtrace: {}", e.backtrace().unwrap());
} }
}; };
panic!(); panic!();
@ -294,18 +292,18 @@ pub fn wallet_command(wallet_args: &ArgMatches, config: GlobalWalletConfig) {
let result = api.post_tx(&slate, fluff); let result = api.post_tx(&slate, fluff);
match result { match result {
Ok(_) => { Ok(_) => {
info!(LOGGER, "Tx sent",); info!("Tx sent",);
Ok(()) Ok(())
} }
Err(e) => { Err(e) => {
error!(LOGGER, "Tx not sent: {:?}", e); error!("Tx not sent: {:?}", e);
Err(e) Err(e)
} }
} }
} else { } else {
error!( error!(
LOGGER, "HTTP Destination should start with http://: or https://: {}",
"HTTP Destination should start with http://: or https://: {}", dest dest
); );
panic!(); panic!();
} }
@ -321,7 +319,7 @@ pub fn wallet_command(wallet_args: &ArgMatches, config: GlobalWalletConfig) {
).expect("Send failed"); ).expect("Send failed");
Ok(()) Ok(())
} else { } else {
error!(LOGGER, "unsupported payment method: {}", method); error!("unsupported payment method: {}", method);
panic!(); panic!();
} }
} }
@ -354,11 +352,11 @@ pub fn wallet_command(wallet_args: &ArgMatches, config: GlobalWalletConfig) {
let result = api.post_tx(&slate, fluff); let result = api.post_tx(&slate, fluff);
match result { match result {
Ok(_) => { Ok(_) => {
info!(LOGGER, "Tx sent"); info!("Tx sent");
Ok(()) Ok(())
} }
Err(e) => { Err(e) => {
error!(LOGGER, "Tx not sent: {:?}", e); error!("Tx not sent: {:?}", e);
Err(e) Err(e)
} }
} }
@ -439,7 +437,7 @@ pub fn wallet_command(wallet_args: &ArgMatches, config: GlobalWalletConfig) {
("repost", Some(repost_args)) => { ("repost", Some(repost_args)) => {
let tx_id: u32 = match repost_args.value_of("id") { let tx_id: u32 = match repost_args.value_of("id") {
None => { None => {
error!(LOGGER, "Transaction of a completed but unconfirmed transaction required (specify with --id=[id])"); error!("Transaction of a completed but unconfirmed transaction required (specify with --id=[id])");
panic!(); panic!();
} }
Some(tx) => match tx.parse() { Some(tx) => match tx.parse() {
@ -456,11 +454,11 @@ pub fn wallet_command(wallet_args: &ArgMatches, config: GlobalWalletConfig) {
let result = api.post_stored_tx(tx_id, fluff); let result = api.post_stored_tx(tx_id, fluff);
match result { match result {
Ok(_) => { Ok(_) => {
info!(LOGGER, "Reposted transaction at {}", tx_id); info!("Reposted transaction at {}", tx_id);
Ok(()) Ok(())
} }
Err(e) => { Err(e) => {
error!(LOGGER, "Transaction reposting failed: {}", e); error!("Transaction reposting failed: {}", e);
Err(e) Err(e)
} }
} }
@ -469,11 +467,11 @@ pub fn wallet_command(wallet_args: &ArgMatches, config: GlobalWalletConfig) {
let result = api.dump_stored_tx(tx_id, true, f); let result = api.dump_stored_tx(tx_id, true, f);
match result { match result {
Ok(_) => { Ok(_) => {
warn!(LOGGER, "Dumped transaction data for tx {} to {}", tx_id, f); warn!("Dumped transaction data for tx {} to {}", tx_id, f);
Ok(()) Ok(())
} }
Err(e) => { Err(e) => {
error!(LOGGER, "Transaction reposting failed: {}", e); error!("Transaction reposting failed: {}", e);
Err(e) Err(e)
} }
} }
@ -488,11 +486,11 @@ pub fn wallet_command(wallet_args: &ArgMatches, config: GlobalWalletConfig) {
let result = api.cancel_tx(tx_id); let result = api.cancel_tx(tx_id);
match result { match result {
Ok(_) => { Ok(_) => {
info!(LOGGER, "Transaction {} Cancelled", tx_id); info!("Transaction {} Cancelled", tx_id);
Ok(()) Ok(())
} }
Err(e) => { Err(e) => {
error!(LOGGER, "TX Cancellation failed: {}", e); error!("TX Cancellation failed: {}", e);
Err(e) Err(e)
} }
} }
@ -501,12 +499,12 @@ pub fn wallet_command(wallet_args: &ArgMatches, config: GlobalWalletConfig) {
let result = api.restore(); let result = api.restore();
match result { match result {
Ok(_) => { Ok(_) => {
info!(LOGGER, "Wallet restore complete",); info!("Wallet restore complete",);
Ok(()) Ok(())
} }
Err(e) => { Err(e) => {
error!(LOGGER, "Wallet restore failed: {:?}", e); error!("Wallet restore failed: {:?}", e);
error!(LOGGER, "Backtrace: {}", e.backtrace().unwrap()); error!("Backtrace: {}", e.backtrace().unwrap());
Err(e) Err(e)
} }
} }

View file

@ -24,7 +24,7 @@ extern crate daemonize;
extern crate serde; extern crate serde;
extern crate serde_json; extern crate serde_json;
#[macro_use] #[macro_use]
extern crate slog; extern crate log;
extern crate term; extern crate term;
extern crate grin_api as api; extern crate grin_api as api;
@ -45,7 +45,7 @@ use clap::{App, Arg, SubCommand};
use config::config::{SERVER_CONFIG_FILE_NAME, WALLET_CONFIG_FILE_NAME}; use config::config::{SERVER_CONFIG_FILE_NAME, WALLET_CONFIG_FILE_NAME};
use core::global; use core::global;
use util::{init_logger, LOGGER}; use util::init_logger;
// include build information // include build information
pub mod built_info { pub mod built_info {
@ -73,9 +73,9 @@ pub fn info_strings() -> (String, String, String) {
fn log_build_info() { fn log_build_info() {
let (basic_info, detailed_info, deps) = info_strings(); let (basic_info, detailed_info, deps) = info_strings();
info!(LOGGER, "{}", basic_info); info!("{}", basic_info);
debug!(LOGGER, "{}", detailed_info); debug!("{}", detailed_info);
trace!(LOGGER, "{}", deps); trace!("{}", deps);
} }
fn main() { fn main() {
@ -378,7 +378,6 @@ fn main() {
l.tui_running = Some(false); l.tui_running = Some(false);
init_logger(Some(l)); init_logger(Some(l));
warn!( warn!(
LOGGER,
"Using wallet configuration file at {}", "Using wallet configuration file at {}",
w.config_file_path.as_ref().unwrap().to_str().unwrap() w.config_file_path.as_ref().unwrap().to_str().unwrap()
); );
@ -399,12 +398,11 @@ fn main() {
global::set_mining_mode(s.members.as_mut().unwrap().server.clone().chain_type); global::set_mining_mode(s.members.as_mut().unwrap().server.clone().chain_type);
if let Some(file_path) = &s.config_file_path { if let Some(file_path) = &s.config_file_path {
info!( info!(
LOGGER,
"Using configuration file at {}", "Using configuration file at {}",
file_path.to_str().unwrap() file_path.to_str().unwrap()
); );
} else { } else {
info!(LOGGER, "Node configuration file not found, using default"); info!("Node configuration file not found, using default");
} }
node_config = Some(s); node_config = Some(s);
} }

View file

@ -37,7 +37,6 @@ use servers::Server;
use tui::constants::ROOT_STACK; use tui::constants::ROOT_STACK;
use tui::types::{TUIStatusListener, UIMessage}; use tui::types::{TUIStatusListener, UIMessage};
use tui::{menu, mining, peers, status, version}; use tui::{menu, mining, peers, status, version};
use util::LOGGER;
use built_info; use built_info;
@ -172,7 +171,7 @@ impl Controller {
let mut next_stat_update = Utc::now().timestamp() + stat_update_interval; let mut next_stat_update = Utc::now().timestamp() + stat_update_interval;
while self.ui.step() { while self.ui.step() {
if !running.load(Ordering::SeqCst) { if !running.load(Ordering::SeqCst) {
warn!(LOGGER, "Received SIGINT (Ctrl+C)."); warn!("Received SIGINT (Ctrl+C).");
server.stop(); server.stop();
self.ui.stop(); self.ui.stop();
} }

View file

@ -16,7 +16,7 @@ lmdb-zero = "0.4.4"
memmap = "0.6.2" memmap = "0.6.2"
serde = "1" serde = "1"
serde_derive = "1" serde_derive = "1"
slog = { version = "~2.3", features = ["max_level_trace", "release_max_level_trace"] } log = "0.4"
grin_core = { path = "../core" } grin_core = { path = "../core" }
grin_util = { path = "../util" } grin_util = { path = "../util" }

View file

@ -25,7 +25,6 @@ use core::core::hash::Hashed;
use core::core::pmmr; use core::core::pmmr;
use core::core::BlockHeader; use core::core::BlockHeader;
use prune_list::PruneList; use prune_list::PruneList;
use util::LOGGER;
/// Compact (roaring) bitmap representing the set of positions of /// Compact (roaring) bitmap representing the set of positions of
/// leaves that are currently unpruned in the MMR. /// leaves that are currently unpruned in the MMR.
@ -64,7 +63,7 @@ impl LeafSet {
let cp_file_path = Path::new(&cp_path); let cp_file_path = Path::new(&cp_path);
if !cp_file_path.exists() { if !cp_file_path.exists() {
debug!(LOGGER, "leaf_set: rewound leaf file not found: {}", cp_path); debug!("leaf_set: rewound leaf file not found: {}", cp_path);
return Ok(()); return Ok(());
} }
@ -73,10 +72,7 @@ impl LeafSet {
bitmap_file.read_to_end(&mut buffer)?; bitmap_file.read_to_end(&mut buffer)?;
let bitmap = Bitmap::deserialize(&buffer); let bitmap = Bitmap::deserialize(&buffer);
debug!( debug!("leaf_set: copying rewound file {} to {}", cp_path, path);
LOGGER,
"leaf_set: copying rewound file {} to {}", cp_path, path
);
let mut leaf_set = LeafSet { let mut leaf_set = LeafSet {
path: path.clone(), path: path.clone(),

View file

@ -28,7 +28,7 @@ extern crate lmdb_zero;
extern crate memmap; extern crate memmap;
extern crate serde; extern crate serde;
#[macro_use] #[macro_use]
extern crate slog; extern crate log;
extern crate failure; extern crate failure;
#[macro_use] #[macro_use]
extern crate failure_derive; extern crate failure_derive;

View file

@ -24,7 +24,6 @@ use core::ser::{self, PMMRable};
use leaf_set::LeafSet; use leaf_set::LeafSet;
use prune_list::PruneList; use prune_list::PruneList;
use types::{prune_noop, AppendOnlyFile, HashFile}; use types::{prune_noop, AppendOnlyFile, HashFile};
use util::LOGGER;
const PMMR_HASH_FILE: &'static str = "pmmr_hash.bin"; const PMMR_HASH_FILE: &'static str = "pmmr_hash.bin";
const PMMR_DATA_FILE: &'static str = "pmmr_data.bin"; const PMMR_DATA_FILE: &'static str = "pmmr_data.bin";
@ -103,8 +102,8 @@ where
Ok(h) => Some(h), Ok(h) => Some(h),
Err(e) => { Err(e) => {
error!( error!(
LOGGER, "Corrupted storage, could not read an entry from hash store: {:?}",
"Corrupted storage, could not read an entry from hash store: {:?}", e e
); );
return None; return None;
} }
@ -126,8 +125,8 @@ where
Ok(h) => Some(h), Ok(h) => Some(h),
Err(e) => { Err(e) => {
error!( error!(
LOGGER, "Corrupted storage, could not read an entry from data store: {:?}",
"Corrupted storage, could not read an entry from data store: {:?}", e e
); );
return None; return None;
} }
@ -200,7 +199,6 @@ where
fn dump_stats(&self) { fn dump_stats(&self) {
debug!( debug!(
LOGGER,
"pmmr backend: unpruned: {}, hashes: {}, data: {}, leaf_set: {}, prune_list: {}", "pmmr backend: unpruned: {}, hashes: {}, data: {}, leaf_set: {}, prune_list: {}",
self.unpruned_size().unwrap_or(0), self.unpruned_size().unwrap_or(0),
self.hash_size().unwrap_or(0), self.hash_size().unwrap_or(0),

View file

@ -29,8 +29,6 @@ use croaring::Bitmap;
use core::core::pmmr::{bintree_postorder_height, family, path}; use core::core::pmmr::{bintree_postorder_height, family, path};
use util::LOGGER;
/// Maintains a list of previously pruned nodes in PMMR, compacting the list as /// Maintains a list of previously pruned nodes in PMMR, compacting the list as
/// parents get pruned and allowing checking whether a leaf is pruned. Given /// parents get pruned and allowing checking whether a leaf is pruned. Given
/// a node's position, computes how much it should get shifted given the /// a node's position, computes how much it should get shifted given the
@ -91,7 +89,7 @@ impl PruneList {
prune_list.init_caches(); prune_list.init_caches();
if !prune_list.bitmap.is_empty() { if !prune_list.bitmap.is_empty() {
debug!(LOGGER, "prune_list: bitmap {} pos ({} bytes), pruned_cache {} pos ({} bytes), shift_cache {}, leaf_shift_cache {}", debug!("prune_list: bitmap {} pos ({} bytes), pruned_cache {} pos ({} bytes), shift_cache {}, leaf_shift_cache {}",
prune_list.bitmap.cardinality(), prune_list.bitmap.cardinality(),
prune_list.bitmap.get_serialized_size_in_bytes(), prune_list.bitmap.get_serialized_size_in_bytes(),
prune_list.pruned_cache.cardinality(), prune_list.pruned_cache.cardinality(),

View file

@ -27,7 +27,6 @@ use libc::{ftruncate64, off64_t};
use core::core::hash::Hash; use core::core::hash::Hash;
use core::ser; use core::ser;
use util::LOGGER;
/// A no-op function for doing nothing with some pruned data. /// A no-op function for doing nothing with some pruned data.
pub fn prune_noop(_pruned_data: &[u8]) {} pub fn prune_noop(_pruned_data: &[u8]) {}
@ -65,8 +64,8 @@ impl HashFile {
Ok(h) => Some(h), Ok(h) => Some(h),
Err(e) => { Err(e) => {
error!( error!(
LOGGER, "Corrupted storage, could not read an entry from hash file: {:?}",
"Corrupted storage, could not read an entry from hash file: {:?}", e e
); );
return None; return None;
} }

View file

@ -13,9 +13,8 @@ lazy_static = "1"
rand = "0.5" rand = "0.5"
serde = "1" serde = "1"
serde_derive = "1" serde_derive = "1"
slog = { version = "~2.3", features = ["max_level_trace", "release_max_level_trace"] } log4rs = { version = "0.8.1", features = ["rolling_file_appender", "compound_policy", "size_trigger", "fixed_window_roller"] }
slog-term = "~2.4" log = "0.4"
slog-async = "~2.3"
walkdir = "2" walkdir = "2"
zip = "0.4" zip = "0.4"
parking_lot = {version = "0.6"} parking_lot = {version = "0.6"}
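
The four feature flags on the new log4rs dependency gate the rotation machinery that the logger changes further down compose. A hedged sketch of what they unlock, using an illustrative path and the 16 MiB default mentioned in this commit:

```rust
// Sketch only, mirroring the types enabled by the rolling_file_appender,
// compound_policy, size_trigger and fixed_window_roller features.
use log4rs::append::rolling_file::policy::compound::{
    roll::fixed_window::FixedWindowRoller, trigger::size::SizeTrigger, CompoundPolicy,
};
use log4rs::append::rolling_file::RollingFileAppender;

fn rotating_appender() -> RollingFileAppender {
    // Keep up to 32 gzipped archives named grin.log.0.gz, grin.log.1.gz, ...
    let roller = FixedWindowRoller::builder()
        .build("grin.log.{}.gz", 32)
        .unwrap();
    // Roll whenever the live file reaches 16 MiB.
    let trigger = SizeTrigger::new(16 * 1024 * 1024);
    let policy = CompoundPolicy::new(Box::new(trigger), Box::new(roller));
    RollingFileAppender::builder()
        .build("grin.log", Box::new(policy))
        .unwrap()
}
```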

View file

@ -26,10 +26,8 @@ extern crate base64;
extern crate byteorder; extern crate byteorder;
extern crate rand; extern crate rand;
#[macro_use] #[macro_use]
extern crate slog; extern crate log;
extern crate slog_async; extern crate log4rs;
extern crate slog_term;
#[macro_use] #[macro_use]
extern crate lazy_static; extern crate lazy_static;
@ -48,7 +46,7 @@ pub extern crate secp256k1zkp as secp;
// Logging related // Logging related
pub mod logger; pub mod logger;
pub use logger::{init_logger, init_test_logger, LOGGER}; pub use logger::{init_logger, init_test_logger};
// Static secp instance // Static secp instance
pub mod secp_static; pub mod secp_static;

View file

@ -12,10 +12,6 @@
// limitations under the License. // limitations under the License.
//! Logging wrapper to be used throughout all crates in the workspace //! Logging wrapper to be used throughout all crates in the workspace
use slog::{Discard, Drain, Duplicate, Level, LevelFilter, Logger};
use slog_async;
use slog_term;
use std::fs::OpenOptions;
use std::ops::Deref; use std::ops::Deref;
use Mutex; use Mutex;
@ -24,14 +20,27 @@ use std::{panic, thread};
use types::{LogLevel, LoggingConfig}; use types::{LogLevel, LoggingConfig};
fn convert_log_level(in_level: &LogLevel) -> Level { use log::{LevelFilter, Record};
use log4rs;
use log4rs::append::console::ConsoleAppender;
use log4rs::append::file::FileAppender;
use log4rs::append::rolling_file::{
policy::compound::roll::fixed_window::FixedWindowRoller,
policy::compound::trigger::size::SizeTrigger, policy::compound::CompoundPolicy,
RollingFileAppender,
};
use log4rs::append::Append;
use log4rs::config::{Appender, Config, Root};
use log4rs::encode::pattern::PatternEncoder;
use log4rs::filter::{threshold::ThresholdFilter, Filter, Response};
fn convert_log_level(in_level: &LogLevel) -> LevelFilter {
match *in_level { match *in_level {
LogLevel::Info => Level::Info, LogLevel::Info => LevelFilter::Info,
LogLevel::Critical => Level::Critical, LogLevel::Warning => LevelFilter::Warn,
LogLevel::Warning => Level::Warning, LogLevel::Debug => LevelFilter::Debug,
LogLevel::Debug => Level::Debug, LogLevel::Trace => LevelFilter::Trace,
LogLevel::Trace => Level::Trace, LogLevel::Error => LevelFilter::Error,
LogLevel::Error => Level::Error,
} }
} }
@ -43,60 +52,115 @@ lazy_static! {
static ref TUI_RUNNING: Mutex<bool> = Mutex::new(false); static ref TUI_RUNNING: Mutex<bool> = Mutex::new(false);
/// Static Logging configuration, should only be set once, before first logging call /// Static Logging configuration, should only be set once, before first logging call
static ref LOGGING_CONFIG: Mutex<LoggingConfig> = Mutex::new(LoggingConfig::default()); static ref LOGGING_CONFIG: Mutex<LoggingConfig> = Mutex::new(LoggingConfig::default());
}
/// And a static reference to the logger itself, accessible from all crates /// This filter rejects messages from modules whose path doesn't start with "grin",
pub static ref LOGGER: Logger = { /// keeping the log limited to Grin-related records
let was_init = WAS_INIT.lock().clone(); #[derive(Debug)]
let config = LOGGING_CONFIG.lock(); struct GrinFilter;
let slog_level_stdout = convert_log_level(&config.stdout_log_level);
let slog_level_file = convert_log_level(&config.file_log_level); impl Filter for GrinFilter {
if config.tui_running.is_some() && config.tui_running.unwrap() { fn filter(&self, record: &Record) -> Response {
let mut tui_running_ref = TUI_RUNNING.lock(); if let Some(module_path) = record.module_path() {
*tui_running_ref = true; if module_path.starts_with("grin") {
return Response::Neutral;
}
} }
//Terminal output drain Response::Reject
let terminal_decorator = slog_term::TermDecorator::new().build(); }
let terminal_drain = slog_term::FullFormat::new(terminal_decorator).build().fuse();
let terminal_drain = LevelFilter::new(terminal_drain, slog_level_stdout).fuse();
let mut terminal_drain = slog_async::Async::new(terminal_drain).build().fuse();
if !config.log_to_stdout || !was_init {
terminal_drain = slog_async::Async::new(Discard{}).build().fuse();
}
if config.log_to_file && was_init {
//File drain
let file = OpenOptions::new()
.create(true)
.write(true)
.append(config.log_file_append)
.truncate(false)
.open(&config.log_file_path)
.unwrap();
let file_decorator = slog_term::PlainDecorator::new(file);
let file_drain = slog_term::FullFormat::new(file_decorator).build().fuse();
let file_drain = LevelFilter::new(file_drain, slog_level_file).fuse();
let file_drain_final = slog_async::Async::new(file_drain).build().fuse();
let composite_drain = Duplicate::new(terminal_drain, file_drain_final).fuse();
Logger::root(composite_drain, o!())
} else {
Logger::root(terminal_drain, o!())
}
};
} }
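
The GrinFilter above is what implements the "don't log messages from modules other than Grin-related" behaviour: a record whose module path does not begin with "grin" is rejected before it reaches an appender. A dependency-free sketch of that rule, with illustrative module names:

```rust
// Behavioural sketch only; the real filter returns log4rs Response values.
fn grin_filter_keeps(module_path: Option<&str>) -> bool {
    match module_path {
        Some(m) if m.starts_with("grin") => true, // -> Response::Neutral
        _ => false,                               // -> Response::Reject
    }
}

fn main() {
    assert!(grin_filter_keeps(Some("grin_servers::grin::sync"))); // kept
    assert!(!grin_filter_keeps(Some("hyper::client")));           // dependency noise dropped
    assert!(!grin_filter_keeps(None));                            // no module path: dropped
}
```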
/// Initialize the logger with the given configuration /// Initialize the logger with the given configuration
pub fn init_logger(config: Option<LoggingConfig>) { pub fn init_logger(config: Option<LoggingConfig>) {
if let Some(c) = config { if let Some(c) = config {
let mut config_ref = LOGGING_CONFIG.lock(); let level_stdout = convert_log_level(&c.stdout_log_level);
*config_ref = c.clone(); let level_file = convert_log_level(&c.file_log_level);
let level_minimum;
// Determine minimum logging level for Root logger
if level_stdout > level_file {
level_minimum = level_stdout;
} else {
level_minimum = level_file;
}
// Start logger
let stdout = ConsoleAppender::builder()
.encoder(Box::new(PatternEncoder::default()))
.build();
let mut root = Root::builder();
let mut appenders = vec![];
if c.log_to_stdout {
let filter = Box::new(ThresholdFilter::new(level_stdout));
appenders.push(
Appender::builder()
.filter(filter)
.filter(Box::new(GrinFilter))
.build("stdout", Box::new(stdout)),
);
root = root.appender("stdout");
}
if c.log_to_file {
// If maximum log size is specified, use rolling file appender
// or use the basic one otherwise
let filter = Box::new(ThresholdFilter::new(level_file));
let file: Box<Append> = {
if let Some(size) = c.log_max_size {
let roller = FixedWindowRoller::builder()
.build(&format!("{}.{{}}.gz", c.log_file_path), 32)
.unwrap();
let trigger = SizeTrigger::new(size);
let policy = CompoundPolicy::new(Box::new(trigger), Box::new(roller));
Box::new(
RollingFileAppender::builder()
.append(c.log_file_append)
.encoder(Box::new(PatternEncoder::new("{d} {l} {M} - {m}{n}")))
.build(c.log_file_path, Box::new(policy))
.unwrap(),
)
} else {
Box::new(
FileAppender::builder()
.append(c.log_file_append)
.encoder(Box::new(PatternEncoder::new("{d} {l} {M} - {m}{n}")))
.build(c.log_file_path)
.unwrap(),
)
}
};
appenders.push(
Appender::builder()
.filter(filter)
.filter(Box::new(GrinFilter))
.build("file", file),
);
root = root.appender("file");
}
let config = Config::builder()
.appenders(appenders)
.build(root.build(level_minimum))
.unwrap();
let _ = log4rs::init_config(config).unwrap();
info!(
"log4rs is initialized, file level: {:?}, stdout level: {:?}, min. level: {:?}",
level_file, level_stdout, level_minimum
);
// Logger configuration successfully injected into LOGGING_CONFIG... // Logger configuration successfully injected into LOGGING_CONFIG...
let mut was_init_ref = WAS_INIT.lock(); let mut was_init_ref = WAS_INIT.lock();
*was_init_ref = true; *was_init_ref = true;
// .. allow logging, having ensured that paths etc are immutable
} }
send_panic_to_log(); send_panic_to_log();
} }
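With the global slog LOGGER removed, a binary initializes log4rs once through init_logger and every call site simply uses the standard log macros, which is what the remaining changes in this commit amount to. A minimal usage sketch, assuming init_logger and LoggingConfig are reachable from the grin_util crate root (exact paths may differ):

#[macro_use]
extern crate log;
extern crate grin_util as util;

fn main() {
    // Configure once, before the first logging call; the defaults write
    // grin.log at Debug level and append across restarts.
    util::init_logger(Some(util::LoggingConfig::default()));

    // No LOGGER handle is passed around any more; the log crate's macros
    // route records through the configured log4rs appenders.
    info!("node starting");
    debug!("only Grin-prefixed modules pass the GrinFilter");
}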
@ -134,7 +198,6 @@ fn send_panic_to_log() {
match info.location() { match info.location() {
Some(location) => { Some(location) => {
error!( error!(
LOGGER,
"\nthread '{}' panicked at '{}': {}:{}{:?}\n\n", "\nthread '{}' panicked at '{}': {}:{}{:?}\n\n",
thread, thread,
msg, msg,
@ -143,10 +206,7 @@ fn send_panic_to_log() {
backtrace backtrace
); );
} }
None => error!( None => error!("thread '{}' panicked at '{}'{:?}", thread, msg, backtrace),
LOGGER,
"thread '{}' panicked at '{}'{:?}", thread, msg, backtrace
),
} }
//also print to stderr //also print to stderr
let tui_running = TUI_RUNNING.lock().clone(); let tui_running = TUI_RUNNING.lock().clone();
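send_panic_to_log keeps its behaviour but now reports through the plain log macros. For readers unfamiliar with the mechanism, a minimal sketch of such a panic-to-log hook (illustrative only, not the commit's exact code; the real function also logs a backtrace and additionally writes to stderr depending on whether the TUI is running):

#[macro_use]
extern crate log;

use std::panic;
use std::thread;

// Install a hook so that any panic is recorded by the logging backend.
fn install_panic_hook() {
    panic::set_hook(Box::new(|info| {
        let thread = thread::current();
        let name = thread.name().unwrap_or("unnamed");
        // Panic payloads are a &str or a String in practice
        let msg = match info.payload().downcast_ref::<&'static str>() {
            Some(s) => *s,
            None => match info.payload().downcast_ref::<String>() {
                Some(s) => s.as_str(),
                None => "Box<Any>",
            },
        };
        match info.location() {
            Some(location) => error!(
                "thread '{}' panicked at '{}': {}:{}",
                name,
                msg,
                location.file(),
                location.line()
            ),
            None => error!("thread '{}' panicked at '{}'", name, msg),
        }
    }));
}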

View file

@ -14,11 +14,9 @@
//! Logging configuration types //! Logging configuration types
/// Log level types, as slog's don't implement serialize /// Log level types
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum LogLevel { pub enum LogLevel {
/// Critical
Critical,
/// Error /// Error
Error, Error,
/// Warning /// Warning
@ -46,6 +44,8 @@ pub struct LoggingConfig {
pub log_file_path: String, pub log_file_path: String,
/// Whether to append to log or replace /// Whether to append to log or replace
pub log_file_append: bool, pub log_file_append: bool,
/// Size of the log in bytes to rotate over (optional)
pub log_max_size: Option<u64>,
/// Whether the tui is running (optional) /// Whether the tui is running (optional)
pub tui_running: Option<bool>, pub tui_running: Option<bool>,
} }
@ -59,6 +59,7 @@ impl Default for LoggingConfig {
file_log_level: LogLevel::Debug, file_log_level: LogLevel::Debug,
log_file_path: String::from("grin.log"), log_file_path: String::from("grin.log"),
log_file_append: true, log_file_append: true,
log_max_size: Some(1024 * 1024 * 16), // 16 megabytes default
tui_running: None, tui_running: None,
} }
} }
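The new log_max_size field is what switches init_logger from the plain FileAppender to the rolling appender: with the roller pattern shown earlier, rotated copies are written next to the live log as grin.log.0.gz, grin.log.1.gz and so on, keeping up to 32 of them. A minimal sketch of overriding the default (the re-export paths are assumptions):

extern crate grin_util as util;

use util::{init_logger, LoggingConfig};

fn main() {
    // Rotate once the log reaches 64 MiB instead of the default 16 MiB;
    // everything else keeps the defaults above (grin.log, Debug to file,
    // append on restart).
    let config = LoggingConfig {
        log_max_size: Some(64 * 1024 * 1024),
        ..LoggingConfig::default()
    };
    init_logger(Some(config));

    // Setting log_max_size to None falls back to the non-rolling FileAppender.
}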

View file

@ -17,7 +17,7 @@ rand = "0.5"
serde = "1" serde = "1"
serde_derive = "1" serde_derive = "1"
serde_json = "1" serde_json = "1"
slog = { version = "~2.3", features = ["max_level_trace", "release_max_level_trace"] } log = "0.4"
term = "0.5" term = "0.5"
tokio = "0.1.7" tokio = "0.1.7"
tokio-core = "0.1" tokio-core = "0.1"

View file

@ -26,8 +26,8 @@ use api;
use error::{Error, ErrorKind}; use error::{Error, ErrorKind};
use libtx::slate::Slate; use libtx::slate::Slate;
use libwallet; use libwallet;
use util;
use util::secp::pedersen; use util::secp::pedersen;
use util::{self, LOGGER};
#[derive(Clone)] #[derive(Clone)]
pub struct HTTPWalletClient { pub struct HTTPWalletClient {
@ -65,11 +65,11 @@ impl WalletClient for HTTPWalletClient {
match single_create_coinbase(&url, &block_fees) { match single_create_coinbase(&url, &block_fees) {
Err(e) => { Err(e) => {
error!( error!(
LOGGER, "Failed to get coinbase from {}. Run grin wallet listen?",
"Failed to get coinbase from {}. Run grin wallet listen?", url url
); );
error!(LOGGER, "Underlying Error: {}", e.cause().unwrap()); error!("Underlying Error: {}", e.cause().unwrap());
error!(LOGGER, "Backtrace: {}", e.backtrace().unwrap()); error!("Backtrace: {}", e.backtrace().unwrap());
Err(libwallet::ErrorKind::ClientCallback( Err(libwallet::ErrorKind::ClientCallback(
"Failed to get coinbase", "Failed to get coinbase",
))? ))?
@ -85,11 +85,11 @@ impl WalletClient for HTTPWalletClient {
"dest formatted as {} but send -d expected stdout or http://IP:port", "dest formatted as {} but send -d expected stdout or http://IP:port",
dest dest
); );
error!(LOGGER, "{}", err_str,); error!("{}", err_str,);
Err(libwallet::ErrorKind::Uri)? Err(libwallet::ErrorKind::Uri)?
} }
let url = format!("{}/v1/wallet/foreign/receive_tx", dest); let url = format!("{}/v1/wallet/foreign/receive_tx", dest);
debug!(LOGGER, "Posting transaction slate to {}", url); debug!("Posting transaction slate to {}", url);
let res = api::client::post(url.as_str(), None, slate).context( let res = api::client::post(url.as_str(), None, slate).context(
libwallet::ErrorKind::ClientCallback("Posting transaction slate"), libwallet::ErrorKind::ClientCallback("Posting transaction slate"),
@ -153,7 +153,7 @@ impl WalletClient for HTTPWalletClient {
let results = match rt.block_on(task) { let results = match rt.block_on(task) {
Ok(outputs) => outputs, Ok(outputs) => outputs,
Err(e) => { Err(e) => {
error!(LOGGER, "Outputs by id failed: {}", e); error!("Outputs by id failed: {}", e);
return Err(libwallet::ErrorKind::ClientCallback("Error from server"))?; return Err(libwallet::ErrorKind::ClientCallback("Error from server"))?;
} }
}; };
@ -209,8 +209,8 @@ impl WalletClient for HTTPWalletClient {
Err(e) => { Err(e) => {
// if we got anything other than 200 back from server, bye // if we got anything other than 200 back from server, bye
error!( error!(
LOGGER, "get_outputs_by_pmmr_index: unable to contact API {}. Error: {}",
"get_outputs_by_pmmr_index: unable to contact API {}. Error: {}", addr, e addr, e
); );
Err(libwallet::ErrorKind::ClientCallback( Err(libwallet::ErrorKind::ClientCallback(
"unable to contact api", "unable to contact api",
@ -226,11 +226,11 @@ pub fn create_coinbase(dest: &str, block_fees: &BlockFees) -> Result<CbData, Err
match single_create_coinbase(&url, &block_fees) { match single_create_coinbase(&url, &block_fees) {
Err(e) => { Err(e) => {
error!( error!(
LOGGER, "Failed to get coinbase from {}. Run grin wallet listen?",
"Failed to get coinbase from {}. Run grin wallet listen?", url url
); );
error!(LOGGER, "Underlying Error: {}", e.cause().unwrap()); error!("Underlying Error: {}", e.cause().unwrap());
error!(LOGGER, "Backtrace: {}", e.backtrace().unwrap()); error!("Backtrace: {}", e.backtrace().unwrap());
Err(e)? Err(e)?
} }
Ok(res) => Ok(res), Ok(res) => Ok(res),

View file

@ -27,8 +27,7 @@ use uuid::Uuid;
use failure::ResultExt; use failure::ResultExt;
use keychain::{self, Identifier, Keychain}; use keychain::{self, Identifier, Keychain};
use util::secp::pedersen; use util::secp::pedersen;
use util::LOGGER;
use error::{Error, ErrorKind}; use error::{Error, ErrorKind};
@ -168,11 +167,10 @@ where
// delete the lock file // delete the lock file
if let Err(e) = fs::remove_dir(&self.lock_file_path) { if let Err(e) = fs::remove_dir(&self.lock_file_path) {
error!( error!(
LOGGER,
"Could not remove wallet lock file. Maybe insufficient rights? {:?} ", e "Could not remove wallet lock file. Maybe insufficient rights? {:?} ", e
); );
} }
info!(LOGGER, "... released wallet lock"); info!("... released wallet lock");
} }
} }
@ -224,7 +222,7 @@ where
/// Close wallet and remove any stored credentials (TBD) /// Close wallet and remove any stored credentials (TBD)
fn close(&mut self) -> Result<(), libwallet::Error> { fn close(&mut self) -> Result<(), libwallet::Error> {
debug!(LOGGER, "Closing wallet keychain"); debug!("Closing wallet keychain");
self.keychain = None; self.keychain = None;
Ok(()) Ok(())
} }
@ -352,14 +350,14 @@ where
fn lock(&self) -> Result<(), libwallet::Error> { fn lock(&self) -> Result<(), libwallet::Error> {
// create directory if it doesn't exist // create directory if it doesn't exist
fs::create_dir_all(self.config.data_file_dir.clone()).unwrap_or_else(|why| { fs::create_dir_all(self.config.data_file_dir.clone()).unwrap_or_else(|why| {
info!(LOGGER, "! {:?}", why.kind()); info!("! {:?}", why.kind());
}); });
info!(LOGGER, "Acquiring wallet lock ..."); info!("Acquiring wallet lock ...");
let lock_file_path = self.lock_file_path.clone(); let lock_file_path = self.lock_file_path.clone();
let action = || { let action = || {
trace!(LOGGER, "making lock file for wallet lock"); trace!("making lock file for wallet lock");
fs::create_dir(&lock_file_path) fs::create_dir(&lock_file_path)
}; };
@ -377,7 +375,6 @@ where
Ok(_) => Ok(()), Ok(_) => Ok(()),
Err(e) => { Err(e) => {
error!( error!(
LOGGER,
"Failed to acquire wallet lock file (multiple retries)", "Failed to acquire wallet lock file (multiple retries)",
); );
Err(e.into()) Err(e.into())
@ -390,7 +387,7 @@ where
fn read_or_create_paths(&mut self) -> Result<(), Error> { fn read_or_create_paths(&mut self) -> Result<(), Error> {
if !Path::new(&self.config.data_file_dir.clone()).exists() { if !Path::new(&self.config.data_file_dir.clone()).exists() {
fs::create_dir_all(&self.config.data_file_dir.clone()).unwrap_or_else(|why| { fs::create_dir_all(&self.config.data_file_dir.clone()).unwrap_or_else(|why| {
info!(LOGGER, "! {:?}", why.kind()); info!("! {:?}", why.kind());
}); });
} }
if Path::new(&self.data_file_path.clone()).exists() { if Path::new(&self.data_file_path.clone()).exists() {

View file

@ -24,7 +24,7 @@ extern crate serde;
extern crate serde_derive; extern crate serde_derive;
extern crate serde_json; extern crate serde_json;
#[macro_use] #[macro_use]
extern crate slog; extern crate log;
extern crate chrono; extern crate chrono;
extern crate term; extern crate term;
extern crate url; extern crate url;

View file

@ -30,7 +30,6 @@ use util::{kernel_sig_msg, secp};
use core::core::{Input, Output, OutputFeatures, Transaction, TxKernel}; use core::core::{Input, Output, OutputFeatures, Transaction, TxKernel};
use keychain::{self, BlindSum, BlindingFactor, Identifier, Keychain}; use keychain::{self, BlindSum, BlindingFactor, Identifier, Keychain};
use libtx::{aggsig, proof}; use libtx::{aggsig, proof};
use util::LOGGER;
/// Context information available to transaction combinators. /// Context information available to transaction combinators.
pub struct Context<'a, K: 'a> pub struct Context<'a, K: 'a>
@ -67,8 +66,8 @@ where
K: Keychain, K: Keychain,
{ {
debug!( debug!(
LOGGER, "Building input (spending regular output): {}, {}",
"Building input (spending regular output): {}, {}", value, key_id value, key_id
); );
build_input(value, OutputFeatures::DEFAULT_OUTPUT, key_id) build_input(value, OutputFeatures::DEFAULT_OUTPUT, key_id)
} }
@ -78,10 +77,7 @@ pub fn coinbase_input<K>(value: u64, key_id: Identifier) -> Box<Append<K>>
where where
K: Keychain, K: Keychain,
{ {
debug!( debug!("Building input (spending coinbase): {}, {}", value, key_id);
LOGGER,
"Building input (spending coinbase): {}, {}", value, key_id
);
build_input(value, OutputFeatures::COINBASE_OUTPUT, key_id) build_input(value, OutputFeatures::COINBASE_OUTPUT, key_id)
} }
@ -95,7 +91,7 @@ where
move |build, (tx, kern, sum)| -> (Transaction, TxKernel, BlindSum) { move |build, (tx, kern, sum)| -> (Transaction, TxKernel, BlindSum) {
let commit = build.keychain.commit(value, &key_id).unwrap(); let commit = build.keychain.commit(value, &key_id).unwrap();
debug!(LOGGER, "Building output: {}, {:?}", value, commit); debug!("Building output: {}, {:?}", value, commit);
let rproof = proof::create(build.keychain, value, &key_id, commit, None).unwrap(); let rproof = proof::create(build.keychain, value, &key_id, commit, None).unwrap();

View file

@ -21,7 +21,7 @@ use core::core::KernelFeatures;
use core::core::{Output, OutputFeatures, TxKernel}; use core::core::{Output, OutputFeatures, TxKernel};
use libtx::error::Error; use libtx::error::Error;
use libtx::{aggsig, proof}; use libtx::{aggsig, proof};
use util::{kernel_sig_msg, secp, static_secp_instance, LOGGER}; use util::{kernel_sig_msg, secp, static_secp_instance};
/// output a reward output /// output a reward output
pub fn output<K>( pub fn output<K>(
@ -36,7 +36,7 @@ where
let value = reward(fees); let value = reward(fees);
let commit = keychain.commit(value, key_id)?; let commit = keychain.commit(value, key_id)?;
trace!(LOGGER, "Block reward - Pedersen Commit is: {:?}", commit,); trace!("Block reward - Pedersen Commit is: {:?}", commit,);
let rproof = proof::create(keychain, value, key_id, commit, None)?; let rproof = proof::create(keychain, value, key_id, commit, None)?;

View file

@ -27,9 +27,9 @@ use keychain::{BlindSum, BlindingFactor, Keychain};
use libtx::error::{Error, ErrorKind}; use libtx::error::{Error, ErrorKind};
use libtx::{aggsig, build, tx_fee}; use libtx::{aggsig, build, tx_fee};
use util::secp;
use util::secp::key::{PublicKey, SecretKey}; use util::secp::key::{PublicKey, SecretKey};
use util::secp::Signature; use util::secp::Signature;
use util::{secp, LOGGER};
/// Public data for each participant in the slate /// Public data for each participant in the slate
@ -289,7 +289,7 @@ impl Slate {
amount_to_hr_string(fee, false), amount_to_hr_string(fee, false),
amount_to_hr_string(self.amount + self.fee, false) amount_to_hr_string(self.amount + self.fee, false)
); );
info!(LOGGER, "{}", reason); info!("{}", reason);
return Err(ErrorKind::Fee(reason.to_string()))?; return Err(ErrorKind::Fee(reason.to_string()))?;
} }
@ -395,7 +395,7 @@ impl Slate {
final_tx.kernels_mut()[0].excess_sig = final_sig.clone(); final_tx.kernels_mut()[0].excess_sig = final_sig.clone();
// confirm the kernel verifies successfully before proceeding // confirm the kernel verifies successfully before proceeding
debug!(LOGGER, "Validating final transaction"); debug!("Validating final transaction");
final_tx.kernels()[0].verify()?; final_tx.kernels()[0].verify()?;
// confirm the overall transaction is valid (including the updated kernel) // confirm the overall transaction is valid (including the updated kernel)

View file

@ -36,8 +36,8 @@ use libwallet::types::{
WalletClient, WalletInfo, WalletClient, WalletInfo,
}; };
use libwallet::{Error, ErrorKind}; use libwallet::{Error, ErrorKind};
use util;
use util::secp::pedersen; use util::secp::pedersen;
use util::{self, LOGGER};
/// Wrapper around internal API functions, containing a reference to /// Wrapper around internal API functions, containing a reference to
/// the wallet/keychain that they're acting upon /// the wallet/keychain that they're acting upon
@ -188,7 +188,6 @@ where
Ok(s) => s, Ok(s) => s,
Err(e) => { Err(e) => {
error!( error!(
LOGGER,
"Communication with receiver failed on SenderInitiation send. Aborting transaction {:?}", "Communication with receiver failed on SenderInitiation send. Aborting transaction {:?}",
e, e,
); );
@ -321,11 +320,10 @@ where
}; };
let res = client.post_tx(&TxWrapper { tx_hex: tx_hex }, fluff); let res = client.post_tx(&TxWrapper { tx_hex: tx_hex }, fluff);
if let Err(e) = res { if let Err(e) = res {
error!(LOGGER, "api: post_tx: failed with error: {}", e); error!("api: post_tx: failed with error: {}", e);
Err(e) Err(e)
} else { } else {
debug!( debug!(
LOGGER,
"api: post_tx: successfully posted tx: {}, fluff? {}", "api: post_tx: successfully posted tx: {}, fluff? {}",
slate.tx.hash(), slate.tx.hash(),
fluff fluff
@ -351,14 +349,14 @@ where
}; };
if confirmed { if confirmed {
warn!( warn!(
LOGGER, "api: dump_stored_tx: transaction at {} is already confirmed.",
"api: dump_stored_tx: transaction at {} is already confirmed.", tx_id tx_id
); );
} }
if tx_hex.is_none() { if tx_hex.is_none() {
error!( error!(
LOGGER, "api: dump_stored_tx: completed transaction at {} does not exist.",
"api: dump_stored_tx: completed transaction at {} does not exist.", tx_id tx_id
); );
return Err(ErrorKind::TransactionBuildingNotCompleted(tx_id))?; return Err(ErrorKind::TransactionBuildingNotCompleted(tx_id))?;
} }
@ -386,15 +384,15 @@ where
}; };
if confirmed { if confirmed {
error!( error!(
LOGGER, "api: repost_tx: transaction at {} is confirmed. NOT resending.",
"api: repost_tx: transaction at {} is confirmed. NOT resending.", tx_id tx_id
); );
return Err(ErrorKind::TransactionAlreadyConfirmed)?; return Err(ErrorKind::TransactionAlreadyConfirmed)?;
} }
if tx_hex.is_none() { if tx_hex.is_none() {
error!( error!(
LOGGER, "api: repost_tx: completed transaction at {} does not exist.",
"api: repost_tx: completed transaction at {} does not exist.", tx_id tx_id
); );
return Err(ErrorKind::TransactionBuildingNotCompleted(tx_id))?; return Err(ErrorKind::TransactionBuildingNotCompleted(tx_id))?;
} }
@ -406,12 +404,12 @@ where
fluff, fluff,
); );
if let Err(e) = res { if let Err(e) = res {
error!(LOGGER, "api: repost_tx: failed with error: {}", e); error!("api: repost_tx: failed with error: {}", e);
Err(e) Err(e)
} else { } else {
debug!( debug!(
LOGGER, "api: repost_tx: successfully posted tx at: {}, fluff? {}",
"api: repost_tx: successfully posted tx at: {}, fluff? {}", tx_id, fluff tx_id, fluff
); );
Ok(()) Ok(())
} }
@ -541,11 +539,10 @@ where
w.close()?; w.close()?;
if let Err(e) = res { if let Err(e) = res {
error!(LOGGER, "api: receive_tx: failed with error: {}", e); error!("api: receive_tx: failed with error: {}", e);
Err(e) Err(e)
} else { } else {
debug!( debug!(
LOGGER,
"api: receive_tx: successfully received tx: {}", "api: receive_tx: successfully received tx: {}",
slate.tx.hash() slate.tx.hash()
); );

View file

@ -37,7 +37,7 @@ use std::sync::Arc;
use url::form_urlencoded; use url::form_urlencoded;
use util::secp::pedersen; use util::secp::pedersen;
use util::Mutex; use util::Mutex;
use util::{to_base64, LOGGER}; use util::to_base64;
/// Instantiate wallet Owner API for a single-use (command line) call /// Instantiate wallet Owner API for a single-use (command line) call
/// Return a function containing a loaded API context to call /// Return a function containing a loaded API context to call
@ -95,7 +95,7 @@ where
.map_err(|_| ErrorKind::GenericError("Router failed to add route".to_string()))?; .map_err(|_| ErrorKind::GenericError("Router failed to add route".to_string()))?;
let mut apis = ApiServer::new(); let mut apis = ApiServer::new();
info!(LOGGER, "Starting HTTP Owner API server at {}.", addr); info!("Starting HTTP Owner API server at {}.", addr);
let socket_addr: SocketAddr = addr.parse().expect("unable to parse socket address"); let socket_addr: SocketAddr = addr.parse().expect("unable to parse socket address");
let api_thread = let api_thread =
apis.start(socket_addr, router, tls_config) apis.start(socket_addr, router, tls_config)
@ -127,7 +127,7 @@ where
.map_err(|_| ErrorKind::GenericError("Router failed to add route".to_string()))?; .map_err(|_| ErrorKind::GenericError("Router failed to add route".to_string()))?;
let mut apis = ApiServer::new(); let mut apis = ApiServer::new();
info!(LOGGER, "Starting HTTP Foreign API server at {}.", addr); info!("Starting HTTP Foreign API server at {}.", addr);
let socket_addr: SocketAddr = addr.parse().expect("unable to parse socket address"); let socket_addr: SocketAddr = addr.parse().expect("unable to parse socket address");
let api_thread = let api_thread =
apis.start(socket_addr, router, tls_config) apis.start(socket_addr, router, tls_config)
@ -226,12 +226,12 @@ where
Ok(id) => match api.dump_stored_tx(id, false, "") { Ok(id) => match api.dump_stored_tx(id, false, "") {
Ok(tx) => Ok(tx), Ok(tx) => Ok(tx),
Err(e) => { Err(e) => {
error!(LOGGER, "dump_stored_tx: failed with error: {}", e); error!("dump_stored_tx: failed with error: {}", e);
Err(e) Err(e)
} }
}, },
Err(e) => { Err(e) => {
error!(LOGGER, "dump_stored_tx: could not parse id: {}", e); error!("dump_stored_tx: could not parse id: {}", e);
Err(ErrorKind::TransactionDumpError( Err(ErrorKind::TransactionDumpError(
"dump_stored_tx: cannot dump transaction. Could not parse id in request.", "dump_stored_tx: cannot dump transaction. Could not parse id in request.",
).into()) ).into())
@ -307,7 +307,7 @@ where
args.selection_strategy_is_use_all, args.selection_strategy_is_use_all,
) )
} else { } else {
error!(LOGGER, "unsupported payment method: {}", args.method); error!("unsupported payment method: {}", args.method);
return Err(ErrorKind::ClientCallback("unsupported payment method"))?; return Err(ErrorKind::ClientCallback("unsupported payment method"))?;
} }
})) }))
@ -322,7 +322,7 @@ where
parse_body(req).and_then(move |mut slate| match api.finalize_tx(&mut slate) { parse_body(req).and_then(move |mut slate| match api.finalize_tx(&mut slate) {
Ok(_) => ok(slate.clone()), Ok(_) => ok(slate.clone()),
Err(e) => { Err(e) => {
error!(LOGGER, "finalize_tx: failed with error: {}", e); error!("finalize_tx: failed with error: {}", e);
err(e) err(e)
} }
}), }),
@ -340,12 +340,12 @@ where
Ok(id) => match api.cancel_tx(id) { Ok(id) => match api.cancel_tx(id) {
Ok(_) => ok(()), Ok(_) => ok(()),
Err(e) => { Err(e) => {
error!(LOGGER, "cancel_tx: failed with error: {}", e); error!("cancel_tx: failed with error: {}", e);
err(e) err(e)
} }
}, },
Err(e) => { Err(e) => {
error!(LOGGER, "cancel_tx: could not parse id: {}", e); error!("cancel_tx: could not parse id: {}", e);
err(ErrorKind::TransactionCancellationError( err(ErrorKind::TransactionCancellationError(
"cancel_tx: cannot cancel transaction. Could not parse id in request.", "cancel_tx: cannot cancel transaction. Could not parse id in request.",
).into()) ).into())
@ -443,7 +443,7 @@ where
match self.handle_get_request(&req) { match self.handle_get_request(&req) {
Ok(r) => Box::new(ok(r)), Ok(r) => Box::new(ok(r)),
Err(e) => { Err(e) => {
error!(LOGGER, "Request Error: {:?}", e); error!("Request Error: {:?}", e);
Box::new(ok(create_error_response(e))) Box::new(ok(create_error_response(e)))
} }
} }
@ -454,7 +454,7 @@ where
self.handle_post_request(req) self.handle_post_request(req)
.and_then(|r| ok(r)) .and_then(|r| ok(r))
.or_else(|e| { .or_else(|e| {
error!(LOGGER, "Request Error: {:?}", e); error!("Request Error: {:?}", e);
ok(create_error_response(e)) ok(create_error_response(e))
}), }),
) )
@ -511,7 +511,7 @@ where
parse_body(req).and_then(move |mut slate| match api.receive_tx(&mut slate) { parse_body(req).and_then(move |mut slate| match api.receive_tx(&mut slate) {
Ok(_) => ok(slate.clone()), Ok(_) => ok(slate.clone()),
Err(e) => { Err(e) => {
error!(LOGGER, "receive_tx: failed with error: {}", e); error!("receive_tx: failed with error: {}", e);
err(e) err(e)
} }
}), }),
@ -548,7 +548,7 @@ where
{ {
fn post(&self, req: Request<Body>) -> ResponseFuture { fn post(&self, req: Request<Body>) -> ResponseFuture {
Box::new(self.handle_request(req).and_then(|r| ok(r)).or_else(|e| { Box::new(self.handle_request(req).and_then(|r| ok(r)).or_else(|e| {
error!(LOGGER, "Request Error: {:?}", e); error!("Request Error: {:?}", e);
ok(create_error_response(e)) ok(create_error_response(e))
})) }))
} }

View file

@ -21,7 +21,6 @@ use libwallet::types::*;
use libwallet::Error; use libwallet::Error;
use std::collections::HashMap; use std::collections::HashMap;
use util::secp::{key::SecretKey, pedersen}; use util::secp::{key::SecretKey, pedersen};
use util::LOGGER;
/// Utility struct for return values from below /// Utility struct for return values from below
struct OutputResult { struct OutputResult {
@ -55,7 +54,6 @@ where
let mut wallet_outputs: Vec<OutputResult> = Vec::new(); let mut wallet_outputs: Vec<OutputResult> = Vec::new();
info!( info!(
LOGGER,
"Scanning {} outputs in the current Grin utxo set", "Scanning {} outputs in the current Grin utxo set",
outputs.len(), outputs.len(),
); );
@ -70,10 +68,7 @@ where
continue; continue;
} }
info!( info!("Output found: {:?}, amount: {:?}", commit, info.value);
LOGGER,
"Output found: {:?}, amount: {:?}", commit, info.value
);
let lock_height = if *is_coinbase { let lock_height = if *is_coinbase {
*height + global::coinbase_maturity() *height + global::coinbase_maturity()
@ -109,14 +104,11 @@ where
// Don't proceed if wallet_data has anything in it // Don't proceed if wallet_data has anything in it
let is_empty = wallet.iter().next().is_none(); let is_empty = wallet.iter().next().is_none();
if !is_empty { if !is_empty {
error!( error!("Not restoring. Please back up and remove existing db directory first.");
LOGGER,
"Not restoring. Please back up and remove existing db directory first."
);
return Ok(()); return Ok(());
} }
info!(LOGGER, "Starting restore."); info!("Starting restore.");
let batch_size = 1000; let batch_size = 1000;
let mut start_index = 1; let mut start_index = 1;
@ -126,7 +118,6 @@ where
.client() .client()
.get_outputs_by_pmmr_index(start_index, batch_size)?; .get_outputs_by_pmmr_index(start_index, batch_size)?;
info!( info!(
LOGGER,
"Retrieved {} outputs, up to index {}. (Highest index: {})", "Retrieved {} outputs, up to index {}. (Highest index: {})",
outputs.len(), outputs.len(),
highest_index, highest_index,
@ -142,7 +133,6 @@ where
} }
info!( info!(
LOGGER,
"Identified {} wallet_outputs as belonging to this wallet", "Identified {} wallet_outputs as belonging to this wallet",
result_vec.len(), result_vec.len(),
); );

View file

@ -20,8 +20,6 @@ use libwallet::error::{Error, ErrorKind};
use libwallet::internal::keys; use libwallet::internal::keys;
use libwallet::types::*; use libwallet::types::*;
use util::LOGGER;
/// Initialize a transaction on the sender side, returns a corresponding /// Initialize a transaction on the sender side, returns a corresponding
/// libwallet transaction slate with the appropriate inputs selected, /// libwallet transaction slate with the appropriate inputs selected,
/// and saves the private wallet identifiers of our selected outputs /// and saves the private wallet identifiers of our selected outputs
@ -356,14 +354,11 @@ where
let mut change_amounts_derivations = vec![]; let mut change_amounts_derivations = vec![];
if change == 0 { if change == 0 {
debug!( debug!("No change (sending exactly amount + fee), no change outputs to build");
LOGGER,
"No change (sending exactly amount + fee), no change outputs to build"
);
} else { } else {
debug!( debug!(
LOGGER, "Building change outputs: total change: {} ({} outputs)",
"Building change outputs: total change: {} ({} outputs)", change, num_change_outputs change, num_change_outputs
); );
let part_change = change / num_change_outputs as u64; let part_change = change / num_change_outputs as u64;
@ -442,7 +437,6 @@ where
// coins = the amount. // coins = the amount.
if let Some(outputs) = select_from(amount, false, eligible.clone()) { if let Some(outputs) = select_from(amount, false, eligible.clone()) {
debug!( debug!(
LOGGER,
"Extending maximum number of outputs. {} outputs selected.", "Extending maximum number of outputs. {} outputs selected.",
outputs.len() outputs.len()
); );

View file

@ -25,7 +25,6 @@ use libtx::{build, tx_fee};
use libwallet::internal::{selection, updater}; use libwallet::internal::{selection, updater};
use libwallet::types::{Context, TxLogEntryType, WalletBackend, WalletClient}; use libwallet::types::{Context, TxLogEntryType, WalletBackend, WalletClient};
use libwallet::{Error, ErrorKind}; use libwallet::{Error, ErrorKind};
use util::LOGGER;
/// Receive a transaction, modifying the slate accordingly (which can then be /// Receive a transaction, modifying the slate accordingly (which can then be
/// sent back to sender for posting) /// sent back to sender for posting)
@ -225,7 +224,7 @@ where
parent_key_id, parent_key_id,
); );
debug!(LOGGER, "selected some coins - {}", coins.len()); debug!("selected some coins - {}", coins.len());
let fee = tx_fee(coins.len(), 2, 1, None); let fee = tx_fee(coins.len(), 2, 1, None);
let num_change_outputs = 1; let num_change_outputs = 1;

View file

@ -30,8 +30,8 @@ use libwallet::types::{
BlockFees, CbData, OutputData, OutputStatus, TxLogEntry, TxLogEntryType, WalletBackend, BlockFees, CbData, OutputData, OutputStatus, TxLogEntry, TxLogEntryType, WalletBackend,
WalletClient, WalletInfo, WalletClient, WalletInfo,
}; };
use util;
use util::secp::pedersen; use util::secp::pedersen;
use util::{self, LOGGER};
/// Retrieve all of the outputs (doesn't attempt to update from node) /// Retrieve all of the outputs (doesn't attempt to update from node)
pub fn retrieve_outputs<T: ?Sized, C, K>( pub fn retrieve_outputs<T: ?Sized, C, K>(
@ -201,14 +201,10 @@ where
// these changes as the chain is syncing, incorrect or forking // these changes as the chain is syncing, incorrect or forking
if height < last_confirmed_height { if height < last_confirmed_height {
warn!( warn!(
LOGGER,
"Not updating outputs as the height of the node's chain \ "Not updating outputs as the height of the node's chain \
is less than the last reported wallet update height." is less than the last reported wallet update height."
); );
warn!( warn!("Please wait for sync on node to complete or fork to resolve and try again.");
LOGGER,
"Please wait for sync on node to complete or fork to resolve and try again."
);
return Ok(()); return Ok(());
} }
let mut batch = wallet.batch()?; let mut batch = wallet.batch()?;
@ -274,7 +270,7 @@ where
C: WalletClient, C: WalletClient,
K: Keychain, K: Keychain,
{ {
debug!(LOGGER, "Refreshing wallet outputs"); debug!("Refreshing wallet outputs");
// build a local map of wallet outputs keyed by commit // build a local map of wallet outputs keyed by commit
// and a list of outputs we want to query the node for // and a list of outputs we want to query the node for
@ -423,7 +419,6 @@ where
} }
debug!( debug!(
LOGGER,
"receive_coinbase: built candidate output - {:?}, {}", "receive_coinbase: built candidate output - {:?}, {}",
key_id.clone(), key_id.clone(),
key_id, key_id,
@ -432,7 +427,7 @@ where
let mut block_fees = block_fees.clone(); let mut block_fees = block_fees.clone();
block_fees.key_id = Some(key_id.clone()); block_fees.key_id = Some(key_id.clone());
debug!(LOGGER, "receive_coinbase: {:?}", block_fees); debug!("receive_coinbase: {:?}", block_fees);
let (out, kern) = reward::output( let (out, kern) = reward::output(
wallet.keychain(), wallet.keychain(),

View file

@ -26,7 +26,6 @@ use error::{Error, ErrorKind};
use failure::ResultExt; use failure::ResultExt;
use keychain::Keychain; use keychain::Keychain;
use util; use util;
use util::LOGGER;
pub const SEED_FILE: &'static str = "wallet.seed"; pub const SEED_FILE: &'static str = "wallet.seed";
@ -118,7 +117,7 @@ impl WalletSeed {
wallet_config.data_file_dir, MAIN_SEPARATOR, SEED_FILE, wallet_config.data_file_dir, MAIN_SEPARATOR, SEED_FILE,
); );
debug!(LOGGER, "Generating wallet seed file at: {}", seed_file_path); debug!("Generating wallet seed file at: {}", seed_file_path);
if Path::new(seed_file_path).exists() { if Path::new(seed_file_path).exists() {
Err(ErrorKind::WalletSeedExists)? Err(ErrorKind::WalletSeedExists)?
@ -140,7 +139,7 @@ impl WalletSeed {
wallet_config.data_file_dir, MAIN_SEPARATOR, SEED_FILE, wallet_config.data_file_dir, MAIN_SEPARATOR, SEED_FILE,
); );
debug!(LOGGER, "Using wallet seed file at: {}", seed_file_path,); debug!("Using wallet seed file at: {}", seed_file_path,);
if Path::new(seed_file_path).exists() { if Path::new(seed_file_path).exists() {
let mut file = File::open(seed_file_path).context(ErrorKind::IO)?; let mut file = File::open(seed_file_path).context(ErrorKind::IO)?;
@ -150,7 +149,6 @@ impl WalletSeed {
Ok(wallet_seed) Ok(wallet_seed)
} else { } else {
error!( error!(
LOGGER,
"wallet seed file {} could not be opened (grin wallet init). \ "wallet seed file {} could not be opened (grin wallet init). \
Run \"grin wallet init\" to initialize a new wallet.", Run \"grin wallet init\" to initialize a new wallet.",
seed_file_path seed_file_path

View file

@ -20,7 +20,7 @@ extern crate grin_util as util;
extern crate grin_wallet as wallet; extern crate grin_wallet as wallet;
extern crate rand; extern crate rand;
#[macro_use] #[macro_use]
extern crate slog; extern crate log;
extern crate chrono; extern crate chrono;
extern crate serde; extern crate serde;
extern crate uuid; extern crate uuid;
@ -35,7 +35,6 @@ use std::time::Duration;
use core::global; use core::global;
use core::global::ChainTypes; use core::global::ChainTypes;
use keychain::{ExtKeychain, Keychain}; use keychain::{ExtKeychain, Keychain};
use util::LOGGER;
use wallet::libwallet; use wallet::libwallet;
fn clean_output_dir(test_dir: &str) { fn clean_output_dir(test_dir: &str) {
@ -69,7 +68,7 @@ fn accounts_test_impl(test_dir: &str) -> Result<(), libwallet::Error> {
// Set the wallet proxy listener running // Set the wallet proxy listener running
thread::spawn(move || { thread::spawn(move || {
if let Err(e) = wallet_proxy.run() { if let Err(e) = wallet_proxy.run() {
error!(LOGGER, "Wallet Proxy error: {}", e); error!("Wallet Proxy error: {}", e);
} }
}); });

View file

@ -28,8 +28,8 @@ use util::{Mutex, RwLock};
use common::api; use common::api;
use common::serde_json; use common::serde_json;
use store; use store;
use util;
use util::secp::pedersen::Commitment; use util::secp::pedersen::Commitment;
use util::{self, LOGGER};
use common::failure::ResultExt; use common::failure::ResultExt;
@ -146,7 +146,7 @@ where
thread::sleep(Duration::from_millis(10)); thread::sleep(Duration::from_millis(10));
// read queue // read queue
let m = self.rx.recv().unwrap(); let m = self.rx.recv().unwrap();
trace!(LOGGER, "Wallet Client Proxy Received: {:?}", m); trace!("Wallet Client Proxy Received: {:?}", m);
let resp = match m.method.as_ref() { let resp = match m.method.as_ref() {
"get_chain_height" => self.get_chain_height(m)?, "get_chain_height" => self.get_chain_height(m)?,
"get_outputs_from_node" => self.get_outputs_from_node(m)?, "get_outputs_from_node" => self.get_outputs_from_node(m)?,
@ -345,7 +345,7 @@ impl WalletClient for LocalWalletClient {
} }
let r = self.rx.lock(); let r = self.rx.lock();
let m = r.recv().unwrap(); let m = r.recv().unwrap();
trace!(LOGGER, "Received send_tx_slate response: {:?}", m.clone()); trace!("Received send_tx_slate response: {:?}", m.clone());
Ok( Ok(
serde_json::from_str(&m.body).context(libwallet::ErrorKind::ClientCallback( serde_json::from_str(&m.body).context(libwallet::ErrorKind::ClientCallback(
"Parsing send_tx_slate response", "Parsing send_tx_slate response",
@ -369,7 +369,7 @@ impl WalletClient for LocalWalletClient {
} }
let r = self.rx.lock(); let r = self.rx.lock();
let m = r.recv().unwrap(); let m = r.recv().unwrap();
trace!(LOGGER, "Received post_tx response: {:?}", m.clone()); trace!("Received post_tx response: {:?}", m.clone());
Ok(()) Ok(())
} }
@ -389,11 +389,7 @@ impl WalletClient for LocalWalletClient {
} }
let r = self.rx.lock(); let r = self.rx.lock();
let m = r.recv().unwrap(); let m = r.recv().unwrap();
trace!( trace!("Received get_chain_height response: {:?}", m.clone());
LOGGER,
"Received get_chain_height response: {:?}",
m.clone()
);
Ok(m.body Ok(m.body
.parse::<u64>() .parse::<u64>()
.context(libwallet::ErrorKind::ClientCallback( .context(libwallet::ErrorKind::ClientCallback(

View file

@ -20,7 +20,7 @@ extern crate grin_util as util;
extern crate grin_wallet as wallet; extern crate grin_wallet as wallet;
extern crate rand; extern crate rand;
#[macro_use] #[macro_use]
extern crate slog; extern crate log;
extern crate chrono; extern crate chrono;
extern crate serde; extern crate serde;
extern crate uuid; extern crate uuid;
@ -35,7 +35,6 @@ use std::time::Duration;
use core::global; use core::global;
use core::global::ChainTypes; use core::global::ChainTypes;
use keychain::{ExtKeychain, Identifier, Keychain}; use keychain::{ExtKeychain, Identifier, Keychain};
use util::LOGGER;
use wallet::libtx::slate::Slate; use wallet::libtx::slate::Slate;
use wallet::libwallet; use wallet::libwallet;
use wallet::libwallet::types::AcctPathMapping; use wallet::libwallet::types::AcctPathMapping;
@ -67,7 +66,7 @@ fn restore_wallet(base_dir: &str, wallet_dir: &str) -> Result<(), libwallet::Err
// Set the wallet proxy listener running // Set the wallet proxy listener running
thread::spawn(move || { thread::spawn(move || {
if let Err(e) = wallet_proxy.run() { if let Err(e) = wallet_proxy.run() {
error!(LOGGER, "Wallet Proxy error: {}", e); error!("Wallet Proxy error: {}", e);
} }
}); });
@ -121,7 +120,7 @@ fn compare_wallet_restore(
// Set the wallet proxy listener running // Set the wallet proxy listener running
thread::spawn(move || { thread::spawn(move || {
if let Err(e) = wallet_proxy.run() { if let Err(e) = wallet_proxy.run() {
error!(LOGGER, "Wallet Proxy error: {}", e); error!("Wallet Proxy error: {}", e);
} }
}); });
@ -218,7 +217,7 @@ fn setup_restore(test_dir: &str) -> Result<(), libwallet::Error> {
// Set the wallet proxy listener running // Set the wallet proxy listener running
thread::spawn(move || { thread::spawn(move || {
if let Err(e) = wallet_proxy.run() { if let Err(e) = wallet_proxy.run() {
error!(LOGGER, "Wallet Proxy error: {}", e); error!("Wallet Proxy error: {}", e);
} }
}); });

View file

@ -20,7 +20,7 @@ extern crate grin_util as util;
extern crate grin_wallet as wallet; extern crate grin_wallet as wallet;
extern crate rand; extern crate rand;
#[macro_use] #[macro_use]
extern crate slog; extern crate log;
extern crate chrono; extern crate chrono;
extern crate serde; extern crate serde;
extern crate uuid; extern crate uuid;
@ -35,7 +35,6 @@ use std::time::Duration;
use core::global; use core::global;
use core::global::ChainTypes; use core::global::ChainTypes;
use keychain::ExtKeychain; use keychain::ExtKeychain;
use util::LOGGER;
use wallet::libtx::slate::Slate; use wallet::libtx::slate::Slate;
use wallet::libwallet; use wallet::libwallet;
use wallet::libwallet::types::OutputStatus; use wallet::libwallet::types::OutputStatus;
@ -73,7 +72,7 @@ fn basic_transaction_api(test_dir: &str) -> Result<(), libwallet::Error> {
// Set the wallet proxy listener running // Set the wallet proxy listener running
thread::spawn(move || { thread::spawn(move || {
if let Err(e) = wallet_proxy.run() { if let Err(e) = wallet_proxy.run() {
error!(LOGGER, "Wallet Proxy error: {}", e); error!("Wallet Proxy error: {}", e);
} }
}); });
@ -87,10 +86,8 @@ fn basic_transaction_api(test_dir: &str) -> Result<(), libwallet::Error> {
wallet::controller::owner_single_use(wallet1.clone(), |api| { wallet::controller::owner_single_use(wallet1.clone(), |api| {
let (wallet1_refreshed, wallet1_info) = api.retrieve_summary_info(true)?; let (wallet1_refreshed, wallet1_info) = api.retrieve_summary_info(true)?;
debug!( debug!(
LOGGER,
"Wallet 1 Info Pre-Transaction, after {} blocks: {:?}", "Wallet 1 Info Pre-Transaction, after {} blocks: {:?}",
wallet1_info.last_confirmed_height, wallet1_info.last_confirmed_height, wallet1_info
wallet1_info
); );
assert!(wallet1_refreshed); assert!(wallet1_refreshed);
assert_eq!( assert_eq!(
@ -166,10 +163,8 @@ fn basic_transaction_api(test_dir: &str) -> Result<(), libwallet::Error> {
wallet::controller::owner_single_use(wallet1.clone(), |api| { wallet::controller::owner_single_use(wallet1.clone(), |api| {
let (wallet1_refreshed, wallet1_info) = api.retrieve_summary_info(true)?; let (wallet1_refreshed, wallet1_info) = api.retrieve_summary_info(true)?;
debug!( debug!(
LOGGER,
"Wallet 1 Info Post Transaction, after {} blocks: {:?}", "Wallet 1 Info Post Transaction, after {} blocks: {:?}",
wallet1_info.last_confirmed_height, wallet1_info.last_confirmed_height, wallet1_info
wallet1_info
); );
let fee = wallet::libtx::tx_fee( let fee = wallet::libtx::tx_fee(
wallet1_info.last_confirmed_height as usize - 1 - cm as usize, wallet1_info.last_confirmed_height as usize - 1 - cm as usize,
@ -207,7 +202,7 @@ fn basic_transaction_api(test_dir: &str) -> Result<(), libwallet::Error> {
// refresh wallets and retrieve info/tests for each wallet after maturity // refresh wallets and retrieve info/tests for each wallet after maturity
wallet::controller::owner_single_use(wallet1.clone(), |api| { wallet::controller::owner_single_use(wallet1.clone(), |api| {
let (wallet1_refreshed, wallet1_info) = api.retrieve_summary_info(true)?; let (wallet1_refreshed, wallet1_info) = api.retrieve_summary_info(true)?;
debug!(LOGGER, "Wallet 1 Info: {:?}", wallet1_info); debug!("Wallet 1 Info: {:?}", wallet1_info);
assert!(wallet1_refreshed); assert!(wallet1_refreshed);
assert_eq!( assert_eq!(
wallet1_info.total, wallet1_info.total,
@ -318,7 +313,7 @@ fn tx_rollback(test_dir: &str) -> Result<(), libwallet::Error> {
// Set the wallet proxy listener running // Set the wallet proxy listener running
thread::spawn(move || { thread::spawn(move || {
if let Err(e) = wallet_proxy.run() { if let Err(e) = wallet_proxy.run() {
error!(LOGGER, "Wallet Proxy error: {}", e); error!("Wallet Proxy error: {}", e);
} }
}); });