Replace logging backend with log4rs and add log rotation (#1789)

* Replace logging backend with flexi-logger and add log rotation
* Change flexi_logger to log4rs
* Disable logging level filtering in the Root logger
* Support different logging levels for file and stdout
* Don't log messages from non-Grin modules
* Fix formatting
* Place backed-up, compressed log copies into the log file directory
* Increase default log file size to 16 MiB
* Add a comment to the config file on the log_max_size option
eupn 2018-10-21 23:30:56 +03:00 committed by Ignotus Peverell
parent 0852b0c4cf
commit 1195071f5b
83 changed files with 582 additions and 897 deletions
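The bullets above describe the new logging behaviour: level thresholds are applied per output (stdout vs. file) rather than on the Root logger, and the file appender rotates at a size limit, keeping compressed backups in the log file's directory. For orientation, here is a hedged sketch of how such a setup can be expressed with the log4rs builder API; the file name, backup count and the 16 MiB limit are illustrative assumptions, and this is not the implementation added by this commit.

// A minimal sketch (not this commit's code) of a log4rs setup with the
// properties listed above: per-appender level thresholds instead of
// filtering on the Root logger, and size-based rotation that writes
// gzip-compressed backups next to the active log file. File name, backup
// count and the 16 MiB limit are illustrative; the "Grin modules only"
// filtering is omitted for brevity.
#[macro_use]
extern crate log;
extern crate log4rs;

use log::LevelFilter;
use log4rs::append::console::ConsoleAppender;
use log4rs::append::rolling_file::policy::compound::roll::fixed_window::FixedWindowRoller;
use log4rs::append::rolling_file::policy::compound::trigger::size::SizeTrigger;
use log4rs::append::rolling_file::policy::compound::CompoundPolicy;
use log4rs::append::rolling_file::RollingFileAppender;
use log4rs::config::{Appender, Config, Root};
use log4rs::filter::threshold::ThresholdFilter;

fn main() {
	// Roll the log once it reaches 16 MiB, keeping 3 gzip-compressed backups
	// (grin.log.0.gz, grin.log.1.gz, ...) in the same directory.
	let trigger = SizeTrigger::new(16 * 1024 * 1024);
	let roller = FixedWindowRoller::builder()
		.build("grin.log.{}.gz", 3)
		.unwrap();
	let policy = CompoundPolicy::new(Box::new(trigger), Box::new(roller));
	let file = RollingFileAppender::builder()
		.build("grin.log", Box::new(policy))
		.unwrap();
	let stdout = ConsoleAppender::builder().build();

	// Different levels for stdout and file are enforced by per-appender
	// ThresholdFilters; the Root logger stays at Trace so it does no
	// filtering of its own.
	let config = Config::builder()
		.appender(
			Appender::builder()
				.filter(Box::new(ThresholdFilter::new(LevelFilter::Info)))
				.build("stdout", Box::new(stdout)),
		).appender(
			Appender::builder()
				.filter(Box::new(ThresholdFilter::new(LevelFilter::Debug)))
				.build("file", Box::new(file)),
		).build(
			Root::builder()
				.appender("stdout")
				.appender("file")
				.build(LevelFilter::Trace),
		).unwrap();

	log4rs::init_config(config).unwrap();
	info!("logging initialised, rotating at 16 MiB");
}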


@ -24,7 +24,7 @@ humansize = "1.1.0"
daemonize = "0.3"
serde = "1"
serde_json = "1"
slog = { version = "~2.3", features = ["max_level_trace", "release_max_level_trace"] }
log = "0.4"
term = "0.5"
grin_api = { path = "./api" }


@ -15,7 +15,7 @@ ring = "0.13"
serde = "1"
serde_derive = "1"
serde_json = "1"
slog = { version = "~2.3", features = ["max_level_trace", "release_max_level_trace"] }
log = "0.4"
tokio = "0.1.7"
tokio-core = "0.1.17"
tokio-tcp = "0.1"


@ -37,7 +37,6 @@ use types::*;
use url::form_urlencoded;
use util;
use util::secp::pedersen::Commitment;
use util::LOGGER;
use web::*;
// All handlers use `Weak` references instead of `Arc` to avoid cycles that
@ -206,12 +205,8 @@ impl OutputHandler {
}
debug!(
LOGGER,
"outputs_block_batch: {}-{}, {:?}, {:?}",
start_height,
end_height,
commitments,
include_rp,
start_height, end_height, commitments, include_rp,
);
let mut return_vec = vec![];
@ -745,7 +740,6 @@ impl PoolPushHandler {
identifier: "?.?.?.?".to_string(),
};
info!(
LOGGER,
"Pushing transaction {} to pool (inputs: {}, outputs: {}, kernels: {})",
tx.hash(),
tx.inputs().len(),
@ -759,7 +753,7 @@ impl PoolPushHandler {
tx_pool
.add_to_pool(source, tx, !fluff, &header)
.map_err(|e| {
error!(LOGGER, "update_pool: failed with error: {:?}", e);
error!("update_pool: failed with error: {:?}", e);
ErrorKind::Internal(format!("Failed to update pool: {:?}", e)).into()
})
}),
@ -808,7 +802,7 @@ pub fn start_rest_apis(
router.add_middleware(basic_auth_middleware);
}
info!(LOGGER, "Starting HTTP API server at {}.", addr);
info!("Starting HTTP API server at {}.", addr);
let socket_addr: SocketAddr = addr.parse().expect("unable to parse socket address");
apis.start(socket_addr, router, tls_config).is_ok()
}


@ -33,7 +33,7 @@ extern crate serde;
extern crate serde_derive;
extern crate serde_json;
#[macro_use]
extern crate slog;
extern crate log;
extern crate futures;
extern crate http;
extern crate hyper_rustls;


@ -33,7 +33,6 @@ use std::sync::Arc;
use std::{io, thread};
use tokio_rustls::ServerConfigExt;
use tokio_tcp;
use util::LOGGER;
/// Errors that can be returned by an ApiEndpoint implementation.
#[derive(Debug)]
@ -243,13 +242,10 @@ impl ApiServer {
// TODO re-enable stop after investigation
//let tx = mem::replace(&mut self.shutdown_sender, None).unwrap();
//tx.send(()).expect("Failed to stop API server");
info!(LOGGER, "API server has been stoped");
info!("API server has been stoped");
true
} else {
error!(
LOGGER,
"Can't stop API server, it's not running or doesn't spport stop operation"
);
error!("Can't stop API server, it's not running or doesn't spport stop operation");
false
}
}
@ -263,7 +259,7 @@ impl Handler for LoggingMiddleware {
req: Request<Body>,
mut handlers: Box<Iterator<Item = HandlerObj>>,
) -> ResponseFuture {
debug!(LOGGER, "REST call: {} {}", req.method(), req.uri().path());
debug!("REST call: {} {}", req.method(), req.uri().path());
handlers.next().unwrap().call(req, handlers)
}
}


@ -12,7 +12,7 @@ lmdb-zero = "0.4.4"
failure = "0.1"
failure_derive = "0.1"
croaring = "0.3"
slog = { version = "~2.3", features = ["max_level_trace", "release_max_level_trace"] }
log = "0.4"
serde = "1"
serde_derive = "1"
chrono = "0.4.4"


@ -38,7 +38,6 @@ use store;
use txhashset;
use types::{ChainAdapter, NoStatus, Options, Tip, TxHashSetRoots, TxHashsetWriteStatus};
use util::secp::pedersen::{Commitment, RangeProof};
use util::LOGGER;
/// Orphan pool size is limited by MAX_ORPHAN_SIZE
pub const MAX_ORPHAN_SIZE: usize = 200;
@ -184,7 +183,6 @@ impl Chain {
let head = store.head()?;
debug!(
LOGGER,
"Chain init: {} @ {} [{}]",
head.total_difficulty.to_num(),
head.height,
@ -261,7 +259,6 @@ impl Chain {
&self.orphans.add(orphan);
debug!(
LOGGER,
"process_block: orphan: {:?}, # orphans {}{}",
block_hash,
self.orphans.len(),
@ -275,7 +272,6 @@ impl Chain {
}
ErrorKind::Unfit(ref msg) => {
debug!(
LOGGER,
"Block {} at {} is unfit at this time: {}",
b.hash(),
b.header.height,
@ -285,7 +281,6 @@ impl Chain {
}
_ => {
info!(
LOGGER,
"Rejected block {} at {}: {:?}",
b.hash(),
b.header.height,
@ -360,7 +355,6 @@ impl Chain {
// Is there an orphan in our orphans that we can now process?
loop {
trace!(
LOGGER,
"check_orphans: at {}, # orphans {}",
height,
self.orphans.len(),
@ -373,7 +367,6 @@ impl Chain {
let orphans_len = orphans.len();
for (i, orphan) in orphans.into_iter().enumerate() {
debug!(
LOGGER,
"check_orphans: get block {} at {}{}",
orphan.block.hash(),
height,
@ -402,7 +395,6 @@ impl Chain {
if initial_height != height {
debug!(
LOGGER,
"check_orphans: {} blocks accepted since height {}, remaining # orphans {}",
height - initial_height,
initial_height,
@ -589,7 +581,6 @@ impl Chain {
txhashset: &txhashset::TxHashSet,
) -> Result<(), Error> {
debug!(
LOGGER,
"chain: validate_kernel_history: rewinding and validating kernel history (readonly)"
);
@ -606,8 +597,8 @@ impl Chain {
})?;
debug!(
LOGGER,
"chain: validate_kernel_history: validated kernel root on {} headers", count,
"chain: validate_kernel_history: validated kernel root on {} headers",
count,
);
Ok(())
@ -682,10 +673,7 @@ impl Chain {
self.validate_kernel_history(&header, &txhashset)?;
// all good, prepare a new batch and update all the required records
debug!(
LOGGER,
"chain: txhashset_write: rewinding a 2nd time (writeable)"
);
debug!("chain: txhashset_write: rewinding a 2nd time (writeable)");
let mut batch = self.store.batch()?;
@ -709,10 +697,7 @@ impl Chain {
Ok(())
})?;
debug!(
LOGGER,
"chain: txhashset_write: finished validating and rebuilding"
);
debug!("chain: txhashset_write: finished validating and rebuilding");
status.on_save();
@ -727,10 +712,7 @@ impl Chain {
// Commit all the changes to the db.
batch.commit()?;
debug!(
LOGGER,
"chain: txhashset_write: finished committing the batch (head etc.)"
);
debug!("chain: txhashset_write: finished committing the batch (head etc.)");
// Replace the chain txhashset with the newly built one.
{
@ -738,10 +720,7 @@ impl Chain {
*txhashset_ref = txhashset;
}
debug!(
LOGGER,
"chain: txhashset_write: replaced our txhashset with the new one"
);
debug!("chain: txhashset_write: replaced our txhashset with the new one");
// Check for any orphan blocks and process them based on the new chain state.
self.check_orphans(header.height + 1);
@ -763,14 +742,11 @@ impl Chain {
/// therefore be called judiciously.
pub fn compact(&self) -> Result<(), Error> {
if self.archive_mode {
debug!(
LOGGER,
"Blockchain compaction disabled, node running in archive mode."
);
debug!("Blockchain compaction disabled, node running in archive mode.");
return Ok(());
}
debug!(LOGGER, "Starting blockchain compaction.");
debug!("Starting blockchain compaction.");
// Compact the txhashset via the extension.
{
let mut txhashset = self.txhashset.write();
@ -785,7 +761,7 @@ impl Chain {
// Now check we can still successfully validate the chain state after
// compacting, shouldn't be necessary once all of this is well-oiled
debug!(LOGGER, "Validating state after compaction.");
debug!("Validating state after compaction.");
self.validate(true)?;
// we need to be careful here in testing as 20 blocks is not that long
@ -798,7 +774,6 @@ impl Chain {
}
debug!(
LOGGER,
"Compaction remove blocks older than {}.",
head.height - horizon
);
@ -831,7 +806,7 @@ impl Chain {
}
}
batch.commit()?;
debug!(LOGGER, "Compaction removed {} blocks, done.", count);
debug!("Compaction removed {} blocks, done.", count);
Ok(())
}
@ -1052,7 +1027,6 @@ fn setup_head(
if header.height > 0 && extension.batch.get_block_sums(&header.hash()).is_err()
{
debug!(
LOGGER,
"chain: init: building (missing) block sums for {} @ {}",
header.height,
header.hash()
@ -1073,7 +1047,6 @@ fn setup_head(
}
debug!(
LOGGER,
"chain: init: rewinding and validating before we start... {} at {}",
header.hash(),
header.height,
@ -1110,7 +1083,7 @@ fn setup_head(
// Save the block_sums to the db for use later.
batch.save_block_sums(&genesis.hash(), &BlockSums::default())?;
info!(LOGGER, "chain: init: saved genesis: {:?}", genesis.hash());
info!("chain: init: saved genesis: {:?}", genesis.hash());
}
Err(e) => return Err(ErrorKind::StoreErr(e, "chain init load head".to_owned()))?,
};


@ -30,7 +30,7 @@ extern crate serde;
#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate slog;
extern crate log;
extern crate chrono;
extern crate failure;
#[macro_use]


@ -35,7 +35,6 @@ use grin_store;
use store;
use txhashset;
use types::{Options, Tip};
use util::LOGGER;
/// Contextual information required to process a new block and either reject or
/// accept it.
@ -71,7 +70,6 @@ pub fn process_block(b: &Block, ctx: &mut BlockContext) -> Result<Option<Tip>, E
// spend resources reading the full block when its header is invalid
debug!(
LOGGER,
"pipe: process_block {} at {} with {} inputs, {} outputs, {} kernels",
b.hash(),
b.header.height,
@ -168,7 +166,6 @@ pub fn process_block(b: &Block, ctx: &mut BlockContext) -> Result<Option<Tip>, E
})?;
trace!(
LOGGER,
"pipe: process_block: {} at {} is valid, save and append.",
b.hash(),
b.header.height,
@ -190,7 +187,6 @@ pub fn sync_block_headers(
) -> Result<Option<Tip>, Error> {
if let Some(header) = headers.first() {
debug!(
LOGGER,
"pipe: sync_block_headers: {} headers from {} at {}",
headers.len(),
header.hash(),
@ -251,7 +247,6 @@ pub fn sync_block_headers(
/// it.
pub fn process_block_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), Error> {
debug!(
LOGGER,
"pipe: process_block_header: {} at {}",
header.hash(),
header.height,
@ -356,8 +351,8 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), E
// check version, enforces scheduled hard fork
if !consensus::valid_header_version(header.height, header.version) {
error!(
LOGGER,
"Invalid block header version received ({}), maybe update Grin?", header.version
"Invalid block header version received ({}), maybe update Grin?",
header.version
);
return Err(ErrorKind::InvalidBlockVersion(header.version).into());
}
@ -378,8 +373,8 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), E
let edge_bits = header.pow.edge_bits();
if !(ctx.pow_verifier)(header, edge_bits).is_ok() {
error!(
LOGGER,
"pipe: error validating header with cuckoo edge_bits {}", edge_bits
"pipe: error validating header with cuckoo edge_bits {}",
edge_bits
);
return Err(ErrorKind::InvalidPow.into());
}
@ -434,7 +429,6 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), E
let next_header_info = consensus::next_difficulty(header.height, diff_iter);
if target_difficulty != next_header_info.difficulty {
info!(
LOGGER,
"validate_header: header target difficulty {} != {}",
target_difficulty.to_num(),
next_header_info.difficulty.to_num()
@ -548,8 +542,8 @@ fn update_head(b: &Block, ctx: &BlockContext) -> Result<Option<Tip>, Error> {
.map_err(|e| ErrorKind::StoreErr(e, "pipe save body".to_owned()))?;
debug!(
LOGGER,
"pipe: head updated to {} at {}", tip.last_block_h, tip.height
"pipe: head updated to {} at {}",
tip.last_block_h, tip.height
);
Ok(Some(tip))
@ -569,7 +563,7 @@ fn update_sync_head(bh: &BlockHeader, batch: &mut store::Batch) -> Result<(), Er
batch
.save_sync_head(&tip)
.map_err(|e| ErrorKind::StoreErr(e, "pipe save sync head".to_owned()))?;
debug!(LOGGER, "sync head {} @ {}", bh.hash(), bh.height);
debug!("sync head {} @ {}", bh.hash(), bh.height);
Ok(())
}
@ -583,8 +577,8 @@ fn update_header_head(bh: &BlockHeader, ctx: &mut BlockContext) -> Result<Option
.map_err(|e| ErrorKind::StoreErr(e, "pipe save header head".to_owned()))?;
debug!(
LOGGER,
"pipe: header_head updated to {} at {}", tip.last_block_h, tip.height
"pipe: header_head updated to {} at {}",
tip.last_block_h, tip.height
);
Ok(Some(tip))
@ -616,7 +610,6 @@ pub fn rewind_and_apply_fork(b: &Block, ext: &mut txhashset::Extension) -> Resul
let forked_header = ext.batch.get_block_header(&current)?;
trace!(
LOGGER,
"rewind_and_apply_fork @ {} [{}], was @ {} [{}]",
forked_header.height,
forked_header.hash(),
@ -627,11 +620,7 @@ pub fn rewind_and_apply_fork(b: &Block, ext: &mut txhashset::Extension) -> Resul
// Rewind the txhashset state back to the block where we forked from the most work chain.
ext.rewind(&forked_header)?;
trace!(
LOGGER,
"rewind_and_apply_fork: blocks on fork: {:?}",
fork_hashes,
);
trace!("rewind_and_apply_fork: blocks on fork: {:?}", fork_hashes,);
// Now re-apply all blocks on this fork.
for (_, h) in fork_hashes {


@ -40,7 +40,7 @@ use grin_store::types::prune_noop;
use store::{Batch, ChainStore};
use txhashset::{RewindableKernelView, UTXOView};
use types::{Tip, TxHashSetRoots, TxHashsetWriteStatus};
use util::{file, secp_static, zip, LOGGER};
use util::{file, secp_static, zip};
const HEADERHASHSET_SUBDIR: &'static str = "header";
const TXHASHSET_SUBDIR: &'static str = "txhashset";
@ -328,7 +328,7 @@ where
// we explicitly rewind the extension.
let header = batch.head_header()?;
trace!(LOGGER, "Starting new txhashset (readonly) extension.");
trace!("Starting new txhashset (readonly) extension.");
let res = {
let mut extension = Extension::new(trees, &batch, header);
@ -340,14 +340,14 @@ where
inner(&mut extension)
};
trace!(LOGGER, "Rollbacking txhashset (readonly) extension.");
trace!("Rollbacking txhashset (readonly) extension.");
trees.header_pmmr_h.backend.discard();
trees.output_pmmr_h.backend.discard();
trees.rproof_pmmr_h.backend.discard();
trees.kernel_pmmr_h.backend.discard();
trace!(LOGGER, "TxHashSet (readonly) extension done.");
trace!("TxHashSet (readonly) extension done.");
res
}
@ -423,7 +423,7 @@ where
// index saving can be undone
let child_batch = batch.child()?;
{
trace!(LOGGER, "Starting new txhashset extension.");
trace!("Starting new txhashset extension.");
// TODO - header_mmr may be out ahead via the header_head
// TODO - do we need to handle this via an explicit rewind on the header_mmr?
@ -436,10 +436,7 @@ where
match res {
Err(e) => {
debug!(
LOGGER,
"Error returned, discarding txhashset extension: {}", e
);
debug!("Error returned, discarding txhashset extension: {}", e);
trees.header_pmmr_h.backend.discard();
trees.output_pmmr_h.backend.discard();
trees.rproof_pmmr_h.backend.discard();
@ -448,13 +445,13 @@ where
}
Ok(r) => {
if rollback {
trace!(LOGGER, "Rollbacking txhashset extension. sizes {:?}", sizes);
trace!("Rollbacking txhashset extension. sizes {:?}", sizes);
trees.header_pmmr_h.backend.discard();
trees.output_pmmr_h.backend.discard();
trees.rproof_pmmr_h.backend.discard();
trees.kernel_pmmr_h.backend.discard();
} else {
trace!(LOGGER, "Committing txhashset extension. sizes {:?}", sizes);
trace!("Committing txhashset extension. sizes {:?}", sizes);
child_batch.commit()?;
trees.header_pmmr_h.backend.sync()?;
trees.output_pmmr_h.backend.sync()?;
@ -466,7 +463,7 @@ where
trees.kernel_pmmr_h.last_pos = sizes.3;
}
trace!(LOGGER, "TxHashSet extension done.");
trace!("TxHashSet extension done.");
Ok(r)
}
}
@ -497,7 +494,7 @@ where
// index saving can be undone
let child_batch = batch.child()?;
{
trace!(LOGGER, "Starting new txhashset sync_head extension.");
trace!("Starting new txhashset sync_head extension.");
let pmmr = DBPMMR::at(&mut trees.sync_pmmr_h.backend, trees.sync_pmmr_h.last_pos);
let mut extension = HeaderExtension::new(pmmr, &child_batch, header);
@ -510,31 +507,23 @@ where
match res {
Err(e) => {
debug!(
LOGGER,
"Error returned, discarding txhashset sync_head extension: {}", e
"Error returned, discarding txhashset sync_head extension: {}",
e
);
trees.sync_pmmr_h.backend.discard();
Err(e)
}
Ok(r) => {
if rollback {
trace!(
LOGGER,
"Rollbacking txhashset sync_head extension. size {:?}",
size
);
trace!("Rollbacking txhashset sync_head extension. size {:?}", size);
trees.sync_pmmr_h.backend.discard();
} else {
trace!(
LOGGER,
"Committing txhashset sync_head extension. size {:?}",
size
);
trace!("Committing txhashset sync_head extension. size {:?}", size);
child_batch.commit()?;
trees.sync_pmmr_h.backend.sync()?;
trees.sync_pmmr_h.last_pos = size;
}
trace!(LOGGER, "TxHashSet sync_head extension done.");
trace!("TxHashSet sync_head extension done.");
Ok(r)
}
}
@ -564,7 +553,7 @@ where
// index saving can be undone
let child_batch = batch.child()?;
{
trace!(LOGGER, "Starting new txhashset header extension.");
trace!("Starting new txhashset header extension.");
let pmmr = DBPMMR::at(
&mut trees.header_pmmr_h.backend,
trees.header_pmmr_h.last_pos,
@ -579,31 +568,23 @@ where
match res {
Err(e) => {
debug!(
LOGGER,
"Error returned, discarding txhashset header extension: {}", e
"Error returned, discarding txhashset header extension: {}",
e
);
trees.header_pmmr_h.backend.discard();
Err(e)
}
Ok(r) => {
if rollback {
trace!(
LOGGER,
"Rollbacking txhashset header extension. size {:?}",
size
);
trace!("Rollbacking txhashset header extension. size {:?}", size);
trees.header_pmmr_h.backend.discard();
} else {
trace!(
LOGGER,
"Committing txhashset header extension. size {:?}",
size
);
trace!("Committing txhashset header extension. size {:?}", size);
child_batch.commit()?;
trees.header_pmmr_h.backend.sync()?;
trees.header_pmmr_h.last_pos = size;
}
trace!(LOGGER, "TxHashSet header extension done.");
trace!("TxHashSet header extension done.");
Ok(r)
}
}
@ -654,7 +635,6 @@ impl<'a> HeaderExtension<'a> {
/// Note the close relationship between header height and insertion index.
pub fn rewind(&mut self, header: &BlockHeader) -> Result<(), Error> {
debug!(
LOGGER,
"Rewind header extension to {} at {}",
header.hash(),
header.height
@ -675,7 +655,7 @@ impl<'a> HeaderExtension<'a> {
/// Used when rebuilding the header MMR by reapplying all headers
/// including the genesis block header.
pub fn truncate(&mut self) -> Result<(), Error> {
debug!(LOGGER, "Truncating header extension.");
debug!("Truncating header extension.");
self.pmmr.rewind(0).map_err(&ErrorKind::TxHashSetErr)?;
Ok(())
}
@ -689,7 +669,6 @@ impl<'a> HeaderExtension<'a> {
/// Requires *all* header hashes to be iterated over in ascending order.
pub fn rebuild(&mut self, head: &Tip, genesis: &BlockHeader) -> Result<(), Error> {
debug!(
LOGGER,
"About to rebuild header extension from {:?} to {:?}.",
genesis.hash(),
head.last_block_h,
@ -712,7 +691,6 @@ impl<'a> HeaderExtension<'a> {
if header_hashes.len() > 0 {
debug!(
LOGGER,
"Re-applying {} headers to extension, from {:?} to {:?}.",
header_hashes.len(),
header_hashes.first().unwrap(),
@ -995,10 +973,7 @@ impl<'a> Extension<'a> {
/// We need the hash of each sibling pos from the pos up to the peak
/// including the sibling leaf node which may have been removed.
pub fn merkle_proof(&self, output: &OutputIdentifier) -> Result<MerkleProof, Error> {
debug!(
LOGGER,
"txhashset: merkle_proof: output: {:?}", output.commit,
);
debug!("txhashset: merkle_proof: output: {:?}", output.commit,);
// then calculate the Merkle Proof based on the known pos
let pos = self.batch.get_output_pos(&output.commit)?;
let merkle_proof = self
@ -1027,12 +1002,7 @@ impl<'a> Extension<'a> {
/// Rewinds the MMRs to the provided block, rewinding to the last output pos
/// and last kernel pos of that block.
pub fn rewind(&mut self, header: &BlockHeader) -> Result<(), Error> {
debug!(
LOGGER,
"Rewind to header {} at {}",
header.hash(),
header.height,
);
debug!("Rewind to header {} at {}", header.hash(), header.height,);
// We need to build bitmaps of added and removed output positions
// so we can correctly rewind all operations applied to the output MMR
@ -1067,11 +1037,8 @@ impl<'a> Extension<'a> {
rewind_rm_pos: &Bitmap,
) -> Result<(), Error> {
debug!(
LOGGER,
"txhashset: rewind_to_pos: header {}, output {}, kernel {}",
header_pos,
output_pos,
kernel_pos,
header_pos, output_pos, kernel_pos,
);
self.header_pmmr
@ -1191,7 +1158,6 @@ impl<'a> Extension<'a> {
}
debug!(
LOGGER,
"txhashset: validated the header {}, output {}, rproof {}, kernel {} mmrs, took {}s",
self.header_pmmr.unpruned_size(),
self.output_pmmr.unpruned_size(),
@ -1270,22 +1236,22 @@ impl<'a> Extension<'a> {
/// Dumps the output MMR.
/// We use this after compacting for visual confirmation that it worked.
pub fn dump_output_pmmr(&self) {
debug!(LOGGER, "-- outputs --");
debug!("-- outputs --");
self.output_pmmr.dump_from_file(false);
debug!(LOGGER, "--");
debug!("--");
self.output_pmmr.dump_stats();
debug!(LOGGER, "-- end of outputs --");
debug!("-- end of outputs --");
}
/// Dumps the state of the 3 sum trees to stdout for debugging. Short
/// version only prints the Output tree.
pub fn dump(&self, short: bool) {
debug!(LOGGER, "-- outputs --");
debug!("-- outputs --");
self.output_pmmr.dump(short);
if !short {
debug!(LOGGER, "-- range proofs --");
debug!("-- range proofs --");
self.rproof_pmmr.dump(short);
debug!(LOGGER, "-- kernels --");
debug!("-- kernels --");
self.kernel_pmmr.dump(short);
}
}
@ -1318,7 +1284,6 @@ impl<'a> Extension<'a> {
}
debug!(
LOGGER,
"txhashset: verified {} kernel signatures, pmmr size {}, took {}s",
kern_count,
self.kernel_pmmr.unpruned_size(),
@ -1353,8 +1318,8 @@ impl<'a> Extension<'a> {
commits.clear();
proofs.clear();
debug!(
LOGGER,
"txhashset: verify_rangeproofs: verified {} rangeproofs", proof_count,
"txhashset: verify_rangeproofs: verified {} rangeproofs",
proof_count,
);
}
}
@ -1370,13 +1335,12 @@ impl<'a> Extension<'a> {
commits.clear();
proofs.clear();
debug!(
LOGGER,
"txhashset: verify_rangeproofs: verified {} rangeproofs", proof_count,
"txhashset: verify_rangeproofs: verified {} rangeproofs",
proof_count,
);
}
debug!(
LOGGER,
"txhashset: verified {} rangeproofs, pmmr size {}, took {}s",
proof_count,
self.rproof_pmmr.unpruned_size(),
@ -1452,10 +1416,7 @@ fn check_and_remove_files(txhashset_path: &PathBuf, header: &BlockHeader) -> Res
// Removing unexpected directories if needed
if !dir_difference.is_empty() {
debug!(
LOGGER,
"Unexpected folder(s) found in txhashset folder, removing."
);
debug!("Unexpected folder(s) found in txhashset folder, removing.");
for diff in dir_difference {
let diff_path = txhashset_path.join(diff);
file::delete(diff_path)?;
@ -1492,7 +1453,6 @@ fn check_and_remove_files(txhashset_path: &PathBuf, header: &BlockHeader) -> Res
.collect();
if !difference.is_empty() {
debug!(
LOGGER,
"Unexpected file(s) found in txhashset subfolder {:?}, removing.",
&subdirectory_path
);
@ -1520,10 +1480,8 @@ pub fn input_pos_to_rewind(
if head_header.height < block_header.height {
debug!(
LOGGER,
"input_pos_to_rewind: {} < {}, nothing to rewind",
head_header.height,
block_header.height
head_header.height, block_header.height
);
return Ok(Bitmap::create());
}


@ -367,7 +367,7 @@ fn comments() -> HashMap<String, String> {
retval.insert(
"stdout_log_level".to_string(),
"
#log level for stdout: Critical, Error, Warning, Info, Debug, Trace
#log level for stdout: Error, Warning, Info, Debug, Trace
".to_string(),
);
@ -381,7 +381,7 @@ fn comments() -> HashMap<String, String> {
retval.insert(
"file_log_level".to_string(),
"
#log level for file: Critical, Error, Warning, Info, Debug, Trace
#log level for file: Error, Warning, Info, Debug, Trace
".to_string(),
);
@ -399,6 +399,14 @@ fn comments() -> HashMap<String, String> {
".to_string(),
);
retval.insert(
"log_max_size".to_string(),
"
#maximum log file size in bytes before performing log rotation
#comment it to disable log rotation
".to_string(),
);
retval
}
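For reference, the stdout_log_level, file_log_level and log_max_size keys documented above map onto the server's logging configuration struct. The sketch below is an assumption inferred from those keys and comments, not code from this commit; the real definition lives elsewhere in the tree and may differ.

// Sketch only: field names are assumptions derived from the documented config
// keys; `log_max_size` is optional so that commenting it out in the config
// file disables rotation, per the comment text above.
#[macro_use]
extern crate serde_derive;

/// Levels listed in the comments above ("Error, Warning, Info, Debug, Trace").
#[derive(Serialize, Deserialize, Debug, Clone, Copy)]
pub enum LogLevel {
	Error,
	Warning,
	Info,
	Debug,
	Trace,
}

/// Logging section of the server configuration file.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct LoggingConfig {
	pub log_to_stdout: bool,
	pub stdout_log_level: LogLevel,
	pub log_to_file: bool,
	pub file_log_level: LogLevel,
	pub log_file_path: String,
	pub log_file_append: bool,
	/// Maximum log file size in bytes before a rotation is performed.
	pub log_max_size: Option<u64>,
}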


@ -20,7 +20,7 @@ rand = "0.5"
serde = "1"
serde_derive = "1"
siphasher = "0.2"
slog = { version = "~2.3", features = ["max_level_trace", "release_max_level_trace"] }
log = "0.4"
chrono = "0.4.4"
grin_keychain = { path = "../keychain" }


@ -36,7 +36,7 @@ use global;
use keychain::{self, BlindingFactor};
use pow::{Difficulty, Proof, ProofOfWork};
use ser::{self, PMMRable, Readable, Reader, Writeable, Writer};
use util::{secp, static_secp_instance, LOGGER};
use util::{secp, static_secp_instance};
/// Errors thrown by Block validation
#[derive(Debug, Clone, Eq, PartialEq, Fail)]
@ -425,12 +425,7 @@ impl Block {
/// Note: caller must validate the block themselves, we do not validate it
/// here.
pub fn hydrate_from(cb: CompactBlock, txs: Vec<Transaction>) -> Result<Block, Error> {
trace!(
LOGGER,
"block: hydrate_from: {}, {} txs",
cb.hash(),
txs.len(),
);
trace!("block: hydrate_from: {}, {} txs", cb.hash(), txs.len(),);
let header = cb.header.clone();


@ -22,7 +22,6 @@ use core::merkle_proof::MerkleProof;
use core::pmmr::{Backend, ReadonlyPMMR};
use core::BlockHeader;
use ser::{PMMRIndexHashable, PMMRable};
use util::LOGGER;
/// 64 bits all ones: 0b11111111...1
const ALL_ONES: u64 = u64::MAX;
@ -137,7 +136,7 @@ where
/// Build a Merkle proof for the element at the given position.
pub fn merkle_proof(&self, pos: u64) -> Result<MerkleProof, String> {
debug!(LOGGER, "merkle_proof {}, last_pos {}", pos, self.last_pos);
debug!("merkle_proof {}, last_pos {}", pos, self.last_pos);
// check this pos is actually a leaf in the MMR
if !is_leaf(pos) {
@ -384,14 +383,14 @@ where
None => hashes.push_str(&format!("{:>8} ", "??")),
}
}
trace!(LOGGER, "{}", idx);
trace!(LOGGER, "{}", hashes);
trace!("{}", idx);
trace!("{}", hashes);
}
}
/// Prints PMMR statistics to the logs, used for debugging.
pub fn dump_stats(&self) {
debug!(LOGGER, "pmmr: unpruned - {}", self.unpruned_size());
debug!("pmmr: unpruned - {}", self.unpruned_size());
self.backend.dump_stats();
}
@ -418,8 +417,8 @@ where
None => hashes.push_str(&format!("{:>8} ", " .")),
}
}
debug!(LOGGER, "{}", idx);
debug!(LOGGER, "{}", hashes);
debug!("{}", idx);
debug!("{}", hashes);
}
}
}


@ -19,7 +19,6 @@ use lru_cache::LruCache;
use core::hash::{Hash, Hashed};
use core::{Output, TxKernel};
use util::LOGGER;
/// Verifier cache for caching expensive verification results.
/// Specifically the following -
@ -72,7 +71,6 @@ impl VerifierCache for LruVerifierCache {
}).cloned()
.collect::<Vec<_>>();
debug!(
LOGGER,
"lru_verifier_cache: kernel sigs: {}, not cached (must verify): {}",
kernels.len(),
res.len()
@ -91,7 +89,6 @@ impl VerifierCache for LruVerifierCache {
}).cloned()
.collect::<Vec<_>>();
debug!(
LOGGER,
"lru_verifier_cache: rangeproofs: {}, not cached (must verify): {}",
outputs.len(),
res.len()


@ -38,7 +38,7 @@ extern crate serde;
extern crate serde_derive;
extern crate siphasher;
#[macro_use]
extern crate slog;
extern crate log;
extern crate chrono;
extern crate failure;
#[macro_use]


@ -9,7 +9,7 @@ publish = false
byteorder = "1"
blake2-rfc = "0.2"
rand = "0.5"
slog = { version = "~2.3", features = ["max_level_trace", "release_max_level_trace"] }
log = "0.4"
serde = "1"
serde_derive = "1"
serde_json = "1"


@ -24,10 +24,10 @@ extern crate serde;
extern crate serde_derive;
extern crate digest;
extern crate hmac;
extern crate log;
extern crate ripemd160;
extern crate serde_json;
extern crate sha2;
extern crate slog;
extern crate uuid;
mod base58;


@ -15,7 +15,7 @@ num = "0.1"
rand = "0.5"
serde = "1"
serde_derive = "1"
slog = { version = "~2.3", features = ["max_level_trace", "release_max_level_trace"] }
log = "0.4"
chrono = { version = "0.4.4", features = ["serde"] }
grin_core = { path = "../core" }


@ -31,7 +31,6 @@ use util::RwLock;
use core::ser;
use msg::{read_body, read_exact, read_header, write_all, write_to_buf, MsgHeader, Type};
use types::Error;
use util::LOGGER;
/// A trait to be implemented in order to receive messages from the
/// connection. Allows providing an optional response.
@ -234,7 +233,6 @@ fn poll<H>(
if let Some(h) = try_break!(error_tx, read_header(conn, None)) {
let msg = Message::from_header(h, conn);
trace!(
LOGGER,
"Received message header, type {:?}, len {}.",
msg.header.msg_type,
msg.header.msg_len
@ -276,7 +274,6 @@ fn poll<H>(
// check the close channel
if let Ok(_) = close_rx.try_recv() {
debug!(
LOGGER,
"Connection close with {} initiated by us",
conn.peer_addr()
.map(|a| a.to_string())


@ -25,7 +25,6 @@ use core::pow::Difficulty;
use msg::{read_message, write_message, Hand, Shake, SockAddr, Type, PROTOCOL_VERSION, USER_AGENT};
use peer::Peer;
use types::{Capabilities, Direction, Error, P2PConfig, PeerInfo, PeerLiveInfo};
use util::LOGGER;
const NONCES_CAP: usize = 100;
@ -115,7 +114,6 @@ impl Handshake {
}
debug!(
LOGGER,
"Connected! Cumulative {} offered from {:?} {:?} {:?}",
shake.total_difficulty.to_num(),
peer_info.addr,
@ -186,7 +184,7 @@ impl Handshake {
};
write_message(conn, shake, Type::Shake)?;
trace!(LOGGER, "Success handshake with {}.", peer_info.addr);
trace!("Success handshake with {}.", peer_info.addr);
// when more than one protocol version is supported, choosing should go here
Ok(peer_info)


@ -37,7 +37,7 @@ extern crate serde;
#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate slog;
extern crate log;
extern crate chrono;
mod conn;


@ -26,7 +26,6 @@ use core::pow::Difficulty;
use core::ser::{self, Readable, Reader, Writeable, Writer};
use types::{Capabilities, Error, ReasonForBan, MAX_BLOCK_HEADERS, MAX_LOCATORS, MAX_PEER_ADDRS};
use util::LOGGER;
/// Current latest version of the protocol
pub const PROTOCOL_VERSION: u32 = 1;
@ -207,8 +206,8 @@ pub fn read_header(conn: &mut TcpStream, msg_type: Option<Type>) -> Result<MsgHe
// TODO 4x the limits for now to leave ourselves space to change things
if header.msg_len > max_len * 4 {
error!(
LOGGER,
"Too large read {}, had {}, wanted {}.", header.msg_type as u8, max_len, header.msg_len
"Too large read {}, had {}, wanted {}.",
header.msg_type as u8, max_len, header.msg_len
);
return Err(Error::Serialization(ser::Error::TooLargeReadErr));
}


@ -28,7 +28,6 @@ use protocol::Protocol;
use types::{
Capabilities, ChainAdapter, Error, NetAdapter, P2PConfig, PeerInfo, ReasonForBan, TxHashSetRead,
};
use util::LOGGER;
const MAX_TRACK_SIZE: usize = 30;
@ -104,8 +103,8 @@ impl Peer {
if let Some(ref denied) = config.peers_deny {
if denied.contains(&peer) {
debug!(
LOGGER,
"checking peer allowed/denied: {:?} explicitly denied", peer_addr
"checking peer allowed/denied: {:?} explicitly denied",
peer_addr
);
return true;
}
@ -113,14 +112,14 @@ impl Peer {
if let Some(ref allowed) = config.peers_allow {
if allowed.contains(&peer) {
debug!(
LOGGER,
"checking peer allowed/denied: {:?} explicitly allowed", peer_addr
"checking peer allowed/denied: {:?} explicitly allowed",
peer_addr
);
return false;
} else {
debug!(
LOGGER,
"checking peer allowed/denied: {:?} not explicitly allowed, denying", peer_addr
"checking peer allowed/denied: {:?} not explicitly allowed, denying",
peer_addr
);
return true;
}
@ -198,13 +197,10 @@ impl Peer {
.unwrap()
.send(ban_reason_msg, msg::Type::BanReason)
{
Ok(_) => debug!(
LOGGER,
"Sent ban reason {:?} to {}", ban_reason, self.info.addr
),
Ok(_) => debug!("Sent ban reason {:?} to {}", ban_reason, self.info.addr),
Err(e) => error!(
LOGGER,
"Could not send ban reason {:?} to {}: {:?}", ban_reason, self.info.addr, e
"Could not send ban reason {:?} to {}: {:?}",
ban_reason, self.info.addr, e
),
};
}
@ -213,7 +209,7 @@ impl Peer {
/// if the remote peer is known to already have the block.
pub fn send_block(&self, b: &core::Block) -> Result<bool, Error> {
if !self.tracking_adapter.has(b.hash()) {
trace!(LOGGER, "Send block {} to {}", b.hash(), self.info.addr);
trace!("Send block {} to {}", b.hash(), self.info.addr);
self.connection
.as_ref()
.unwrap()
@ -221,7 +217,6 @@ impl Peer {
Ok(true)
} else {
debug!(
LOGGER,
"Suppress block send {} to {} (already seen)",
b.hash(),
self.info.addr,
@ -232,12 +227,7 @@ impl Peer {
pub fn send_compact_block(&self, b: &core::CompactBlock) -> Result<bool, Error> {
if !self.tracking_adapter.has(b.hash()) {
trace!(
LOGGER,
"Send compact block {} to {}",
b.hash(),
self.info.addr
);
trace!("Send compact block {} to {}", b.hash(), self.info.addr);
self.connection
.as_ref()
.unwrap()
@ -245,7 +235,6 @@ impl Peer {
Ok(true)
} else {
debug!(
LOGGER,
"Suppress compact block send {} to {} (already seen)",
b.hash(),
self.info.addr,
@ -256,7 +245,7 @@ impl Peer {
pub fn send_header(&self, bh: &core::BlockHeader) -> Result<bool, Error> {
if !self.tracking_adapter.has(bh.hash()) {
debug!(LOGGER, "Send header {} to {}", bh.hash(), self.info.addr);
debug!("Send header {} to {}", bh.hash(), self.info.addr);
self.connection
.as_ref()
.unwrap()
@ -264,7 +253,6 @@ impl Peer {
Ok(true)
} else {
debug!(
LOGGER,
"Suppress header send {} to {} (already seen)",
bh.hash(),
self.info.addr,
@ -277,7 +265,7 @@ impl Peer {
/// dropped if the remote peer is known to already have the transaction.
pub fn send_transaction(&self, tx: &core::Transaction) -> Result<bool, Error> {
if !self.tracking_adapter.has(tx.hash()) {
debug!(LOGGER, "Send tx {} to {}", tx.hash(), self.info.addr);
debug!("Send tx {} to {}", tx.hash(), self.info.addr);
self.connection
.as_ref()
.unwrap()
@ -285,7 +273,6 @@ impl Peer {
Ok(true)
} else {
debug!(
LOGGER,
"Not sending tx {} to {} (already seen)",
tx.hash(),
self.info.addr
@ -298,7 +285,7 @@ impl Peer {
/// Note: tracking adapter is ignored for stem transactions (while under
/// embargo).
pub fn send_stem_transaction(&self, tx: &core::Transaction) -> Result<(), Error> {
debug!(LOGGER, "Send (stem) tx {} to {}", tx.hash(), self.info.addr);
debug!("Send (stem) tx {} to {}", tx.hash(), self.info.addr);
self.connection
.as_ref()
.unwrap()
@ -316,10 +303,7 @@ impl Peer {
/// Sends a request for a specific block by hash
pub fn send_block_request(&self, h: Hash) -> Result<(), Error> {
debug!(
LOGGER,
"Requesting block {} from peer {}.", h, self.info.addr
);
debug!("Requesting block {} from peer {}.", h, self.info.addr);
self.connection
.as_ref()
.unwrap()
@ -328,10 +312,7 @@ impl Peer {
/// Sends a request for a specific compact block by hash
pub fn send_compact_block_request(&self, h: Hash) -> Result<(), Error> {
debug!(
LOGGER,
"Requesting compact block {} from {}", h, self.info.addr
);
debug!("Requesting compact block {} from {}", h, self.info.addr);
self.connection
.as_ref()
.unwrap()
@ -339,7 +320,7 @@ impl Peer {
}
pub fn send_peer_request(&self, capab: Capabilities) -> Result<(), Error> {
debug!(LOGGER, "Asking {} for more peers.", self.info.addr);
debug!("Asking {} for more peers.", self.info.addr);
self.connection.as_ref().unwrap().send(
&GetPeerAddrs {
capabilities: capab,
@ -350,8 +331,8 @@ impl Peer {
pub fn send_txhashset_request(&self, height: u64, hash: Hash) -> Result<(), Error> {
debug!(
LOGGER,
"Asking {} for txhashset archive at {} {}.", self.info.addr, height, hash
"Asking {} for txhashset archive at {} {}.",
self.info.addr, height, hash
);
self.connection.as_ref().unwrap().send(
&TxHashSetRequest { hash, height },
@ -378,8 +359,8 @@ impl Peer {
};
if need_stop {
debug!(
LOGGER,
"Client {} corrupted, will disconnect ({:?}).", self.info.addr, e
"Client {} corrupted, will disconnect ({:?}).",
self.info.addr, e
);
self.stop();
}
@ -396,7 +377,7 @@ impl Peer {
}
};
if need_stop {
debug!(LOGGER, "Client {} connection lost: {:?}", self.info.addr, e);
debug!("Client {} connection lost: {:?}", self.info.addr, e);
self.stop();
}
false


@ -24,7 +24,6 @@ use chrono::prelude::*;
use core::core;
use core::core::hash::{Hash, Hashed};
use core::pow::Difficulty;
use util::LOGGER;
use peer::Peer;
use store::{PeerData, PeerStore, State};
@ -71,7 +70,7 @@ impl Peers {
};
addr = peer.info.addr.clone();
}
debug!(LOGGER, "Saving newly connected peer {}.", addr);
debug!("Saving newly connected peer {}.", addr);
self.save_peer(&peer_data)?;
{
@ -94,11 +93,11 @@ impl Peers {
.write()
.insert(Utc::now().timestamp(), peer.clone());
debug!(
LOGGER,
"Successfully updated Dandelion relay to: {}", peer.info.addr
"Successfully updated Dandelion relay to: {}",
peer.info.addr
);
}
None => debug!(LOGGER, "Could not update dandelion relay"),
None => debug!("Could not update dandelion relay"),
};
}
@ -238,11 +237,11 @@ impl Peers {
/// Ban a peer, disconnecting it if we're currently connected
pub fn ban_peer(&self, peer_addr: &SocketAddr, ban_reason: ReasonForBan) {
if let Err(e) = self.update_state(*peer_addr, State::Banned) {
error!(LOGGER, "Couldn't ban {}: {:?}", peer_addr, e);
error!("Couldn't ban {}: {:?}", peer_addr, e);
}
if let Some(peer) = self.get_connected_peer(peer_addr) {
debug!(LOGGER, "Banning peer {}", peer_addr);
debug!("Banning peer {}", peer_addr);
// setting peer status will get it removed at the next clean_peer
peer.send_ban_reason(ban_reason);
peer.set_banned();
@ -256,13 +255,13 @@ impl Peers {
Ok(_) => {
if self.is_banned(*peer_addr) {
if let Err(e) = self.update_state(*peer_addr, State::Healthy) {
error!(LOGGER, "Couldn't unban {}: {:?}", peer_addr, e);
error!("Couldn't unban {}: {:?}", peer_addr, e);
}
} else {
error!(LOGGER, "Couldn't unban {}: peer is not banned", peer_addr);
error!("Couldn't unban {}: peer is not banned", peer_addr);
}
}
Err(e) => error!(LOGGER, "Couldn't unban {}: {:?}", peer_addr, e),
Err(e) => error!("Couldn't unban {}: {:?}", peer_addr, e),
};
}
@ -278,7 +277,7 @@ impl Peers {
match inner(&p) {
Ok(true) => count += 1,
Ok(false) => (),
Err(e) => debug!(LOGGER, "Error sending {} to peer: {:?}", obj_name, e),
Err(e) => debug!("Error sending {} to peer: {:?}", obj_name, e),
}
if count >= num_peers {
@ -297,7 +296,6 @@ impl Peers {
let num_peers = self.config.peer_max_count();
let count = self.broadcast("compact block", num_peers, |p| p.send_compact_block(b));
debug!(
LOGGER,
"broadcast_compact_block: {}, {} at {}, to {} peers, done.",
b.hash(),
b.header.pow.total_difficulty,
@ -315,7 +313,6 @@ impl Peers {
let num_peers = self.config.peer_min_preferred_count();
let count = self.broadcast("header", num_peers, |p| p.send_header(bh));
debug!(
LOGGER,
"broadcast_header: {}, {} at {}, to {} peers, done.",
bh.hash(),
bh.pow.total_difficulty,
@ -328,7 +325,7 @@ impl Peers {
pub fn broadcast_stem_transaction(&self, tx: &core::Transaction) -> Result<(), Error> {
let dandelion_relay = self.get_dandelion_relay();
if dandelion_relay.is_empty() {
debug!(LOGGER, "No dandelion relay, updating.");
debug!("No dandelion relay, updating.");
self.update_dandelion_relay();
}
// If still return an error, let the caller handle this as they see fit.
@ -339,10 +336,7 @@ impl Peers {
for relay in dandelion_relay.values() {
if relay.is_connected() {
if let Err(e) = relay.send_stem_transaction(tx) {
debug!(
LOGGER,
"Error sending stem transaction to peer relay: {:?}", e
);
debug!("Error sending stem transaction to peer relay: {:?}", e);
}
}
}
@ -358,7 +352,6 @@ impl Peers {
let num_peers = self.config.peer_min_preferred_count();
let count = self.broadcast("transaction", num_peers, |p| p.send_transaction(tx));
trace!(
LOGGER,
"broadcast_transaction: {}, to {} peers, done.",
tx.hash(),
count,
@ -417,15 +410,15 @@ impl Peers {
// build a list of peers to be cleaned up
for peer in self.peers.read().values() {
if peer.is_banned() {
debug!(LOGGER, "clean_peers {:?}, peer banned", peer.info.addr);
debug!("clean_peers {:?}, peer banned", peer.info.addr);
rm.push(peer.clone());
} else if !peer.is_connected() {
debug!(LOGGER, "clean_peers {:?}, not connected", peer.info.addr);
debug!("clean_peers {:?}, not connected", peer.info.addr);
rm.push(peer.clone());
} else {
let (stuck, diff) = peer.is_stuck();
if stuck && diff < self.adapter.total_difficulty() {
debug!(LOGGER, "clean_peers {:?}, stuck peer", peer.info.addr);
debug!("clean_peers {:?}, stuck peer", peer.info.addr);
peer.stop();
let _ = self.update_state(peer.info.addr, State::Defunct);
rm.push(peer.clone());
@ -497,8 +490,8 @@ impl ChainAdapter for Peers {
// if the peer sent us a block that's intrinsically bad
// they are either mistaken or malevolent, both of which require a ban
debug!(
LOGGER,
"Received a bad block {} from {}, the peer will be banned", hash, peer_addr
"Received a bad block {} from {}, the peer will be banned",
hash, peer_addr
);
self.ban_peer(&peer_addr, ReasonForBan::BadBlock);
false
@ -513,10 +506,8 @@ impl ChainAdapter for Peers {
// if the peer sent us a block that's intrinsically bad
// they are either mistaken or malevolent, both of which require a ban
debug!(
LOGGER,
"Received a bad compact block {} from {}, the peer will be banned",
hash,
&peer_addr
hash, &peer_addr
);
self.ban_peer(&peer_addr, ReasonForBan::BadCompactBlock);
false
@ -566,8 +557,8 @@ impl ChainAdapter for Peers {
fn txhashset_write(&self, h: Hash, txhashset_data: File, peer_addr: SocketAddr) -> bool {
if !self.adapter.txhashset_write(h, txhashset_data, peer_addr) {
debug!(
LOGGER,
"Received a bad txhashset data from {}, the peer will be banned", &peer_addr
"Received a bad txhashset data from {}, the peer will be banned",
&peer_addr
);
self.ban_peer(&peer_addr, ReasonForBan::BadTxHashSet);
false
@ -592,17 +583,13 @@ impl NetAdapter for Peers {
/// addresses.
fn find_peer_addrs(&self, capab: Capabilities) -> Vec<SocketAddr> {
let peers = self.find_peers(State::Healthy, capab, MAX_PEER_ADDRS as usize);
trace!(
LOGGER,
"find_peer_addrs: {} healthy peers picked",
peers.len()
);
trace!("find_peer_addrs: {} healthy peers picked", peers.len());
map_vec!(peers, |p| p.addr)
}
/// A list of peers has been received from one of our peers.
fn peer_addrs_received(&self, peer_addrs: Vec<SocketAddr>) {
trace!(LOGGER, "Received {} peer addrs, saving.", peer_addrs.len());
trace!("Received {} peer addrs, saving.", peer_addrs.len());
for pa in peer_addrs {
if let Ok(e) = self.exists_peer(pa) {
if e {
@ -618,7 +605,7 @@ impl NetAdapter for Peers {
ban_reason: ReasonForBan::None,
};
if let Err(e) = self.save_peer(&peer) {
error!(LOGGER, "Could not save received peer address: {:?}", e);
error!("Could not save received peer address: {:?}", e);
}
}
}


@ -30,7 +30,6 @@ use msg::{
TxHashSetArchive, TxHashSetRequest, Type,
};
use types::{Error, NetAdapter};
use util::LOGGER;
pub struct Protocol {
adapter: Arc<NetAdapter>,
@ -52,10 +51,8 @@ impl MessageHandler for Protocol {
// banned peers up correctly?
if adapter.is_banned(self.addr.clone()) {
debug!(
LOGGER,
"handler: consume: peer {:?} banned, received: {:?}, dropping.",
self.addr,
msg.header.msg_type,
self.addr, msg.header.msg_type,
);
return Ok(None);
}
@ -82,14 +79,14 @@ impl MessageHandler for Protocol {
Type::BanReason => {
let ban_reason: BanReason = msg.body()?;
error!(LOGGER, "handle_payload: BanReason {:?}", ban_reason);
error!("handle_payload: BanReason {:?}", ban_reason);
Ok(None)
}
Type::Transaction => {
debug!(
LOGGER,
"handle_payload: received tx: msg_len: {}", msg.header.msg_len
"handle_payload: received tx: msg_len: {}",
msg.header.msg_len
);
let tx: core::Transaction = msg.body()?;
adapter.transaction_received(tx, false);
@ -98,8 +95,8 @@ impl MessageHandler for Protocol {
Type::StemTransaction => {
debug!(
LOGGER,
"handle_payload: received stem tx: msg_len: {}", msg.header.msg_len
"handle_payload: received stem tx: msg_len: {}",
msg.header.msg_len
);
let tx: core::Transaction = msg.body()?;
adapter.transaction_received(tx, true);
@ -109,7 +106,6 @@ impl MessageHandler for Protocol {
Type::GetBlock => {
let h: Hash = msg.body()?;
trace!(
LOGGER,
"handle_payload: Getblock: {}, msg_len: {}",
h,
msg.header.msg_len,
@ -124,8 +120,8 @@ impl MessageHandler for Protocol {
Type::Block => {
debug!(
LOGGER,
"handle_payload: received block: msg_len: {}", msg.header.msg_len
"handle_payload: received block: msg_len: {}",
msg.header.msg_len
);
let b: core::Block = msg.body()?;
@ -145,8 +141,8 @@ impl MessageHandler for Protocol {
Type::CompactBlock => {
debug!(
LOGGER,
"handle_payload: received compact block: msg_len: {}", msg.header.msg_len
"handle_payload: received compact block: msg_len: {}",
msg.header.msg_len
);
let b: core::CompactBlock = msg.body()?;
@ -218,8 +214,8 @@ impl MessageHandler for Protocol {
Type::TxHashSetRequest => {
let sm_req: TxHashSetRequest = msg.body()?;
debug!(
LOGGER,
"handle_payload: txhashset req for {} at {}", sm_req.hash, sm_req.height
"handle_payload: txhashset req for {} at {}",
sm_req.hash, sm_req.height
);
let txhashset = self.adapter.txhashset_read(sm_req.hash);
@ -244,15 +240,11 @@ impl MessageHandler for Protocol {
Type::TxHashSetArchive => {
let sm_arch: TxHashSetArchive = msg.body()?;
debug!(
LOGGER,
"handle_payload: txhashset archive for {} at {}. size={}",
sm_arch.hash,
sm_arch.height,
sm_arch.bytes,
sm_arch.hash, sm_arch.height, sm_arch.bytes,
);
if !self.adapter.txhashset_receive_ready() {
error!(
LOGGER,
"handle_payload: txhashset archive received but SyncStatus not on TxHashsetDownload",
);
return Err(Error::BadMessage);
@ -284,14 +276,13 @@ impl MessageHandler for Protocol {
if let Err(e) = save_txhashset_to_file(tmp.clone()) {
error!(
LOGGER,
"handle_payload: txhashset archive save to file fail. err={:?}", e
"handle_payload: txhashset archive save to file fail. err={:?}",
e
);
return Err(e);
}
trace!(
LOGGER,
"handle_payload: txhashset archive save to file {:?} success",
tmp,
);
@ -302,18 +293,15 @@ impl MessageHandler for Protocol {
.txhashset_write(sm_arch.hash, tmp_zip, self.addr);
debug!(
LOGGER,
"handle_payload: txhashset archive for {} at {}, DONE. Data Ok: {}",
sm_arch.hash,
sm_arch.height,
res
sm_arch.hash, sm_arch.height, res
);
Ok(None)
}
_ => {
debug!(LOGGER, "unknown message type {:?}", msg.header.msg_type);
debug!("unknown message type {:?}", msg.header.msg_type);
Ok(None)
}
}
@ -341,12 +329,8 @@ fn headers_header_size(conn: &mut TcpStream, msg_len: u64) -> Result<u64, Error>
let max_size = min_size + 6;
if average_header_size < min_size as u64 || average_header_size > max_size as u64 {
debug!(
LOGGER,
"headers_header_size - size of Vec: {}, average_header_size: {}, min: {}, max: {}",
total_headers,
average_header_size,
min_size,
max_size,
total_headers, average_header_size, min_size, max_size,
);
return Err(Error::Connection(io::Error::new(
io::ErrorKind::InvalidData,


@ -30,7 +30,6 @@ use peer::Peer;
use peers::Peers;
use store::PeerStore;
use types::{Capabilities, ChainAdapter, Error, NetAdapter, P2PConfig, TxHashSetRead};
use util::LOGGER;
/// P2P server implementation, handling bootstrapping to find and connect to
/// peers, receiving connections from other peers and keep track of all of them.
@ -64,17 +63,14 @@ impl Server {
// Check that we have block 1
match block_1_hash {
Some(hash) => match adapter.get_block(hash) {
Some(_) => debug!(LOGGER, "Full block 1 found, archive capabilities confirmed"),
Some(_) => debug!("Full block 1 found, archive capabilities confirmed"),
None => {
debug!(
LOGGER,
"Full block 1 not found, archive capabilities disabled"
);
debug!("Full block 1 not found, archive capabilities disabled");
capab.remove(Capabilities::FULL_HIST);
}
},
None => {
debug!(LOGGER, "Block 1 not found, archive capabilities disabled");
debug!("Block 1 not found, archive capabilities disabled");
capab.remove(Capabilities::FULL_HIST);
}
}
@ -102,12 +98,7 @@ impl Server {
Ok((stream, peer_addr)) => {
if !self.check_banned(&stream) {
if let Err(e) = self.handle_new_peer(stream) {
warn!(
LOGGER,
"Error accepting peer {}: {:?}",
peer_addr.to_string(),
e
);
warn!("Error accepting peer {}: {:?}", peer_addr.to_string(), e);
}
}
}
@ -115,7 +106,7 @@ impl Server {
// nothing to do, will retry in next iteration
}
Err(e) => {
warn!(LOGGER, "Couldn't establish new client connection: {:?}", e);
warn!("Couldn't establish new client connection: {:?}", e);
}
}
if self.stop.load(Ordering::Relaxed) {
@ -130,10 +121,7 @@ impl Server {
/// we're already connected to the provided address.
pub fn connect(&self, addr: &SocketAddr) -> Result<Arc<Peer>, Error> {
if Peer::is_denied(&self.config, &addr) {
debug!(
LOGGER,
"connect_peer: peer {} denied, not connecting.", addr
);
debug!("connect_peer: peer {} denied, not connecting.", addr);
return Err(Error::ConnectionClose);
}
@ -148,12 +136,11 @@ impl Server {
if let Some(p) = self.peers.get_connected_peer(addr) {
// if we're already connected to the addr, just return the peer
trace!(LOGGER, "connect_peer: already connected {}", addr);
trace!("connect_peer: already connected {}", addr);
return Ok(p);
}
trace!(
LOGGER,
"connect_peer: on {}:{}. connecting to {}",
self.config.host,
self.config.port,
@ -179,12 +166,8 @@ impl Server {
}
Err(e) => {
debug!(
LOGGER,
"connect_peer: on {}:{}. Could not connect to {}: {:?}",
self.config.host,
self.config.port,
addr,
e
self.config.host, self.config.port, addr, e
);
Err(Error::Connection(e))
}
@ -211,9 +194,9 @@ impl Server {
// peer has been banned, go away!
if let Ok(peer_addr) = stream.peer_addr() {
if self.peers.is_banned(peer_addr) {
debug!(LOGGER, "Peer {} banned, refusing connection.", peer_addr);
debug!("Peer {} banned, refusing connection.", peer_addr);
if let Err(e) = stream.shutdown(Shutdown::Both) {
debug!(LOGGER, "Error shutting down conn: {:?}", e);
debug!("Error shutting down conn: {:?}", e);
}
return true;
}


@ -26,7 +26,6 @@ use core::ser::{self, Readable, Reader, Writeable, Writer};
use grin_store::{self, option_to_not_found, to_key, Error};
use msg::SockAddr;
use types::{Capabilities, ReasonForBan};
use util::LOGGER;
const STORE_SUBPATH: &'static str = "peers";
@ -111,7 +110,7 @@ impl PeerStore {
}
pub fn save_peer(&self, p: &PeerData) -> Result<(), Error> {
debug!(LOGGER, "save_peer: {:?} marked {:?}", p.addr, p.flags);
debug!("save_peer: {:?} marked {:?}", p.addr, p.flags);
let batch = self.db.batch()?;
batch.put_ser(&peer_key(p.addr)[..], p)?;


@ -10,7 +10,7 @@ blake2-rfc = "0.2"
rand = "0.5"
serde = "1"
serde_derive = "1"
slog = { version = "~2.3", features = ["max_level_trace", "release_max_level_trace"] }
log = "0.4"
chrono = "0.4.4"
grin_core = { path = "../core" }


@ -30,7 +30,7 @@ extern crate serde;
#[macro_use] // Needed for Serialize/Deserialize. The compiler complaining here is a bug.
extern crate serde_derive;
#[macro_use]
extern crate slog;
extern crate log;
extern crate chrono;
mod pool;


@ -26,7 +26,6 @@ use core::core::transaction;
use core::core::verifier_cache::VerifierCache;
use core::core::{Block, BlockHeader, BlockSums, Committed, Transaction, TxKernel};
use types::{BlockChain, PoolEntry, PoolEntryState, PoolError};
use util::LOGGER;
// max weight leaving minimum space for a coinbase
const MAX_MINEABLE_WEIGHT: usize =
@ -192,7 +191,6 @@ impl Pool {
header: &BlockHeader,
) -> Result<(), PoolError> {
debug!(
LOGGER,
"pool [{}]: add_to_pool: {}, {:?}, inputs: {}, outputs: {}, kernels: {} (at block {})",
self.name,
entry.tx.hash(),


@ -13,8 +13,8 @@ hyper-staticfile = "0.3"
itertools = "0.7"
lmdb-zero = "0.4.4"
rand = "0.5"
slog = { version = "~2.3", features = ["max_level_trace", "release_max_level_trace"] }
serde = "1"
log = "0.4"
serde_derive = "1"
serde_json = "1"
chrono = "0.4.4"


@ -35,7 +35,7 @@ use p2p;
use pool;
use rand::prelude::*;
use store;
use util::{OneTime, LOGGER};
use util::OneTime;
/// Implementation of the NetAdapter for the . Gets notified when new
/// blocks and transactions are received and forwards to the chain and pool
@ -74,7 +74,6 @@ impl p2p::ChainAdapter for NetToChainAdapter {
let header = self.chain().head_header().unwrap();
debug!(
LOGGER,
"Received tx {}, inputs: {}, outputs: {}, kernels: {}, going to process.",
tx_hash,
tx.inputs().len(),
@ -88,13 +87,12 @@ impl p2p::ChainAdapter for NetToChainAdapter {
};
if let Err(e) = res {
debug!(LOGGER, "Transaction {} rejected: {:?}", tx_hash, e);
debug!("Transaction {} rejected: {:?}", tx_hash, e);
}
}
fn block_received(&self, b: core::Block, addr: SocketAddr) -> bool {
debug!(
LOGGER,
"Received block {} at {} from {}, inputs: {}, outputs: {}, kernels: {}, going to process.",
b.hash(),
b.header.height,
@ -109,7 +107,6 @@ impl p2p::ChainAdapter for NetToChainAdapter {
fn compact_block_received(&self, cb: core::CompactBlock, addr: SocketAddr) -> bool {
let bhash = cb.hash();
debug!(
LOGGER,
"Received compact_block {} at {} from {}, outputs: {}, kernels: {}, kern_ids: {}, going to process.",
bhash,
cb.header.height,
@ -125,7 +122,7 @@ impl p2p::ChainAdapter for NetToChainAdapter {
match core::Block::hydrate_from(cb, vec![]) {
Ok(block) => self.process_block(block, addr),
Err(e) => {
debug!(LOGGER, "Invalid hydrated block {}: {}", cb_hash, e);
debug!("Invalid hydrated block {}: {}", cb_hash, e);
return false;
}
}
@ -135,7 +132,7 @@ impl p2p::ChainAdapter for NetToChainAdapter {
.chain()
.process_block_header(&cb.header, self.chain_opts())
{
debug!(LOGGER, "Invalid compact block header {}: {}", cb_hash, e);
debug!("Invalid compact block header {}: {}", cb_hash, e);
return !e.is_bad_data();
}
@ -145,7 +142,6 @@ impl p2p::ChainAdapter for NetToChainAdapter {
};
debug!(
LOGGER,
"adapter: txs from tx pool - {}, (unknown kern_ids: {})",
txs.len(),
missing_short_ids.len(),
@ -159,7 +155,7 @@ impl p2p::ChainAdapter for NetToChainAdapter {
let block = match core::Block::hydrate_from(cb.clone(), txs) {
Ok(block) => block,
Err(e) => {
debug!(LOGGER, "Invalid hydrated block {}: {}", cb.hash(), e);
debug!("Invalid hydrated block {}: {}", cb.hash(), e);
return false;
}
};
@ -169,29 +165,22 @@ impl p2p::ChainAdapter for NetToChainAdapter {
.validate(&prev.total_kernel_offset, self.verifier_cache.clone())
.is_ok()
{
debug!(LOGGER, "adapter: successfully hydrated block from tx pool!");
debug!("adapter: successfully hydrated block from tx pool!");
self.process_block(block, addr)
} else {
if self.sync_state.status() == SyncStatus::NoSync {
debug!(
LOGGER,
"adapter: block invalid after hydration, requesting full block"
);
debug!("adapter: block invalid after hydration, requesting full block");
self.request_block(&cb.header, &addr);
true
} else {
debug!(
LOGGER,
"adapter: block invalid after hydration, ignoring it, cause still syncing"
);
true
}
}
} else {
debug!(
LOGGER,
"adapter: failed to retrieve previous block header (still syncing?)"
);
debug!("adapter: failed to retrieve previous block header (still syncing?)");
true
}
}
@ -200,8 +189,8 @@ impl p2p::ChainAdapter for NetToChainAdapter {
fn header_received(&self, bh: core::BlockHeader, addr: SocketAddr) -> bool {
let bhash = bh.hash();
debug!(
LOGGER,
"Received block header {} at {} from {}, going to process.", bhash, bh.height, addr,
"Received block header {} at {} from {}, going to process.",
bhash, bh.height, addr,
);
// pushing the new block header through the header chain pipeline
@ -209,16 +198,11 @@ impl p2p::ChainAdapter for NetToChainAdapter {
let res = self.chain().process_block_header(&bh, self.chain_opts());
if let &Err(ref e) = &res {
debug!(
LOGGER,
"Block header {} refused by chain: {:?}",
bhash,
e.kind()
);
debug!("Block header {} refused by chain: {:?}", bhash, e.kind());
if e.is_bad_data() {
debug!(
LOGGER,
"header_received: {} is a bad header, resetting header head", bhash
"header_received: {} is a bad header, resetting header head",
bhash
);
let _ = self.chain().reset_head();
return false;
@ -239,7 +223,6 @@ impl p2p::ChainAdapter for NetToChainAdapter {
fn headers_received(&self, bhs: Vec<core::BlockHeader>, addr: SocketAddr) -> bool {
info!(
LOGGER,
"Received block headers {:?} from {}",
bhs.iter().map(|x| x.hash()).collect::<Vec<_>>(),
addr,
@ -252,7 +235,7 @@ impl p2p::ChainAdapter for NetToChainAdapter {
// try to add headers to our header chain
let res = self.chain().sync_block_headers(&bhs, self.chain_opts());
if let &Err(ref e) = &res {
debug!(LOGGER, "Block headers refused by chain: {:?}", e);
debug!("Block headers refused by chain: {:?}", e);
if e.is_bad_data() {
return false;
@ -262,14 +245,14 @@ impl p2p::ChainAdapter for NetToChainAdapter {
}
fn locate_headers(&self, locator: Vec<Hash>) -> Vec<core::BlockHeader> {
debug!(LOGGER, "locate_headers: {:?}", locator,);
debug!("locate_headers: {:?}", locator,);
let header = match self.find_common_header(locator) {
Some(header) => header,
None => return vec![],
};
debug!(LOGGER, "locate_headers: common header: {:?}", header.hash(),);
debug!("locate_headers: common header: {:?}", header.hash(),);
// looks like we know one, getting as many following headers as allowed
let hh = header.height;
@ -281,18 +264,14 @@ impl p2p::ChainAdapter for NetToChainAdapter {
Err(e) => match e.kind() {
chain::ErrorKind::StoreErr(store::Error::NotFoundErr(_), _) => break,
_ => {
error!(LOGGER, "Could not build header locator: {:?}", e);
error!("Could not build header locator: {:?}", e);
return vec![];
}
},
}
}
debug!(
LOGGER,
"locate_headers: returning headers: {}",
headers.len(),
);
debug!("locate_headers: returning headers: {}", headers.len(),);
headers
}
@ -317,10 +296,7 @@ impl p2p::ChainAdapter for NetToChainAdapter {
reader: read,
}),
Err(e) => {
warn!(
LOGGER,
"Couldn't produce txhashset data for block {}: {:?}", h, e
);
warn!("Couldn't produce txhashset data for block {}: {:?}", h, e);
None
}
}
@ -367,12 +343,12 @@ impl p2p::ChainAdapter for NetToChainAdapter {
.chain()
.txhashset_write(h, txhashset_data, self.sync_state.as_ref())
{
error!(LOGGER, "Failed to save txhashset archive: {}", e);
error!("Failed to save txhashset archive: {}", e);
let is_good_data = !e.is_bad_data();
self.sync_state.set_sync_error(types::Error::Chain(e));
is_good_data
} else {
info!(LOGGER, "Received valid txhashset data for {}.", h);
info!("Received valid txhashset data for {}.", h);
true
}
}
@ -447,7 +423,7 @@ impl NetToChainAdapter {
self.find_common_header(locator[1..].to_vec())
}
_ => {
error!(LOGGER, "Could not build header locator: {:?}", e);
error!("Could not build header locator: {:?}", e);
None
}
},
@ -479,8 +455,8 @@ impl NetToChainAdapter {
}
Err(ref e) if e.is_bad_data() => {
debug!(
LOGGER,
"adapter: process_block: {} is a bad block, resetting head", bhash
"adapter: process_block: {} is a bad block, resetting head",
bhash
);
let _ = self.chain().reset_head();
@ -495,14 +471,13 @@ impl NetToChainAdapter {
chain::ErrorKind::Orphan => {
// make sure we did not miss the parent block
if !self.chain().is_orphan(&prev_hash) && !self.sync_state.is_syncing() {
debug!(LOGGER, "adapter: process_block: received an orphan block, checking the parent: {:}", prev_hash);
debug!("adapter: process_block: received an orphan block, checking the parent: {:}", prev_hash);
self.request_block_by_hash(prev_hash, &addr)
}
true
}
_ => {
debug!(
LOGGER,
"adapter: process_block: block {} refused by chain: {}",
bhash,
e.kind()
@ -527,8 +502,8 @@ impl NetToChainAdapter {
let now = Instant::now();
debug!(
LOGGER,
"adapter: process_block: ***** validating full chain state at {}", bhash,
"adapter: process_block: ***** validating full chain state at {}",
bhash,
);
self.chain()
@ -536,7 +511,6 @@ impl NetToChainAdapter {
.expect("chain validation failed, hard stop");
debug!(
LOGGER,
"adapter: process_block: ***** done validating full chain state, took {}s",
now.elapsed().as_secs(),
);
@ -558,7 +532,7 @@ impl NetToChainAdapter {
.name("compactor".to_string())
.spawn(move || {
if let Err(e) = chain.compact() {
error!(LOGGER, "Could not compact chain: {:?}", e);
error!("Could not compact chain: {:?}", e);
}
});
}
@ -592,23 +566,19 @@ impl NetToChainAdapter {
match self.chain().block_exists(h) {
Ok(false) => match self.peers().get_connected_peer(addr) {
None => debug!(
LOGGER,
"send_block_request_to_peer: can't send request to peer {:?}, not connected",
addr
),
Some(peer) => {
if let Err(e) = f(&peer, h) {
error!(LOGGER, "send_block_request_to_peer: failed: {:?}", e)
error!("send_block_request_to_peer: failed: {:?}", e)
}
}
},
Ok(true) => debug!(
LOGGER,
"send_block_request_to_peer: block {} already known", h
),
Ok(true) => debug!("send_block_request_to_peer: block {} already known", h),
Err(e) => error!(
LOGGER,
"send_block_request_to_peer: failed to check block exists: {:?}", e
"send_block_request_to_peer: failed to check block exists: {:?}",
e
),
}
}
@ -639,11 +609,10 @@ impl ChainAdapter for ChainToPoolAndNetAdapter {
return;
}
debug!(LOGGER, "adapter: block_accepted: {:?}", b.hash());
debug!("adapter: block_accepted: {:?}", b.hash());
if let Err(e) = self.tx_pool.write().reconcile_block(b) {
error!(
LOGGER,
"Pool could not update itself at block {}: {:?}",
b.hash(),
e,

View file

@ -25,7 +25,6 @@ use core::{core, pow};
use p2p;
use pool;
use store;
use util::LOGGER;
use wallet;
/// Error type wrapping underlying module errors.
@ -314,10 +313,7 @@ impl SyncState {
let mut status = self.current.write();
debug!(
LOGGER,
"sync_state: sync_status: {:?} -> {:?}", *status, new_status,
);
debug!("sync_state: sync_status: {:?} -> {:?}", *status, new_status,);
*status = new_status;
}

View file

@ -24,7 +24,6 @@ use core::core::hash::Hashed;
use core::core::transaction;
use core::core::verifier_cache::VerifierCache;
use pool::{DandelionConfig, PoolEntryState, PoolError, TransactionPool, TxSource};
use util::LOGGER;
/// A process to monitor transactions in the stempool.
/// With Dandelion, transaction can be broadcasted in stem or fluff phase.
@ -40,7 +39,7 @@ pub fn monitor_transactions(
verifier_cache: Arc<RwLock<VerifierCache>>,
stop: Arc<AtomicBool>,
) {
debug!(LOGGER, "Started Dandelion transaction monitor.");
debug!("Started Dandelion transaction monitor.");
let _ = thread::Builder::new()
.name("dandelion".to_string())
@ -58,26 +57,26 @@ pub fn monitor_transactions(
// Aggregate them up to give a single (valid) aggregated tx and propagate it
// to the next Dandelion relay along the stem.
if process_stem_phase(tx_pool.clone(), verifier_cache.clone()).is_err() {
error!(LOGGER, "dand_mon: Problem with stem phase.");
error!("dand_mon: Problem with stem phase.");
}
// Step 2: find all "ToFluff" entries in stempool from last run.
// Aggregate them up to give a single (valid) aggregated tx and (re)add it
// to our pool with stem=false (which will then broadcast it).
if process_fluff_phase(tx_pool.clone(), verifier_cache.clone()).is_err() {
error!(LOGGER, "dand_mon: Problem with fluff phase.");
error!("dand_mon: Problem with fluff phase.");
}
// Step 3: now find all "Fresh" entries in stempool since last run.
// Coin flip for each (90/10) and label them as either "ToStem" or "ToFluff".
// We will process these in the next run (waiting patience secs).
if process_fresh_entries(dandelion_config.clone(), tx_pool.clone()).is_err() {
error!(LOGGER, "dand_mon: Problem processing fresh pool entries.");
error!("dand_mon: Problem processing fresh pool entries.");
}
// Step 4: now find all expired entries based on embargo timer.
if process_expired_entries(dandelion_config.clone(), tx_pool.clone()).is_err() {
error!(LOGGER, "dand_mon: Problem processing fresh pool entries.");
error!("dand_mon: Problem processing fresh pool entries.");
}
}
});
@ -103,21 +102,14 @@ fn process_stem_phase(
.transition_to_state(&stem_txs, PoolEntryState::Stemmed);
if stem_txs.len() > 0 {
debug!(
LOGGER,
"dand_mon: Found {} txs for stemming.",
stem_txs.len()
);
debug!("dand_mon: Found {} txs for stemming.", stem_txs.len());
let agg_tx = transaction::aggregate(stem_txs)?;
agg_tx.validate(verifier_cache.clone())?;
let res = tx_pool.adapter.stem_tx_accepted(&agg_tx);
if res.is_err() {
debug!(
LOGGER,
"dand_mon: Unable to propagate stem tx. No relay, fluffing instead."
);
debug!("dand_mon: Unable to propagate stem tx. No relay, fluffing instead.");
let src = TxSource {
debug_name: "no_relay".to_string(),
@ -150,11 +142,7 @@ fn process_fluff_phase(
.transition_to_state(&stem_txs, PoolEntryState::Fluffed);
if stem_txs.len() > 0 {
debug!(
LOGGER,
"dand_mon: Found {} txs for fluffing.",
stem_txs.len()
);
debug!("dand_mon: Found {} txs for fluffing.", stem_txs.len());
let agg_tx = transaction::aggregate(stem_txs)?;
agg_tx.validate(verifier_cache.clone())?;
@ -186,7 +174,6 @@ fn process_fresh_entries(
if fresh_entries.len() > 0 {
debug!(
LOGGER,
"dand_mon: Found {} fresh entries in stempool.",
fresh_entries.len()
);
@ -220,21 +207,13 @@ fn process_expired_entries(
.iter()
.filter(|x| x.tx_at.timestamp() < cutoff)
{
debug!(
LOGGER,
"dand_mon: Embargo timer expired for {:?}",
entry.tx.hash()
);
debug!("dand_mon: Embargo timer expired for {:?}", entry.tx.hash());
expired_entries.push(entry.clone());
}
}
if expired_entries.len() > 0 {
debug!(
LOGGER,
"dand_mon: Found {} expired txs.",
expired_entries.len()
);
debug!("dand_mon: Found {} expired txs.", expired_entries.len());
{
let mut tx_pool = tx_pool.write();
@ -246,11 +225,8 @@ fn process_expired_entries(
identifier: "?.?.?.?".to_string(),
};
match tx_pool.add_to_pool(src, entry.tx, false, &header) {
Ok(_) => debug!(
LOGGER,
"dand_mon: embargo expired, fluffed tx successfully."
),
Err(e) => debug!(LOGGER, "dand_mon: Failed to fluff expired tx - {:?}", e),
Ok(_) => debug!("dand_mon: embargo expired, fluffed tx successfully."),
Err(e) => debug!("dand_mon: Failed to fluff expired tx - {:?}", e),
};
}
}

View file

@ -27,7 +27,6 @@ use std::{cmp, io, str, thread, time};
use p2p;
use p2p::ChainAdapter;
use pool::DandelionConfig;
use util::LOGGER;
// DNS Seeds with contact email associated
const DNS_SEEDS: &'static [&'static str] = &[
@ -119,8 +118,8 @@ fn monitor_peers(
if interval >= config.ban_window() {
peers.unban_peer(&x.addr);
debug!(
LOGGER,
"monitor_peers: unbanned {} after {} seconds", x.addr, interval
"monitor_peers: unbanned {} after {} seconds",
x.addr, interval
);
} else {
banned_count += 1;
@ -132,7 +131,6 @@ fn monitor_peers(
}
debug!(
LOGGER,
"monitor_peers: on {}:{}, {} connected ({} most_work). \
all {} = {} healthy + {} banned + {} defunct",
config.host,
@ -158,8 +156,8 @@ fn monitor_peers(
let mut connected_peers: Vec<SocketAddr> = vec![];
for p in peers.connected_peers() {
debug!(
LOGGER,
"monitor_peers: {}:{} ask {} for more peers", config.host, config.port, p.info.addr,
"monitor_peers: {}:{} ask {} for more peers",
config.host, config.port, p.info.addr,
);
let _ = p.send_peer_request(capabilities);
connected_peers.push(p.info.addr)
@ -178,7 +176,7 @@ fn monitor_peers(
}
}
}
None => debug!(LOGGER, "monitor_peers: no preferred peers"),
None => debug!("monitor_peers: no preferred peers"),
}
// take a random defunct peer and mark it healthy: over a long period any
@ -197,8 +195,8 @@ fn monitor_peers(
);
for p in new_peers.iter().filter(|p| !peers.is_known(&p.addr)) {
debug!(
LOGGER,
"monitor_peers: on {}:{}, queue to soon try {}", config.host, config.port, p.addr,
"monitor_peers: on {}:{}, queue to soon try {}",
config.host, config.port, p.addr,
);
tx.send(p.addr).unwrap();
}
@ -208,13 +206,13 @@ fn update_dandelion_relay(peers: Arc<p2p::Peers>, dandelion_config: DandelionCon
// Dandelion Relay Updater
let dandelion_relay = peers.get_dandelion_relay();
if dandelion_relay.is_empty() {
debug!(LOGGER, "monitor_peers: no dandelion relay updating");
debug!("monitor_peers: no dandelion relay updating");
peers.update_dandelion_relay();
} else {
for last_added in dandelion_relay.keys() {
let dandelion_interval = Utc::now().timestamp() - last_added;
if dandelion_interval >= dandelion_config.relay_secs.unwrap() as i64 {
debug!(LOGGER, "monitor_peers: updating expired dandelion relay");
debug!("monitor_peers: updating expired dandelion relay");
peers.update_dandelion_relay();
}
}
@ -242,11 +240,11 @@ fn connect_to_seeds_and_preferred_peers(
// If we have preferred peers add them to the connection
match peers_preferred_list {
Some(mut peers_preferred) => peer_addrs.append(&mut peers_preferred),
None => debug!(LOGGER, "No preferred peers"),
None => debug!("No preferred peers"),
};
if peer_addrs.len() == 0 {
warn!(LOGGER, "No seeds were retrieved.");
warn!("No seeds were retrieved.");
}
// connect to this first set of addresses
@ -311,7 +309,7 @@ pub fn dns_seeds() -> Box<Fn() -> Vec<SocketAddr> + Send> {
let mut addresses: Vec<SocketAddr> = vec![];
for dns_seed in DNS_SEEDS {
let temp_addresses = addresses.clone();
debug!(LOGGER, "Retrieving seed nodes from dns {}", dns_seed);
debug!("Retrieving seed nodes from dns {}", dns_seed);
match (dns_seed.to_owned(), 0).to_socket_addrs() {
Ok(addrs) => addresses.append(
&mut (addrs
@ -321,13 +319,10 @@ pub fn dns_seeds() -> Box<Fn() -> Vec<SocketAddr> + Send> {
}).filter(|addr| !temp_addresses.contains(addr))
.collect()),
),
Err(e) => debug!(
LOGGER,
"Failed to resolve seed {:?} got error {:?}", dns_seed, e
),
Err(e) => debug!("Failed to resolve seed {:?} got error {:?}", dns_seed, e),
}
}
debug!(LOGGER, "Retrieved seed addresses: {:?}", addresses);
debug!("Retrieved seed addresses: {:?}", addresses);
addresses
})
}

View file

@ -39,7 +39,6 @@ use p2p;
use pool;
use store;
use util::file::get_first_line;
use util::LOGGER;
/// Grin server holding internal structures.
pub struct Server {
@ -156,7 +155,7 @@ impl Server {
global::ChainTypes::Mainnet => genesis::genesis_testnet2(), //TODO: Fix, obviously
};
info!(LOGGER, "Starting server, genesis block: {}", genesis.hash());
info!("Starting server, genesis block: {}", genesis.hash());
let db_env = Arc::new(store::new_env(config.db_root.clone()));
let shared_chain = Arc::new(chain::Chain::init(
@ -205,10 +204,7 @@ impl Server {
if config.p2p_config.seeding_type.clone() != p2p::Seeding::Programmatic {
let seeder = match config.p2p_config.seeding_type.clone() {
p2p::Seeding::None => {
warn!(
LOGGER,
"No seed configured, will stay solo until connected to"
);
warn!("No seed configured, will stay solo until connected to");
seed::predefined_seeds(vec![])
}
p2p::Seeding::List => {
@ -255,7 +251,7 @@ impl Server {
.name("p2p-server".to_string())
.spawn(move || p2p_inner.listen());
info!(LOGGER, "Starting rest apis at: {}", &config.api_http_addr);
info!("Starting rest apis at: {}", &config.api_http_addr);
let api_secret = get_first_line(config.api_secret_path.clone());
api::start_rest_apis(
config.api_http_addr.clone(),
@ -266,10 +262,7 @@ impl Server {
None,
);
info!(
LOGGER,
"Starting dandelion monitor: {}", &config.api_http_addr
);
info!("Starting dandelion monitor: {}", &config.api_http_addr);
dandelion_monitor::monitor_transactions(
config.dandelion_config.clone(),
tx_pool.clone(),
@ -277,7 +270,7 @@ impl Server {
stop.clone(),
);
warn!(LOGGER, "Grin server started.");
warn!("Grin server started.");
Ok(Server {
config,
p2p: p2p_server,
@ -336,7 +329,7 @@ impl Server {
/// internal miner, and should only be used for automated testing. Burns
/// reward if wallet_listener_url is 'None'
pub fn start_test_miner(&self, wallet_listener_url: Option<String>, stop: Arc<AtomicBool>) {
info!(LOGGER, "start_test_miner - start",);
info!("start_test_miner - start",);
let sync_state = self.sync_state.clone();
let config_wallet_url = match wallet_listener_url.clone() {
Some(u) => u,
@ -467,6 +460,6 @@ impl Server {
/// Stops the test miner without stopping the p2p layer
pub fn stop_test_miner(&self, stop: Arc<AtomicBool>) {
stop.store(true, Ordering::Relaxed);
info!(LOGGER, "stop_test_miner - stop",);
info!("stop_test_miner - stop",);
}
}

View file

@ -22,7 +22,6 @@ use common::types::{SyncState, SyncStatus};
use core::core::hash::{Hash, Hashed, ZERO_HASH};
use core::global;
use p2p;
use util::LOGGER;
pub struct BodySync {
chain: Arc<chain::Chain>,
@ -94,7 +93,6 @@ impl BodySync {
self.reset();
debug!(
LOGGER,
"body_sync: body_head - {}, {}, header_head - {}, {}, sync_head - {}, {}",
body_head.last_block_h,
body_head.height,
@ -148,7 +146,6 @@ impl BodySync {
if hashes_to_get.len() > 0 {
debug!(
LOGGER,
"block_sync: {}/{} requesting blocks {:?} from {} peers",
body_head.height,
header_head.height,
@ -161,7 +158,7 @@ impl BodySync {
for hash in hashes_to_get.clone() {
if let Some(peer) = peers_iter.next() {
if let Err(e) = peer.send_block_request(*hash) {
debug!(LOGGER, "Skipped request to {}: {:?}", peer.info.addr, e);
debug!("Skipped request to {}: {:?}", peer.info.addr, e);
} else {
self.body_sync_hashes.push(hash.clone());
}
@ -199,7 +196,6 @@ impl BodySync {
.filter(|x| !self.chain.get_block(*x).is_ok() && !self.chain.is_orphan(*x))
.collect::<Vec<_>>();
debug!(
LOGGER,
"body_sync: {}/{} blocks received, and no more in 200ms",
self.body_sync_hashes.len() - hashes_not_get.len(),
self.body_sync_hashes.len(),
@ -210,7 +206,6 @@ impl BodySync {
None => {
if Utc::now() - self.sync_start_ts > Duration::seconds(5) {
debug!(
LOGGER,
"body_sync: 0/{} blocks received in 5s",
self.body_sync_hashes.len(),
);

View file

@ -20,7 +20,6 @@ use chain;
use common::types::{Error, SyncState, SyncStatus};
use core::core::hash::{Hash, Hashed};
use p2p::{self, Peer};
use util::LOGGER;
pub struct HeaderSync {
sync_state: Arc<SyncState>,
@ -60,7 +59,6 @@ impl HeaderSync {
// but ONLY on initial transition to HeaderSync state.
let sync_head = self.chain.get_sync_head().unwrap();
debug!(
LOGGER,
"sync: initial transition to HeaderSync. sync_head: {} at {}, reset to: {} at {}",
sync_head.hash(),
sync_head.height,
@ -141,8 +139,8 @@ impl HeaderSync {
fn request_headers(&mut self, peer: &Peer) {
if let Ok(locator) = self.get_locator() {
debug!(
LOGGER,
"sync: request_headers: asking {} for headers, {:?}", peer.info.addr, locator,
"sync: request_headers: asking {} for headers, {:?}",
peer.info.addr, locator,
);
let _ = peer.send_header_request(locator);
@ -165,7 +163,7 @@ impl HeaderSync {
self.history_locators.clear();
}
debug!(LOGGER, "sync: locator heights : {:?}", heights);
debug!("sync: locator heights : {:?}", heights);
let mut locator: Vec<Hash> = vec![];
let mut current = self.chain.get_block_header(&tip.last_block_h);
@ -237,7 +235,7 @@ impl HeaderSync {
}
}
debug!(LOGGER, "sync: locator heights': {:?}", new_heights);
debug!("sync: locator heights': {:?}", new_heights);
// shrink history_locators properly
if heights.len() > 1 {
@ -258,14 +256,13 @@ impl HeaderSync {
}
}
debug!(
LOGGER,
"sync: history locators: len={}, shrunk={}",
self.history_locators.len(),
shrunk_size
);
}
debug!(LOGGER, "sync: locator: {:?}", locator);
debug!("sync: locator: {:?}", locator);
Ok(locator)
}

View file

@ -21,7 +21,6 @@ use common::types::{Error, SyncState, SyncStatus};
use core::core::hash::Hashed;
use core::global;
use p2p::{self, Peer};
use util::LOGGER;
/// Fast sync has 3 "states":
/// * syncing headers
@ -77,10 +76,7 @@ impl StateSync {
{
let clone = self.sync_state.sync_error();
if let Some(ref sync_error) = *clone.read() {
error!(
LOGGER,
"fast_sync: error = {:?}. restart fast sync", sync_error
);
error!("fast_sync: error = {:?}. restart fast sync", sync_error);
sync_need_restart = true;
}
drop(clone);
@ -92,8 +88,8 @@ impl StateSync {
if !peer.is_connected() {
sync_need_restart = true;
info!(
LOGGER,
"fast_sync: peer connection lost: {:?}. restart", peer.info.addr,
"fast_sync: peer connection lost: {:?}. restart",
peer.info.addr,
);
}
}
@ -110,10 +106,7 @@ impl StateSync {
if let SyncStatus::TxHashsetDownload { .. } = self.sync_state.status() {
if download_timeout {
error!(
LOGGER,
"fast_sync: TxHashsetDownload status timeout in 10 minutes!"
);
error!("fast_sync: TxHashsetDownload status timeout in 10 minutes!");
self.sync_state
.set_sync_error(Error::P2P(p2p::Error::Timeout));
}
@ -168,7 +161,6 @@ impl StateSync {
}
let bhash = txhashset_head.hash();
debug!(
LOGGER,
"fast_sync: before txhashset request, header head: {} / {}, txhashset_head: {} / {}",
header_head.height,
header_head.last_block_h,
@ -176,7 +168,7 @@ impl StateSync {
bhash
);
if let Err(e) = peer.send_txhashset_request(txhashset_head.height, bhash) {
error!(LOGGER, "fast_sync: send_txhashset_request err! {:?}", e);
error!("fast_sync: send_txhashset_request err! {:?}", e);
return Err(e);
}
return Ok(peer.clone());

View file

@ -24,7 +24,6 @@ use grin::sync::body_sync::BodySync;
use grin::sync::header_sync::HeaderSync;
use grin::sync::state_sync::StateSync;
use p2p::{self, Peers};
use util::LOGGER;
pub fn run_sync(
sync_state: Arc<SyncState>,
@ -164,7 +163,6 @@ fn needs_syncing(
if peer.info.total_difficulty() <= local_diff {
let ch = chain.head().unwrap();
info!(
LOGGER,
"synchronized at {} @ {} [{}]",
local_diff.to_num(),
ch.height,
@ -175,7 +173,7 @@ fn needs_syncing(
return (false, most_work_height);
}
} else {
warn!(LOGGER, "sync: no peers available, disabling sync");
warn!("sync: no peers available, disabling sync");
return (false, 0);
}
} else {
@ -192,7 +190,6 @@ fn needs_syncing(
let peer_diff = peer.info.total_difficulty();
if peer_diff > local_diff.clone() + threshold.clone() {
info!(
LOGGER,
"sync: total_difficulty {}, peer_difficulty {}, threshold {} (last 5 blocks), enabling sync",
local_diff,
peer_diff,

View file

@ -35,7 +35,7 @@ extern crate serde;
extern crate serde_derive;
extern crate serde_json;
#[macro_use]
extern crate slog;
extern crate log;
extern crate chrono;
extern crate grin_api as api;

View file

@ -28,7 +28,7 @@ use core::core::verifier_cache::VerifierCache;
use core::{consensus, core, ser};
use keychain::{ExtKeychain, Identifier, Keychain};
use pool;
use util::{self, LOGGER};
use util;
use wallet::{self, BlockFees};
// Ensure a block suitable for mining is built and returned
@ -55,24 +55,22 @@ pub fn get_block(
self::Error::Chain(c) => match c.kind() {
chain::ErrorKind::DuplicateCommitment(_) => {
debug!(
LOGGER,
"Duplicate commit for potential coinbase detected. Trying next derivation."
);
}
_ => {
error!(LOGGER, "Chain Error: {}", c);
error!("Chain Error: {}", c);
}
},
self::Error::Wallet(_) => {
error!(
LOGGER,
"Error building new block: Can't connect to wallet listener at {:?}; will retry",
wallet_listener_url.as_ref().unwrap()
);
thread::sleep(Duration::from_secs(wallet_retry_interval));
}
ae => {
warn!(LOGGER, "Error building new block: {:?}. Retrying.", ae);
warn!("Error building new block: {:?}. Retrying.", ae);
}
}
thread::sleep(Duration::from_millis(100));
@ -134,7 +132,6 @@ fn build_block(
let b_difficulty = (b.header.total_difficulty() - head.total_difficulty()).to_num();
debug!(
LOGGER,
"Built new block with {} inputs and {} outputs, network difficulty: {}, cumulative difficulty {}",
b.inputs().len(),
b.outputs().len(),
@ -159,10 +156,7 @@ fn build_block(
//Some other issue, possibly duplicate kernel
_ => {
error!(
LOGGER,
"Error setting txhashset root to build a block: {:?}", e
);
error!("Error setting txhashset root to build a block: {:?}", e);
Err(Error::Chain(
chain::ErrorKind::Other(format!("{:?}", e)).into(),
))
@ -176,7 +170,7 @@ fn build_block(
/// Probably only want to do this when testing.
///
fn burn_reward(block_fees: BlockFees) -> Result<(core::Output, core::TxKernel, BlockFees), Error> {
warn!(LOGGER, "Burning block fees: {:?}", block_fees);
warn!("Burning block fees: {:?}", block_fees);
let keychain = ExtKeychain::from_random_seed().unwrap();
let key_id = ExtKeychain::derive_key_id(1, 1, 0, 0, 0);
let (out, kernel) =
@ -209,7 +203,7 @@ fn get_coinbase(
..block_fees
};
debug!(LOGGER, "get_coinbase: {:?}", block_fees);
debug!("get_coinbase: {:?}", block_fees);
return Ok((output, kernel, block_fees));
}
}

View file

@ -35,7 +35,7 @@ use core::{pow, ser};
use keychain;
use mining::mine_block;
use pool;
use util::{self, LOGGER};
use util;
// ----------------------------------------
// http://www.jsonrpc.org/specification
@ -114,7 +114,6 @@ fn accept_workers(
match stream {
Ok(stream) => {
warn!(
LOGGER,
"(Server ID: {}) New connection: {}",
id,
stream.peer_addr().unwrap()
@ -135,10 +134,7 @@ fn accept_workers(
worker_id = worker_id + 1;
}
Err(e) => {
warn!(
LOGGER,
"(Server ID: {}) Error accepting connection: {:?}", id, e
);
warn!("(Server ID: {}) Error accepting connection: {:?}", id, e);
}
}
}
@ -185,8 +181,8 @@ impl Worker {
}
Err(e) => {
warn!(
LOGGER,
"(Server ID: {}) Error in connection with stratum client: {}", self.id, e
"(Server ID: {}) Error in connection with stratum client: {}",
self.id, e
);
self.error = true;
return None;
@ -206,16 +202,16 @@ impl Worker {
Ok(_) => {}
Err(e) => {
warn!(
LOGGER,
"(Server ID: {}) Error in connection with stratum client: {}", self.id, e
"(Server ID: {}) Error in connection with stratum client: {}",
self.id, e
);
self.error = true;
}
},
Err(e) => {
warn!(
LOGGER,
"(Server ID: {}) Error in connection with stratum client: {}", self.id, e
"(Server ID: {}) Error in connection with stratum client: {}",
self.id, e
);
self.error = true;
return;
@ -296,7 +292,6 @@ impl StratumServer {
Err(e) => {
// not a valid JSON RpcRequest - disconnect the worker
warn!(
LOGGER,
"(Server ID: {}) Failed to parse JSONRpc: {} - {:?}",
self.id,
e.description(),
@ -409,11 +404,8 @@ impl StratumServer {
let job_template = self.build_block_template();
let response = serde_json::to_value(&job_template).unwrap();
debug!(
LOGGER,
"(Server ID: {}) sending block {} with id {} to single worker",
self.id,
job_template.height,
job_template.job_id,
self.id, job_template.height, job_template.job_id,
);
return Ok(response);
}
@ -452,8 +444,8 @@ impl StratumServer {
if params.height != self.current_block_versions.last().unwrap().header.height {
// Return error status
error!(
LOGGER,
"(Server ID: {}) Share at height {} submitted too late", self.id, params.height,
"(Server ID: {}) Share at height {} submitted too late",
self.id, params.height,
);
worker_stats.num_stale += 1;
let e = RpcError {
@ -467,11 +459,8 @@ impl StratumServer {
if b.is_none() {
// Return error status
error!(
LOGGER,
"(Server ID: {}) Failed to validate solution at height {}: invalid job_id {}",
self.id,
params.height,
params.job_id,
self.id, params.height, params.job_id,
);
worker_stats.num_rejected += 1;
let e = RpcError {
@ -491,11 +480,8 @@ impl StratumServer {
if share_difficulty < self.minimum_share_difficulty {
// Return error status
error!(
LOGGER,
"(Server ID: {}) Share rejected due to low difficulty: {}/{}",
self.id,
share_difficulty,
self.minimum_share_difficulty,
self.id, share_difficulty, self.minimum_share_difficulty,
);
worker_stats.num_rejected += 1;
let e = RpcError {
@ -511,7 +497,6 @@ impl StratumServer {
if let Err(e) = res {
// Return error status
error!(
LOGGER,
"(Server ID: {}) Failed to validate solution at height {}: {}: {}",
self.id,
params.height,
@ -528,15 +513,14 @@ impl StratumServer {
share_is_block = true;
// Log message to make it obvious we found a block
warn!(
LOGGER,
"(Server ID: {}) Solution Found for block {} - Yay!!!", self.id, params.height
"(Server ID: {}) Solution Found for block {} - Yay!!!",
self.id, params.height
);
} else {
// Do some validation but don't submit
if !pow::verify_size(&b.header, b.header.pow.proof.edge_bits).is_ok() {
// Return error status
error!(
LOGGER,
"(Server ID: {}) Failed to validate share at height {} with nonce {} using job_id {}",
self.id,
params.height,
@ -557,7 +541,6 @@ impl StratumServer {
Some(login) => login.clone(),
};
info!(
LOGGER,
"(Server ID: {}) Got share for block: hash {}, height {}, nonce {}, difficulty {}/{}, submitted by {}",
self.id,
b.hash(),
@ -588,10 +571,8 @@ impl StratumServer {
for num in start..workers_l.len() {
if workers_l[num].error == true {
warn!(
LOGGER,
"(Server ID: {}) Dropping worker: {}",
self.id,
workers_l[num].id;
self.id, workers_l[num].id
);
// Update worker stats
let mut stratum_stats = stratum_stats.write();
@ -631,11 +612,8 @@ impl StratumServer {
};
let job_request_json = serde_json::to_string(&job_request).unwrap();
debug!(
LOGGER,
"(Server ID: {}) sending block {} with id {} to stratum clients",
self.id,
job_template.height,
job_template.job_id,
self.id, job_template.height, job_template.job_id,
);
// Push the new block to all connected clients
// NOTE: We do not give a unique nonce (should we?) so miners need
@ -659,11 +637,8 @@ impl StratumServer {
sync_state: Arc<SyncState>,
) {
info!(
LOGGER,
"(Server ID: {}) Starting stratum server with edge_bits = {}, proof_size = {}",
self.id,
edge_bits,
proof_size
self.id, edge_bits, proof_size
);
self.sync_state = sync_state;
@ -698,7 +673,6 @@ impl StratumServer {
}
warn!(
LOGGER,
"Stratum server started on {}",
self.config.stratum_server_addr.clone().unwrap()
);

View file

@ -31,7 +31,6 @@ use core::global;
use core::pow::PoWContext;
use mining::mine_block;
use pool;
use util::LOGGER;
pub struct Miner {
config: StratumServerConfig,
@ -85,7 +84,6 @@ impl Miner {
let deadline = Utc::now().timestamp() + attempt_time_per_block as i64;
debug!(
LOGGER,
"(Server ID: {}) Mining Cuckoo{} for max {}s on {} @ {} [{}].",
self.debug_output_id,
global::min_edge_bits(),
@ -116,10 +114,8 @@ impl Miner {
}
debug!(
LOGGER,
"(Server ID: {}) No solution found after {} iterations, continuing...",
self.debug_output_id,
iter_count
self.debug_output_id, iter_count
);
false
}
@ -128,8 +124,8 @@ impl Miner {
/// chain anytime required and looking for PoW solution.
pub fn run_loop(&self, wallet_listener_url: Option<String>) {
info!(
LOGGER,
"(Server ID: {}) Starting test miner loop.", self.debug_output_id
"(Server ID: {}) Starting test miner loop.",
self.debug_output_id
);
// iteration, we keep the returned derivation to provide it back when
@ -137,7 +133,7 @@ impl Miner {
let mut key_id = None;
while !self.stop.load(Ordering::Relaxed) {
trace!(LOGGER, "in miner loop. key_id: {:?}", key_id);
trace!("in miner loop. key_id: {:?}", key_id);
// get the latest chain state and build a block on top of it
let head = self.chain.head_header().unwrap();
@ -161,7 +157,6 @@ impl Miner {
// we found a solution, push our block through the chain processing pipeline
if sol {
info!(
LOGGER,
"(Server ID: {}) Found valid proof of work, adding block {}.",
self.debug_output_id,
b.hash()
@ -169,26 +164,21 @@ impl Miner {
let res = self.chain.process_block(b, chain::Options::MINE);
if let Err(e) = res {
error!(
LOGGER,
"(Server ID: {}) Error validating mined block: {:?}",
self.debug_output_id,
e
self.debug_output_id, e
);
}
trace!(LOGGER, "resetting key_id in miner to None");
trace!("resetting key_id in miner to None");
key_id = None;
} else {
debug!(
LOGGER,
"setting pubkey in miner to pubkey from block_fees - {:?}", block_fees
"setting pubkey in miner to pubkey from block_fees - {:?}",
block_fees
);
key_id = block_fees.key_id();
}
}
info!(
LOGGER,
"(Server ID: {}) test miner exit.", self.debug_output_id
);
info!("(Server ID: {}) test miner exit.", self.debug_output_id);
}
}

View file

@ -25,8 +25,6 @@ use std::env;
use std::io::Error;
use std::thread;
use util::LOGGER;
/// Future returned from `MainService`.
enum MainFuture {
Root,
@ -94,10 +92,7 @@ pub fn start_webwallet_server() {
let server = Server::bind(&addr)
.serve(|| future::ok::<_, Error>(MainService::new()))
.map_err(|e| eprintln!("server error: {}", e));
warn!(
LOGGER,
"Grin Web-Wallet Application is running at http://{}/", addr
);
warn!("Grin Web-Wallet Application is running at http://{}/", addr);
rt::run(server);
});
}

View file

@ -13,7 +13,7 @@
// limitations under the License.
#[macro_use]
extern crate slog;
extern crate log;
extern crate grin_api as api;
extern crate grin_chain as chain;
@ -33,12 +33,13 @@ use util::Mutex;
use core::global::{self, ChainTypes};
use framework::{LocalServerContainer, LocalServerContainerConfig};
use util::{init_test_logger, LOGGER};
use util::init_test_logger;
#[test]
fn simple_server_wallet() {
init_test_logger();
info!(LOGGER, "starting simple_server_wallet");
info!("starting simple_server_wallet");
let test_name_dir = "test_servers";
core::global::set_mining_mode(core::global::ChainTypes::AutomatedTesting);
// Run a separate coinbase wallet for coinbase transactions
@ -82,11 +83,11 @@ fn simple_server_wallet() {
let base_addr = server_config.base_addr;
let api_server_port = server_config.api_server_port;
warn!(LOGGER, "Testing chain handler");
warn!("Testing chain handler");
let tip = get_tip(&base_addr, api_server_port);
assert!(tip.is_ok());
warn!(LOGGER, "Testing status handler");
warn!("Testing status handler");
let status = get_status(&base_addr, api_server_port);
assert!(status.is_ok());
@ -97,7 +98,7 @@ fn simple_server_wallet() {
current_tip = get_tip(&base_addr, api_server_port).unwrap();
}
warn!(LOGGER, "Testing block handler");
warn!("Testing block handler");
let last_block_by_height = get_block_by_height(&base_addr, api_server_port, current_tip.height);
assert!(last_block_by_height.is_ok());
let last_block_by_height_compact =
@ -111,7 +112,7 @@ fn simple_server_wallet() {
get_block_by_hash_compact(&base_addr, api_server_port, &block_hash);
assert!(last_block_by_hash_compact.is_ok());
warn!(LOGGER, "Testing chain output handler");
warn!("Testing chain output handler");
let start_height = 0;
let end_height = current_tip.height;
let outputs_by_height =
@ -123,7 +124,7 @@ fn simple_server_wallet() {
let outputs_by_ids2 = get_outputs_by_ids2(&base_addr, api_server_port, ids.clone());
assert!(outputs_by_ids2.is_ok());
warn!(LOGGER, "Testing txhashset handler");
warn!("Testing txhashset handler");
let roots = get_txhashset_roots(&base_addr, api_server_port);
assert!(roots.is_ok());
let last_10_outputs = get_txhashset_lastoutputs(&base_addr, api_server_port, 0);
@ -147,7 +148,7 @@ fn simple_server_wallet() {
#[test]
fn test_p2p() {
init_test_logger();
info!(LOGGER, "starting test_p2p");
info!("starting test_p2p");
global::set_mining_mode(ChainTypes::AutomatedTesting);
let test_name_dir = "test_servers";
@ -188,7 +189,7 @@ fn test_p2p() {
thread::sleep(time::Duration::from_millis(2000));
// Starting tests
warn!(LOGGER, "Starting P2P Tests");
warn!("Starting P2P Tests");
let base_addr = server_config_one.base_addr;
let api_server_port = server_config_one.api_server_port;

View file

@ -13,7 +13,7 @@
// limitations under the License.
#[macro_use]
extern crate slog;
extern crate log;
extern crate grin_api as api;
extern crate grin_chain as chain;
@ -31,8 +31,6 @@ use std::sync::Arc;
use std::{thread, time};
use util::Mutex;
use util::LOGGER;
/// Start 1 node mining, 1 non mining node and two wallets.
/// Then send a transaction from one wallet to another and propagate it a stem
/// transaction but without stem relay and check if the transaction is still
@ -136,7 +134,7 @@ fn test_dandelion_timeout() {
LocalServerContainer::get_wallet_info(&coinbase_wallet_config, &coinbase_seed);
}
warn!(LOGGER, "Sending 50 Grins to recipient wallet");
warn!("Sending 50 Grins to recipient wallet");
// Sending stem transaction
LocalServerContainer::send_amount_to(

View file

@ -21,7 +21,7 @@ extern crate grin_servers as servers;
extern crate grin_util as util;
extern crate grin_wallet as wallet;
#[macro_use]
extern crate slog;
extern crate log;
mod framework;
@ -34,7 +34,6 @@ use util::Mutex;
use core::core::hash::Hashed;
use core::global::{self, ChainTypes};
use util::LOGGER;
use wallet::controller;
use wallet::libtx::slate::Slate;
use wallet::libwallet::types::{WalletBackend, WalletInst};
@ -243,7 +242,7 @@ fn simulate_block_propagation() {
thread::sleep(time::Duration::from_millis(1_000));
time_spent += 1;
if time_spent >= 30 {
info!(LOGGER, "simulate_block_propagation - fail on timeout",);
info!("simulate_block_propagation - fail on timeout",);
break;
}
@ -285,7 +284,6 @@ fn simulate_full_sync() {
// Get the current header from s1.
let s1_header = s1.chain.head_header().unwrap();
info!(
LOGGER,
"simulate_full_sync - s1 header head: {} at {}",
s1_header.hash(),
s1_header.height
@ -298,7 +296,6 @@ fn simulate_full_sync() {
time_spent += 1;
if time_spent >= 30 {
info!(
LOGGER,
"sync fail. s2.head().height: {}, s1_header.height: {}",
s2.head().height,
s1_header.height
@ -356,7 +353,6 @@ fn simulate_fast_sync() {
total_wait += 1;
if total_wait >= 30 {
error!(
LOGGER,
"simulate_fast_sync test fail on timeout! s2 height: {}, s1 height: {}",
s2.head().height,
s1_header.height,

View file

@ -23,7 +23,7 @@ extern crate grin_wallet as wallet;
extern crate bufstream;
extern crate serde_json;
#[macro_use]
extern crate slog;
extern crate log;
mod framework;
@ -38,7 +38,6 @@ use std::sync::Arc;
use std::{thread, time};
use core::global::{self, ChainTypes};
use util::LOGGER;
use framework::{config, stratum_config};
@ -77,7 +76,7 @@ fn basic_stratum_server() {
}
// As this stream falls out of scope it will be disconnected
}
info!(LOGGER, "stratum server connected");
info!("stratum server connected");
// Create a few new worker connections
let mut workers = vec![];
@ -89,7 +88,7 @@ fn basic_stratum_server() {
workers.push(stream);
}
assert!(workers.len() == 5);
info!(LOGGER, "workers length verification ok");
info!("workers length verification ok");
// Simulate a worker lost connection
workers.remove(4);
@ -118,7 +117,7 @@ fn basic_stratum_server() {
assert!(false);
}
}
info!(LOGGER, "a few stratum JSONRpc commands verification ok");
info!("a few stratum JSONRpc commands verification ok");
// keepalive - expected "ok" result
let mut response = String::new();
@ -129,7 +128,7 @@ fn basic_stratum_server() {
thread::sleep(time::Duration::from_secs(1)); // Wait for the server to reply
let _st = workers[2].read_line(&mut response);
assert_eq!(response.as_str(), ok_resp);
info!(LOGGER, "keepalive test ok");
info!("keepalive test ok");
// "doesnotexist" - error expected
let mut response = String::new();
@ -140,7 +139,7 @@ fn basic_stratum_server() {
thread::sleep(time::Duration::from_secs(1)); // Wait for the server to reply
let _st = workers[3].read_line(&mut response);
assert_eq!(response.as_str(), ok_resp);
info!(LOGGER, "worker doesnotexist test ok");
info!("worker doesnotexist test ok");
// Verify stratum server and worker stats
let stats = s.get_server_stats().unwrap();
@ -148,18 +147,18 @@ fn basic_stratum_server() {
assert_eq!(stats.stratum_stats.num_workers, 4); // 5 - 1 = 4
assert_eq!(stats.stratum_stats.worker_stats[5].is_connected, false); // worker was removed
assert_eq!(stats.stratum_stats.worker_stats[1].is_connected, true);
info!(LOGGER, "stratum server and worker stats verification ok");
info!("stratum server and worker stats verification ok");
// Start mining blocks
let stop = Arc::new(AtomicBool::new(false));
s.start_test_miner(None, stop.clone());
info!(LOGGER, "test miner started");
info!("test miner started");
// This test is supposed to complete in 3 seconds,
// so set a 10s timeout to avoid hanging indefinitely in Travis-CI.
let _handler = thread::spawn(|| {
thread::sleep(time::Duration::from_secs(10));
error!(LOGGER, "basic_stratum_server test fail on timeout!");
error!("basic_stratum_server test fail on timeout!");
thread::sleep(time::Duration::from_millis(100));
process::exit(1);
});
@ -177,12 +176,12 @@ fn basic_stratum_server() {
let _st = workers[2].read_line(&mut jobtemplate);
let job_template: Value = serde_json::from_str(&jobtemplate).unwrap();
assert_eq!(job_template["method"], expected);
info!(LOGGER, "blocks broadcasting to workers test ok");
info!("blocks broadcasting to workers test ok");
// Verify stratum server and worker stats
let stats = s.get_server_stats().unwrap();
assert_eq!(stats.stratum_stats.num_workers, 3); // 5 - 2 = 3
assert_eq!(stats.stratum_stats.worker_stats[2].is_connected, false); // worker was removed
assert_ne!(stats.stratum_stats.block_height, 1);
info!(LOGGER, "basic_stratum_server test done and ok.");
info!("basic_stratum_server test done and ok.");
}

View file

@ -13,7 +13,7 @@
// limitations under the License.
#[macro_use]
extern crate slog;
extern crate log;
extern crate grin_api as api;
extern crate grin_chain as chain;
@ -31,8 +31,6 @@ use std::sync::Arc;
use std::{thread, time};
use util::Mutex;
use util::LOGGER;
/// Start 1 node mining and two wallets, then send a few
/// transactions from one to the other
#[ignore]
@ -105,7 +103,7 @@ fn basic_wallet_transactions() {
coinbase_info =
LocalServerContainer::get_wallet_info(&coinbase_wallet_config, &coinbase_seed);
}
warn!(LOGGER, "Sending 50 Grins to recipient wallet");
warn!("Sending 50 Grins to recipient wallet");
LocalServerContainer::send_amount_to(
&coinbase_wallet_config,
"50.00",
@ -125,10 +123,7 @@ fn basic_wallet_transactions() {
println!("Recipient wallet info: {:?}", recipient_info);
assert!(recipient_info.amount_currently_spendable == 50000000000);
warn!(
LOGGER,
"Sending many small transactions to recipient wallet"
);
warn!("Sending many small transactions to recipient wallet");
for _i in 0..10 {
LocalServerContainer::send_amount_to(
&coinbase_wallet_config,

View file

@ -29,7 +29,6 @@ use core::global;
use p2p::Seeding;
use servers;
use tui::ui;
use util::LOGGER;
/// wrap below to allow UI to clean up on stop
fn start_server(config: servers::ServerConfig) {
@ -37,9 +36,9 @@ fn start_server(config: servers::ServerConfig) {
// Just kill process for now, otherwise the process
// hangs around until sigint because the API server
// currently has no shutdown facility
warn!(LOGGER, "Shutting down...");
warn!("Shutting down...");
thread::sleep(Duration::from_millis(1000));
warn!(LOGGER, "Shutdown complete.");
warn!("Shutdown complete.");
exit(0);
}
@ -47,7 +46,7 @@ fn start_server_tui(config: servers::ServerConfig) {
// Run the UI controller.. here for now for simplicity to access
// everything it might need
if config.run_tui.is_some() && config.run_tui.unwrap() {
warn!(LOGGER, "Starting GRIN in UI mode...");
warn!("Starting GRIN in UI mode...");
servers::Server::start(config, |serv: Arc<servers::Server>| {
let running = Arc::new(AtomicBool::new(true));
let _ = thread::Builder::new()
@ -60,7 +59,7 @@ fn start_server_tui(config: servers::ServerConfig) {
});
}).unwrap();
} else {
warn!(LOGGER, "Starting GRIN w/o UI...");
warn!("Starting GRIN w/o UI...");
servers::Server::start(config, |serv: Arc<servers::Server>| {
let running = Arc::new(AtomicBool::new(true));
let r = running.clone();
@ -70,7 +69,7 @@ fn start_server_tui(config: servers::ServerConfig) {
while running.load(Ordering::SeqCst) {
thread::sleep(Duration::from_secs(1));
}
warn!(LOGGER, "Received SIGINT (Ctrl+C) or SIGTERM (kill).");
warn!("Received SIGINT (Ctrl+C) or SIGTERM (kill).");
serv.stop();
}).unwrap();
}
@ -170,8 +169,8 @@ pub fn server_command(server_args: Option<&ArgMatches>, mut global_config: Globa
}
});
match daemonize.start() {
Ok(_) => info!(LOGGER, "Grin server successfully started."),
Err(e) => error!(LOGGER, "Error starting: {}", e),
Ok(_) => info!("Grin server successfully started."),
Err(e) => error!("Error starting: {}", e),
}
}
("stop", _) => println!("TODO. Just 'kill $pid' for now. Maybe /tmp/grin.pid is $pid"),

View file

@ -35,7 +35,6 @@ use grin_wallet::{
use keychain;
use servers::start_webwallet_server;
use util::file::get_first_line;
use util::LOGGER;
pub fn _init_wallet_seed(wallet_config: WalletConfig) {
if let Err(_) = WalletSeed::from_file(&wallet_config) {
@ -73,7 +72,7 @@ pub fn instantiate_wallet(
println!("Error starting wallet: {}", e);
process::exit(0);
});
info!(LOGGER, "Using LMDB Backend for wallet");
info!("Using LMDB Backend for wallet");
Box::new(db_wallet)
}
@ -107,7 +106,7 @@ pub fn wallet_command(wallet_args: &ArgMatches, config: GlobalWalletConfig) {
// Generate the initial wallet seed if we are running "wallet init".
if let ("init", Some(_)) = wallet_args.subcommand() {
WalletSeed::init_file(&wallet_config).expect("Failed to init wallet seed file.");
info!(LOGGER, "Wallet seed file created");
info!("Wallet seed file created");
let client =
HTTPWalletClient::new(&wallet_config.check_node_api_http_addr, node_api_secret);
let _: LMDBBackend<HTTPWalletClient, keychain::ExtKeychain> =
@ -117,7 +116,7 @@ pub fn wallet_command(wallet_args: &ArgMatches, config: GlobalWalletConfig) {
e, wallet_config
);
});
info!(LOGGER, "Wallet database backend created");
info!("Wallet database backend created");
// give logging thread a moment to catch up
thread::sleep(Duration::from_millis(200));
// we are done here with creating the wallet, so just return
@ -268,7 +267,6 @@ pub fn wallet_command(wallet_args: &ArgMatches, config: GlobalWalletConfig) {
let slate = match result {
Ok(s) => {
info!(
LOGGER,
"Tx created: {} grin to {} (strategy '{}')",
core::amount_to_hr_string(amount, false),
dest,
@ -277,7 +275,7 @@ pub fn wallet_command(wallet_args: &ArgMatches, config: GlobalWalletConfig) {
s
}
Err(e) => {
error!(LOGGER, "Tx not created: {:?}", e);
error!("Tx not created: {:?}", e);
match e.kind() {
// user errors, don't backtrace
libwallet::ErrorKind::NotEnoughFunds { .. } => {}
@ -285,7 +283,7 @@ pub fn wallet_command(wallet_args: &ArgMatches, config: GlobalWalletConfig) {
libwallet::ErrorKind::FeeExceedsAmount { .. } => {}
_ => {
// otherwise give full dump
error!(LOGGER, "Backtrace: {}", e.backtrace().unwrap());
error!("Backtrace: {}", e.backtrace().unwrap());
}
};
panic!();
@ -294,18 +292,18 @@ pub fn wallet_command(wallet_args: &ArgMatches, config: GlobalWalletConfig) {
let result = api.post_tx(&slate, fluff);
match result {
Ok(_) => {
info!(LOGGER, "Tx sent",);
info!("Tx sent",);
Ok(())
}
Err(e) => {
error!(LOGGER, "Tx not sent: {:?}", e);
error!("Tx not sent: {:?}", e);
Err(e)
}
}
} else {
error!(
LOGGER,
"HTTP Destination should start with http://: or https://: {}", dest
"HTTP Destination should start with http://: or https://: {}",
dest
);
panic!();
}
@ -321,7 +319,7 @@ pub fn wallet_command(wallet_args: &ArgMatches, config: GlobalWalletConfig) {
).expect("Send failed");
Ok(())
} else {
error!(LOGGER, "unsupported payment method: {}", method);
error!("unsupported payment method: {}", method);
panic!();
}
}
@ -354,11 +352,11 @@ pub fn wallet_command(wallet_args: &ArgMatches, config: GlobalWalletConfig) {
let result = api.post_tx(&slate, fluff);
match result {
Ok(_) => {
info!(LOGGER, "Tx sent");
info!("Tx sent");
Ok(())
}
Err(e) => {
error!(LOGGER, "Tx not sent: {:?}", e);
error!("Tx not sent: {:?}", e);
Err(e)
}
}
@ -439,7 +437,7 @@ pub fn wallet_command(wallet_args: &ArgMatches, config: GlobalWalletConfig) {
("repost", Some(repost_args)) => {
let tx_id: u32 = match repost_args.value_of("id") {
None => {
error!(LOGGER, "Transaction of a completed but unconfirmed transaction required (specify with --id=[id])");
error!("Transaction of a completed but unconfirmed transaction required (specify with --id=[id])");
panic!();
}
Some(tx) => match tx.parse() {
@ -456,11 +454,11 @@ pub fn wallet_command(wallet_args: &ArgMatches, config: GlobalWalletConfig) {
let result = api.post_stored_tx(tx_id, fluff);
match result {
Ok(_) => {
info!(LOGGER, "Reposted transaction at {}", tx_id);
info!("Reposted transaction at {}", tx_id);
Ok(())
}
Err(e) => {
error!(LOGGER, "Transaction reposting failed: {}", e);
error!("Transaction reposting failed: {}", e);
Err(e)
}
}
@ -469,11 +467,11 @@ pub fn wallet_command(wallet_args: &ArgMatches, config: GlobalWalletConfig) {
let result = api.dump_stored_tx(tx_id, true, f);
match result {
Ok(_) => {
warn!(LOGGER, "Dumped transaction data for tx {} to {}", tx_id, f);
warn!("Dumped transaction data for tx {} to {}", tx_id, f);
Ok(())
}
Err(e) => {
error!(LOGGER, "Transaction reposting failed: {}", e);
error!("Transaction reposting failed: {}", e);
Err(e)
}
}
@ -488,11 +486,11 @@ pub fn wallet_command(wallet_args: &ArgMatches, config: GlobalWalletConfig) {
let result = api.cancel_tx(tx_id);
match result {
Ok(_) => {
info!(LOGGER, "Transaction {} Cancelled", tx_id);
info!("Transaction {} Cancelled", tx_id);
Ok(())
}
Err(e) => {
error!(LOGGER, "TX Cancellation failed: {}", e);
error!("TX Cancellation failed: {}", e);
Err(e)
}
}
@ -501,12 +499,12 @@ pub fn wallet_command(wallet_args: &ArgMatches, config: GlobalWalletConfig) {
let result = api.restore();
match result {
Ok(_) => {
info!(LOGGER, "Wallet restore complete",);
info!("Wallet restore complete",);
Ok(())
}
Err(e) => {
error!(LOGGER, "Wallet restore failed: {:?}", e);
error!(LOGGER, "Backtrace: {}", e.backtrace().unwrap());
error!("Wallet restore failed: {:?}", e);
error!("Backtrace: {}", e.backtrace().unwrap());
Err(e)
}
}

View file

@ -24,7 +24,7 @@ extern crate daemonize;
extern crate serde;
extern crate serde_json;
#[macro_use]
extern crate slog;
extern crate log;
extern crate term;
extern crate grin_api as api;
@ -45,7 +45,7 @@ use clap::{App, Arg, SubCommand};
use config::config::{SERVER_CONFIG_FILE_NAME, WALLET_CONFIG_FILE_NAME};
use core::global;
use util::{init_logger, LOGGER};
use util::init_logger;
// include build information
pub mod built_info {
@ -73,9 +73,9 @@ pub fn info_strings() -> (String, String, String) {
fn log_build_info() {
let (basic_info, detailed_info, deps) = info_strings();
info!(LOGGER, "{}", basic_info);
debug!(LOGGER, "{}", detailed_info);
trace!(LOGGER, "{}", deps);
info!("{}", basic_info);
debug!("{}", detailed_info);
trace!("{}", deps);
}
fn main() {
@ -378,7 +378,6 @@ fn main() {
l.tui_running = Some(false);
init_logger(Some(l));
warn!(
LOGGER,
"Using wallet configuration file at {}",
w.config_file_path.as_ref().unwrap().to_str().unwrap()
);
@ -399,12 +398,11 @@ fn main() {
global::set_mining_mode(s.members.as_mut().unwrap().server.clone().chain_type);
if let Some(file_path) = &s.config_file_path {
info!(
LOGGER,
"Using configuration file at {}",
file_path.to_str().unwrap()
);
} else {
info!(LOGGER, "Node configuration file not found, using default");
info!("Node configuration file not found, using default");
}
node_config = Some(s);
}

View file

@ -37,7 +37,6 @@ use servers::Server;
use tui::constants::ROOT_STACK;
use tui::types::{TUIStatusListener, UIMessage};
use tui::{menu, mining, peers, status, version};
use util::LOGGER;
use built_info;
@ -172,7 +171,7 @@ impl Controller {
let mut next_stat_update = Utc::now().timestamp() + stat_update_interval;
while self.ui.step() {
if !running.load(Ordering::SeqCst) {
warn!(LOGGER, "Received SIGINT (Ctrl+C).");
warn!("Received SIGINT (Ctrl+C).");
server.stop();
self.ui.stop();
}

View file

@ -16,7 +16,7 @@ lmdb-zero = "0.4.4"
memmap = "0.6.2"
serde = "1"
serde_derive = "1"
slog = { version = "~2.3", features = ["max_level_trace", "release_max_level_trace"] }
log = "0.4"
grin_core = { path = "../core" }
grin_util = { path = "../util" }

View file

@ -25,7 +25,6 @@ use core::core::hash::Hashed;
use core::core::pmmr;
use core::core::BlockHeader;
use prune_list::PruneList;
use util::LOGGER;
/// Compact (roaring) bitmap representing the set of positions of
/// leaves that are currently unpruned in the MMR.
@ -64,7 +63,7 @@ impl LeafSet {
let cp_file_path = Path::new(&cp_path);
if !cp_file_path.exists() {
debug!(LOGGER, "leaf_set: rewound leaf file not found: {}", cp_path);
debug!("leaf_set: rewound leaf file not found: {}", cp_path);
return Ok(());
}
@ -73,10 +72,7 @@ impl LeafSet {
bitmap_file.read_to_end(&mut buffer)?;
let bitmap = Bitmap::deserialize(&buffer);
debug!(
LOGGER,
"leaf_set: copying rewound file {} to {}", cp_path, path
);
debug!("leaf_set: copying rewound file {} to {}", cp_path, path);
let mut leaf_set = LeafSet {
path: path.clone(),

View file

@ -28,7 +28,7 @@ extern crate lmdb_zero;
extern crate memmap;
extern crate serde;
#[macro_use]
extern crate slog;
extern crate log;
extern crate failure;
#[macro_use]
extern crate failure_derive;

View file

@ -24,7 +24,6 @@ use core::ser::{self, PMMRable};
use leaf_set::LeafSet;
use prune_list::PruneList;
use types::{prune_noop, AppendOnlyFile, HashFile};
use util::LOGGER;
const PMMR_HASH_FILE: &'static str = "pmmr_hash.bin";
const PMMR_DATA_FILE: &'static str = "pmmr_data.bin";
@ -103,8 +102,8 @@ where
Ok(h) => Some(h),
Err(e) => {
error!(
LOGGER,
"Corrupted storage, could not read an entry from hash store: {:?}", e
"Corrupted storage, could not read an entry from hash store: {:?}",
e
);
return None;
}
@ -126,8 +125,8 @@ where
Ok(h) => Some(h),
Err(e) => {
error!(
LOGGER,
"Corrupted storage, could not read an entry from data store: {:?}", e
"Corrupted storage, could not read an entry from data store: {:?}",
e
);
return None;
}
@ -200,7 +199,6 @@ where
fn dump_stats(&self) {
debug!(
LOGGER,
"pmmr backend: unpruned: {}, hashes: {}, data: {}, leaf_set: {}, prune_list: {}",
self.unpruned_size().unwrap_or(0),
self.hash_size().unwrap_or(0),

View file

@ -29,8 +29,6 @@ use croaring::Bitmap;
use core::core::pmmr::{bintree_postorder_height, family, path};
use util::LOGGER;
/// Maintains a list of previously pruned nodes in PMMR, compacting the list as
/// parents get pruned and allowing checking whether a leaf is pruned. Given
/// a node's position, computes how much it should get shifted given the
@ -91,7 +89,7 @@ impl PruneList {
prune_list.init_caches();
if !prune_list.bitmap.is_empty() {
debug!(LOGGER, "prune_list: bitmap {} pos ({} bytes), pruned_cache {} pos ({} bytes), shift_cache {}, leaf_shift_cache {}",
debug!("prune_list: bitmap {} pos ({} bytes), pruned_cache {} pos ({} bytes), shift_cache {}, leaf_shift_cache {}",
prune_list.bitmap.cardinality(),
prune_list.bitmap.get_serialized_size_in_bytes(),
prune_list.pruned_cache.cardinality(),

View file

@ -27,7 +27,6 @@ use libc::{ftruncate64, off64_t};
use core::core::hash::Hash;
use core::ser;
use util::LOGGER;
/// A no-op function for doing nothing with some pruned data.
pub fn prune_noop(_pruned_data: &[u8]) {}
@ -65,8 +64,8 @@ impl HashFile {
Ok(h) => Some(h),
Err(e) => {
error!(
LOGGER,
"Corrupted storage, could not read an entry from hash file: {:?}", e
"Corrupted storage, could not read an entry from hash file: {:?}",
e
);
return None;
}

View file

@ -13,9 +13,8 @@ lazy_static = "1"
rand = "0.5"
serde = "1"
serde_derive = "1"
slog = { version = "~2.3", features = ["max_level_trace", "release_max_level_trace"] }
slog-term = "~2.4"
slog-async = "~2.3"
log4rs = { version = "0.8.1", features = ["rolling_file_appender", "compound_policy", "size_trigger", "fixed_window_roller"] }
log = "0.4"
walkdir = "2"
zip = "0.4"
parking_lot = {version = "0.6"}

View file

@ -26,10 +26,8 @@ extern crate base64;
extern crate byteorder;
extern crate rand;
#[macro_use]
extern crate slog;
extern crate slog_async;
extern crate slog_term;
extern crate log;
extern crate log4rs;
#[macro_use]
extern crate lazy_static;
@ -48,7 +46,7 @@ pub extern crate secp256k1zkp as secp;
// Logging related
pub mod logger;
pub use logger::{init_logger, init_test_logger, LOGGER};
pub use logger::{init_logger, init_test_logger};
// Static secp instance
pub mod secp_static;

View file

@ -12,10 +12,6 @@
// limitations under the License.
//! Logging wrapper to be used throughout all crates in the workspace
use slog::{Discard, Drain, Duplicate, Level, LevelFilter, Logger};
use slog_async;
use slog_term;
use std::fs::OpenOptions;
use std::ops::Deref;
use Mutex;
@ -24,14 +20,27 @@ use std::{panic, thread};
use types::{LogLevel, LoggingConfig};
fn convert_log_level(in_level: &LogLevel) -> Level {
use log::{LevelFilter, Record};
use log4rs;
use log4rs::append::console::ConsoleAppender;
use log4rs::append::file::FileAppender;
use log4rs::append::rolling_file::{
policy::compound::roll::fixed_window::FixedWindowRoller,
policy::compound::trigger::size::SizeTrigger, policy::compound::CompoundPolicy,
RollingFileAppender,
};
use log4rs::append::Append;
use log4rs::config::{Appender, Config, Root};
use log4rs::encode::pattern::PatternEncoder;
use log4rs::filter::{threshold::ThresholdFilter, Filter, Response};
fn convert_log_level(in_level: &LogLevel) -> LevelFilter {
match *in_level {
LogLevel::Info => Level::Info,
LogLevel::Critical => Level::Critical,
LogLevel::Warning => Level::Warning,
LogLevel::Debug => Level::Debug,
LogLevel::Trace => Level::Trace,
LogLevel::Error => Level::Error,
LogLevel::Info => LevelFilter::Info,
LogLevel::Warning => LevelFilter::Warn,
LogLevel::Debug => LevelFilter::Debug,
LogLevel::Trace => LevelFilter::Trace,
LogLevel::Error => LevelFilter::Error,
}
}
@ -43,60 +52,115 @@ lazy_static! {
static ref TUI_RUNNING: Mutex<bool> = Mutex::new(false);
/// Static Logging configuration, should only be set once, before first logging call
static ref LOGGING_CONFIG: Mutex<LoggingConfig> = Mutex::new(LoggingConfig::default());
}
/// And a static reference to the logger itself, accessible from all crates
pub static ref LOGGER: Logger = {
let was_init = WAS_INIT.lock().clone();
let config = LOGGING_CONFIG.lock();
let slog_level_stdout = convert_log_level(&config.stdout_log_level);
let slog_level_file = convert_log_level(&config.file_log_level);
if config.tui_running.is_some() && config.tui_running.unwrap() {
let mut tui_running_ref = TUI_RUNNING.lock();
*tui_running_ref = true;
/// This filter rejects records whose module path doesn't start with "grin",
/// so the log is kept to Grin-related records only
#[derive(Debug)]
struct GrinFilter;
impl Filter for GrinFilter {
fn filter(&self, record: &Record) -> Response {
if let Some(module_path) = record.module_path() {
if module_path.starts_with("grin") {
return Response::Neutral;
}
}
//Terminal output drain
let terminal_decorator = slog_term::TermDecorator::new().build();
let terminal_drain = slog_term::FullFormat::new(terminal_decorator).build().fuse();
let terminal_drain = LevelFilter::new(terminal_drain, slog_level_stdout).fuse();
let mut terminal_drain = slog_async::Async::new(terminal_drain).build().fuse();
if !config.log_to_stdout || !was_init {
terminal_drain = slog_async::Async::new(Discard{}).build().fuse();
Response::Reject
}
if config.log_to_file && was_init {
//File drain
let file = OpenOptions::new()
.create(true)
.write(true)
.append(config.log_file_append)
.truncate(false)
.open(&config.log_file_path)
.unwrap();
let file_decorator = slog_term::PlainDecorator::new(file);
let file_drain = slog_term::FullFormat::new(file_decorator).build().fuse();
let file_drain = LevelFilter::new(file_drain, slog_level_file).fuse();
let file_drain_final = slog_async::Async::new(file_drain).build().fuse();
let composite_drain = Duplicate::new(terminal_drain, file_drain_final).fuse();
Logger::root(composite_drain, o!())
} else {
Logger::root(terminal_drain, o!())
}
};
}
/// Initialize the logger with the given configuration
pub fn init_logger(config: Option<LoggingConfig>) {
if let Some(c) = config {
let mut config_ref = LOGGING_CONFIG.lock();
*config_ref = c.clone();
let level_stdout = convert_log_level(&c.stdout_log_level);
let level_file = convert_log_level(&c.file_log_level);
let level_minimum;
// Determine minimum logging level for Root logger
if level_stdout > level_file {
level_minimum = level_stdout;
} else {
level_minimum = level_file;
}
// Start logger
let stdout = ConsoleAppender::builder()
.encoder(Box::new(PatternEncoder::default()))
.build();
let mut root = Root::builder();
let mut appenders = vec![];
if c.log_to_stdout {
let filter = Box::new(ThresholdFilter::new(level_stdout));
appenders.push(
Appender::builder()
.filter(filter)
.filter(Box::new(GrinFilter))
.build("stdout", Box::new(stdout)),
);
root = root.appender("stdout");
}
if c.log_to_file {
// If a maximum log size is specified, use a rolling file appender;
// otherwise fall back to the basic file appender
let filter = Box::new(ThresholdFilter::new(level_file));
let file: Box<Append> = {
if let Some(size) = c.log_max_size {
let roller = FixedWindowRoller::builder()
.build(&format!("{}.{{}}.gz", c.log_file_path), 32)
.unwrap();
let trigger = SizeTrigger::new(size);
let policy = CompoundPolicy::new(Box::new(trigger), Box::new(roller));
Box::new(
RollingFileAppender::builder()
.append(c.log_file_append)
.encoder(Box::new(PatternEncoder::new("{d} {l} {M} - {m}{n}")))
.build(c.log_file_path, Box::new(policy))
.unwrap(),
)
} else {
Box::new(
FileAppender::builder()
.append(c.log_file_append)
.encoder(Box::new(PatternEncoder::new("{d} {l} {M} - {m}{n}")))
.build(c.log_file_path)
.unwrap(),
)
}
};
appenders.push(
Appender::builder()
.filter(filter)
.filter(Box::new(GrinFilter))
.build("file", file),
);
root = root.appender("file");
}
let config = Config::builder()
.appenders(appenders)
.build(root.build(level_minimum))
.unwrap();
let _ = log4rs::init_config(config).unwrap();
info!(
"log4rs is initialized, file level: {:?}, stdout level: {:?}, min. level: {:?}",
level_file, level_stdout, level_minimum
);
// Logger configuration successfully injected into LOGGING_CONFIG...
let mut was_init_ref = WAS_INIT.lock();
*was_init_ref = true;
// ... allow logging, having ensured that paths etc. are immutable
}
send_panic_to_log();
}
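The remainder of the diff is largely mechanical: with log4rs installed as the global logger behind the `log` facade, call sites no longer thread a LOGGER handle through every macro invocation. A hedged before/after sketch of that pattern (the messages are taken from the files changed below):
#[macro_use]
extern crate log;

fn example() {
    // Before, with slog, the global handle was passed explicitly:
    //     info!(LOGGER, "Starting restore.");
    // After, with log 0.4 + log4rs, the macro writes to the logger
    // installed globally by init_logger above:
    info!("Starting restore.");
    debug!("Refreshing wallet outputs");
}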
@ -134,7 +198,6 @@ fn send_panic_to_log() {
match info.location() {
Some(location) => {
error!(
LOGGER,
"\nthread '{}' panicked at '{}': {}:{}{:?}\n\n",
thread,
msg,
@ -143,10 +206,7 @@ fn send_panic_to_log() {
backtrace
);
}
None => error!(
LOGGER,
"thread '{}' panicked at '{}'{:?}", thread, msg, backtrace
),
None => error!("thread '{}' panicked at '{}'{:?}", thread, msg, backtrace),
}
//also print to stderr
let tui_running = TUI_RUNNING.lock().clone();

View file

@ -14,11 +14,9 @@
//! Logging configuration types
/// Log level types, as slog's don't implement serialize
/// Log level types
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)]
pub enum LogLevel {
/// Critical
Critical,
/// Error
Error,
/// Warning
@ -46,6 +44,8 @@ pub struct LoggingConfig {
pub log_file_path: String,
/// Whether to append to log or replace
pub log_file_append: bool,
/// Maximum size of the log file in bytes, beyond which it is rotated (optional)
pub log_max_size: Option<u64>,
/// Whether the tui is running (optional)
pub tui_running: Option<bool>,
}
@ -59,6 +59,7 @@ impl Default for LoggingConfig {
file_log_level: LogLevel::Debug,
log_file_path: String::from("grin.log"),
log_file_append: true,
log_max_size: Some(1024 * 1024 * 16), // 16 MiB default
tui_running: None,
}
}
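With the default above, rotation is already on: once grin.log grows past 16 MiB, log4rs rolls it into gzipped copies next to the log file (roughly grin.log.0.gz through grin.log.31.gz, given the 32-file window configured in init_logger). A minimal sketch, under assumed import paths, of overriding the threshold programmatically; values are illustrative, and in practice the config comes from the parsed TOML file rather than being built by hand:
// Sketch only (paths assumed): bring the config types into scope, then
// override just the rotation threshold; other fields keep the defaults above.
use util::{init_logger, LoggingConfig};

fn configure_logging() {
    let config = LoggingConfig {
        log_file_path: String::from("grin.log"),
        log_max_size: Some(32 * 1024 * 1024), // rotate once the file passes 32 MiB
        ..LoggingConfig::default()
    };
    init_logger(Some(config));
}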

View file

@ -17,7 +17,7 @@ rand = "0.5"
serde = "1"
serde_derive = "1"
serde_json = "1"
slog = { version = "~2.3", features = ["max_level_trace", "release_max_level_trace"] }
log = "0.4"
term = "0.5"
tokio = "0.1.7"
tokio-core = "0.1"

View file

@ -26,8 +26,8 @@ use api;
use error::{Error, ErrorKind};
use libtx::slate::Slate;
use libwallet;
use util;
use util::secp::pedersen;
use util::{self, LOGGER};
#[derive(Clone)]
pub struct HTTPWalletClient {
@ -65,11 +65,11 @@ impl WalletClient for HTTPWalletClient {
match single_create_coinbase(&url, &block_fees) {
Err(e) => {
error!(
LOGGER,
"Failed to get coinbase from {}. Run grin wallet listen?", url
"Failed to get coinbase from {}. Run grin wallet listen?",
url
);
error!(LOGGER, "Underlying Error: {}", e.cause().unwrap());
error!(LOGGER, "Backtrace: {}", e.backtrace().unwrap());
error!("Underlying Error: {}", e.cause().unwrap());
error!("Backtrace: {}", e.backtrace().unwrap());
Err(libwallet::ErrorKind::ClientCallback(
"Failed to get coinbase",
))?
@ -85,11 +85,11 @@ impl WalletClient for HTTPWalletClient {
"dest formatted as {} but send -d expected stdout or http://IP:port",
dest
);
error!(LOGGER, "{}", err_str,);
error!("{}", err_str,);
Err(libwallet::ErrorKind::Uri)?
}
let url = format!("{}/v1/wallet/foreign/receive_tx", dest);
debug!(LOGGER, "Posting transaction slate to {}", url);
debug!("Posting transaction slate to {}", url);
let res = api::client::post(url.as_str(), None, slate).context(
libwallet::ErrorKind::ClientCallback("Posting transaction slate"),
@ -153,7 +153,7 @@ impl WalletClient for HTTPWalletClient {
let results = match rt.block_on(task) {
Ok(outputs) => outputs,
Err(e) => {
error!(LOGGER, "Outputs by id failed: {}", e);
error!("Outputs by id failed: {}", e);
return Err(libwallet::ErrorKind::ClientCallback("Error from server"))?;
}
};
@ -209,8 +209,8 @@ impl WalletClient for HTTPWalletClient {
Err(e) => {
// if we got anything other than 200 back from server, bye
error!(
LOGGER,
"get_outputs_by_pmmr_index: unable to contact API {}. Error: {}", addr, e
"get_outputs_by_pmmr_index: unable to contact API {}. Error: {}",
addr, e
);
Err(libwallet::ErrorKind::ClientCallback(
"unable to contact api",
@ -226,11 +226,11 @@ pub fn create_coinbase(dest: &str, block_fees: &BlockFees) -> Result<CbData, Err
match single_create_coinbase(&url, &block_fees) {
Err(e) => {
error!(
LOGGER,
"Failed to get coinbase from {}. Run grin wallet listen?", url
"Failed to get coinbase from {}. Run grin wallet listen?",
url
);
error!(LOGGER, "Underlying Error: {}", e.cause().unwrap());
error!(LOGGER, "Backtrace: {}", e.backtrace().unwrap());
error!("Underlying Error: {}", e.cause().unwrap());
error!("Backtrace: {}", e.backtrace().unwrap());
Err(e)?
}
Ok(res) => Ok(res),

View file

@ -27,8 +27,7 @@ use uuid::Uuid;
use failure::ResultExt;
use keychain::{self, Identifier, Keychain};
use util::secp::pedersen;
use util::LOGGER;
use util::secp::pedersen;
use error::{Error, ErrorKind};
@ -168,11 +167,10 @@ where
// delete the lock file
if let Err(e) = fs::remove_dir(&self.lock_file_path) {
error!(
LOGGER,
"Could not remove wallet lock file. Maybe insufficient rights? {:?} ", e
);
}
info!(LOGGER, "... released wallet lock");
info!("... released wallet lock");
}
}
@ -224,7 +222,7 @@ where
/// Close wallet and remove any stored credentials (TBD)
fn close(&mut self) -> Result<(), libwallet::Error> {
debug!(LOGGER, "Closing wallet keychain");
debug!("Closing wallet keychain");
self.keychain = None;
Ok(())
}
@ -352,14 +350,14 @@ where
fn lock(&self) -> Result<(), libwallet::Error> {
// create directory if it doesn't exist
fs::create_dir_all(self.config.data_file_dir.clone()).unwrap_or_else(|why| {
info!(LOGGER, "! {:?}", why.kind());
info!("! {:?}", why.kind());
});
info!(LOGGER, "Acquiring wallet lock ...");
info!("Acquiring wallet lock ...");
let lock_file_path = self.lock_file_path.clone();
let action = || {
trace!(LOGGER, "making lock file for wallet lock");
trace!("making lock file for wallet lock");
fs::create_dir(&lock_file_path)
};
@ -377,7 +375,6 @@ where
Ok(_) => Ok(()),
Err(e) => {
error!(
LOGGER,
"Failed to acquire wallet lock file (multiple retries)",
);
Err(e.into())
@ -390,7 +387,7 @@ where
fn read_or_create_paths(&mut self) -> Result<(), Error> {
if !Path::new(&self.config.data_file_dir.clone()).exists() {
fs::create_dir_all(&self.config.data_file_dir.clone()).unwrap_or_else(|why| {
info!(LOGGER, "! {:?}", why.kind());
info!("! {:?}", why.kind());
});
}
if Path::new(&self.data_file_path.clone()).exists() {

View file

@ -24,7 +24,7 @@ extern crate serde;
extern crate serde_derive;
extern crate serde_json;
#[macro_use]
extern crate slog;
extern crate log;
extern crate chrono;
extern crate term;
extern crate url;

View file

@ -30,7 +30,6 @@ use util::{kernel_sig_msg, secp};
use core::core::{Input, Output, OutputFeatures, Transaction, TxKernel};
use keychain::{self, BlindSum, BlindingFactor, Identifier, Keychain};
use libtx::{aggsig, proof};
use util::LOGGER;
/// Context information available to transaction combinators.
pub struct Context<'a, K: 'a>
@ -67,8 +66,8 @@ where
K: Keychain,
{
debug!(
LOGGER,
"Building input (spending regular output): {}, {}", value, key_id
"Building input (spending regular output): {}, {}",
value, key_id
);
build_input(value, OutputFeatures::DEFAULT_OUTPUT, key_id)
}
@ -78,10 +77,7 @@ pub fn coinbase_input<K>(value: u64, key_id: Identifier) -> Box<Append<K>>
where
K: Keychain,
{
debug!(
LOGGER,
"Building input (spending coinbase): {}, {}", value, key_id
);
debug!("Building input (spending coinbase): {}, {}", value, key_id);
build_input(value, OutputFeatures::COINBASE_OUTPUT, key_id)
}
@ -95,7 +91,7 @@ where
move |build, (tx, kern, sum)| -> (Transaction, TxKernel, BlindSum) {
let commit = build.keychain.commit(value, &key_id).unwrap();
debug!(LOGGER, "Building output: {}, {:?}", value, commit);
debug!("Building output: {}, {:?}", value, commit);
let rproof = proof::create(build.keychain, value, &key_id, commit, None).unwrap();

View file

@ -21,7 +21,7 @@ use core::core::KernelFeatures;
use core::core::{Output, OutputFeatures, TxKernel};
use libtx::error::Error;
use libtx::{aggsig, proof};
use util::{kernel_sig_msg, secp, static_secp_instance, LOGGER};
use util::{kernel_sig_msg, secp, static_secp_instance};
/// output a reward output
pub fn output<K>(
@ -36,7 +36,7 @@ where
let value = reward(fees);
let commit = keychain.commit(value, key_id)?;
trace!(LOGGER, "Block reward - Pedersen Commit is: {:?}", commit,);
trace!("Block reward - Pedersen Commit is: {:?}", commit,);
let rproof = proof::create(keychain, value, key_id, commit, None)?;

View file

@ -27,9 +27,9 @@ use keychain::{BlindSum, BlindingFactor, Keychain};
use libtx::error::{Error, ErrorKind};
use libtx::{aggsig, build, tx_fee};
use util::secp;
use util::secp::key::{PublicKey, SecretKey};
use util::secp::Signature;
use util::{secp, LOGGER};
/// Public data for each participant in the slate
@ -289,7 +289,7 @@ impl Slate {
amount_to_hr_string(fee, false),
amount_to_hr_string(self.amount + self.fee, false)
);
info!(LOGGER, "{}", reason);
info!("{}", reason);
return Err(ErrorKind::Fee(reason.to_string()))?;
}
@ -395,7 +395,7 @@ impl Slate {
final_tx.kernels_mut()[0].excess_sig = final_sig.clone();
// confirm the kernel verifies successfully before proceeding
debug!(LOGGER, "Validating final transaction");
debug!("Validating final transaction");
final_tx.kernels()[0].verify()?;
// confirm the overall transaction is valid (including the updated kernel)

View file

@ -36,8 +36,8 @@ use libwallet::types::{
WalletClient, WalletInfo,
};
use libwallet::{Error, ErrorKind};
use util;
use util::secp::pedersen;
use util::{self, LOGGER};
/// Wrapper around internal API functions, containing a reference to
/// the wallet/keychain that they're acting upon
@ -188,7 +188,6 @@ where
Ok(s) => s,
Err(e) => {
error!(
LOGGER,
"Communication with receiver failed on SenderInitiation send. Aborting transaction {:?}",
e,
);
@ -321,11 +320,10 @@ where
};
let res = client.post_tx(&TxWrapper { tx_hex: tx_hex }, fluff);
if let Err(e) = res {
error!(LOGGER, "api: post_tx: failed with error: {}", e);
error!("api: post_tx: failed with error: {}", e);
Err(e)
} else {
debug!(
LOGGER,
"api: post_tx: successfully posted tx: {}, fluff? {}",
slate.tx.hash(),
fluff
@ -351,14 +349,14 @@ where
};
if confirmed {
warn!(
LOGGER,
"api: dump_stored_tx: transaction at {} is already confirmed.", tx_id
"api: dump_stored_tx: transaction at {} is already confirmed.",
tx_id
);
}
if tx_hex.is_none() {
error!(
LOGGER,
"api: dump_stored_tx: completed transaction at {} does not exist.", tx_id
"api: dump_stored_tx: completed transaction at {} does not exist.",
tx_id
);
return Err(ErrorKind::TransactionBuildingNotCompleted(tx_id))?;
}
@ -386,15 +384,15 @@ where
};
if confirmed {
error!(
LOGGER,
"api: repost_tx: transaction at {} is confirmed. NOT resending.", tx_id
"api: repost_tx: transaction at {} is confirmed. NOT resending.",
tx_id
);
return Err(ErrorKind::TransactionAlreadyConfirmed)?;
}
if tx_hex.is_none() {
error!(
LOGGER,
"api: repost_tx: completed transaction at {} does not exist.", tx_id
"api: repost_tx: completed transaction at {} does not exist.",
tx_id
);
return Err(ErrorKind::TransactionBuildingNotCompleted(tx_id))?;
}
@ -406,12 +404,12 @@ where
fluff,
);
if let Err(e) = res {
error!(LOGGER, "api: repost_tx: failed with error: {}", e);
error!("api: repost_tx: failed with error: {}", e);
Err(e)
} else {
debug!(
LOGGER,
"api: repost_tx: successfully posted tx at: {}, fluff? {}", tx_id, fluff
"api: repost_tx: successfully posted tx at: {}, fluff? {}",
tx_id, fluff
);
Ok(())
}
@ -541,11 +539,10 @@ where
w.close()?;
if let Err(e) = res {
error!(LOGGER, "api: receive_tx: failed with error: {}", e);
error!("api: receive_tx: failed with error: {}", e);
Err(e)
} else {
debug!(
LOGGER,
"api: receive_tx: successfully received tx: {}",
slate.tx.hash()
);

View file

@ -37,7 +37,7 @@ use std::sync::Arc;
use url::form_urlencoded;
use util::secp::pedersen;
use util::Mutex;
use util::{to_base64, LOGGER};
use util::to_base64;
/// Instantiate wallet Owner API for a single-use (command line) call
/// Return a function containing a loaded API context to call
@ -95,7 +95,7 @@ where
.map_err(|_| ErrorKind::GenericError("Router failed to add route".to_string()))?;
let mut apis = ApiServer::new();
info!(LOGGER, "Starting HTTP Owner API server at {}.", addr);
info!("Starting HTTP Owner API server at {}.", addr);
let socket_addr: SocketAddr = addr.parse().expect("unable to parse socket address");
let api_thread =
apis.start(socket_addr, router, tls_config)
@ -127,7 +127,7 @@ where
.map_err(|_| ErrorKind::GenericError("Router failed to add route".to_string()))?;
let mut apis = ApiServer::new();
info!(LOGGER, "Starting HTTP Foreign API server at {}.", addr);
info!("Starting HTTP Foreign API server at {}.", addr);
let socket_addr: SocketAddr = addr.parse().expect("unable to parse socket address");
let api_thread =
apis.start(socket_addr, router, tls_config)
@ -226,12 +226,12 @@ where
Ok(id) => match api.dump_stored_tx(id, false, "") {
Ok(tx) => Ok(tx),
Err(e) => {
error!(LOGGER, "dump_stored_tx: failed with error: {}", e);
error!("dump_stored_tx: failed with error: {}", e);
Err(e)
}
},
Err(e) => {
error!(LOGGER, "dump_stored_tx: could not parse id: {}", e);
error!("dump_stored_tx: could not parse id: {}", e);
Err(ErrorKind::TransactionDumpError(
"dump_stored_tx: cannot dump transaction. Could not parse id in request.",
).into())
@ -307,7 +307,7 @@ where
args.selection_strategy_is_use_all,
)
} else {
error!(LOGGER, "unsupported payment method: {}", args.method);
error!("unsupported payment method: {}", args.method);
return Err(ErrorKind::ClientCallback("unsupported payment method"))?;
}
}))
@ -322,7 +322,7 @@ where
parse_body(req).and_then(move |mut slate| match api.finalize_tx(&mut slate) {
Ok(_) => ok(slate.clone()),
Err(e) => {
error!(LOGGER, "finalize_tx: failed with error: {}", e);
error!("finalize_tx: failed with error: {}", e);
err(e)
}
}),
@ -340,12 +340,12 @@ where
Ok(id) => match api.cancel_tx(id) {
Ok(_) => ok(()),
Err(e) => {
error!(LOGGER, "cancel_tx: failed with error: {}", e);
error!("cancel_tx: failed with error: {}", e);
err(e)
}
},
Err(e) => {
error!(LOGGER, "cancel_tx: could not parse id: {}", e);
error!("cancel_tx: could not parse id: {}", e);
err(ErrorKind::TransactionCancellationError(
"cancel_tx: cannot cancel transaction. Could not parse id in request.",
).into())
@ -443,7 +443,7 @@ where
match self.handle_get_request(&req) {
Ok(r) => Box::new(ok(r)),
Err(e) => {
error!(LOGGER, "Request Error: {:?}", e);
error!("Request Error: {:?}", e);
Box::new(ok(create_error_response(e)))
}
}
@ -454,7 +454,7 @@ where
self.handle_post_request(req)
.and_then(|r| ok(r))
.or_else(|e| {
error!(LOGGER, "Request Error: {:?}", e);
error!("Request Error: {:?}", e);
ok(create_error_response(e))
}),
)
@ -511,7 +511,7 @@ where
parse_body(req).and_then(move |mut slate| match api.receive_tx(&mut slate) {
Ok(_) => ok(slate.clone()),
Err(e) => {
error!(LOGGER, "receive_tx: failed with error: {}", e);
error!("receive_tx: failed with error: {}", e);
err(e)
}
}),
@ -548,7 +548,7 @@ where
{
fn post(&self, req: Request<Body>) -> ResponseFuture {
Box::new(self.handle_request(req).and_then(|r| ok(r)).or_else(|e| {
error!(LOGGER, "Request Error: {:?}", e);
error!("Request Error: {:?}", e);
ok(create_error_response(e))
}))
}

View file

@ -21,7 +21,6 @@ use libwallet::types::*;
use libwallet::Error;
use std::collections::HashMap;
use util::secp::{key::SecretKey, pedersen};
use util::LOGGER;
/// Utility struct for return values from below
struct OutputResult {
@ -55,7 +54,6 @@ where
let mut wallet_outputs: Vec<OutputResult> = Vec::new();
info!(
LOGGER,
"Scanning {} outputs in the current Grin utxo set",
outputs.len(),
);
@ -70,10 +68,7 @@ where
continue;
}
info!(
LOGGER,
"Output found: {:?}, amount: {:?}", commit, info.value
);
info!("Output found: {:?}, amount: {:?}", commit, info.value);
let lock_height = if *is_coinbase {
*height + global::coinbase_maturity()
@ -109,14 +104,11 @@ where
// Don't proceed if wallet_data has anything in it
let is_empty = wallet.iter().next().is_none();
if !is_empty {
error!(
LOGGER,
"Not restoring. Please back up and remove existing db directory first."
);
error!("Not restoring. Please back up and remove existing db directory first.");
return Ok(());
}
info!(LOGGER, "Starting restore.");
info!("Starting restore.");
let batch_size = 1000;
let mut start_index = 1;
@ -126,7 +118,6 @@ where
.client()
.get_outputs_by_pmmr_index(start_index, batch_size)?;
info!(
LOGGER,
"Retrieved {} outputs, up to index {}. (Highest index: {})",
outputs.len(),
highest_index,
@ -142,7 +133,6 @@ where
}
info!(
LOGGER,
"Identified {} wallet_outputs as belonging to this wallet",
result_vec.len(),
);

View file

@ -20,8 +20,6 @@ use libwallet::error::{Error, ErrorKind};
use libwallet::internal::keys;
use libwallet::types::*;
use util::LOGGER;
/// Initialize a transaction on the sender side, returns a corresponding
/// libwallet transaction slate with the appropriate inputs selected,
/// and saves the private wallet identifiers of our selected outputs
@ -356,14 +354,11 @@ where
let mut change_amounts_derivations = vec![];
if change == 0 {
debug!(
LOGGER,
"No change (sending exactly amount + fee), no change outputs to build"
);
debug!("No change (sending exactly amount + fee), no change outputs to build");
} else {
debug!(
LOGGER,
"Building change outputs: total change: {} ({} outputs)", change, num_change_outputs
"Building change outputs: total change: {} ({} outputs)",
change, num_change_outputs
);
let part_change = change / num_change_outputs as u64;
@ -442,7 +437,6 @@ where
// coins = the amount.
if let Some(outputs) = select_from(amount, false, eligible.clone()) {
debug!(
LOGGER,
"Extending maximum number of outputs. {} outputs selected.",
outputs.len()
);

View file

@ -25,7 +25,6 @@ use libtx::{build, tx_fee};
use libwallet::internal::{selection, updater};
use libwallet::types::{Context, TxLogEntryType, WalletBackend, WalletClient};
use libwallet::{Error, ErrorKind};
use util::LOGGER;
/// Receive a transaction, modifying the slate accordingly (which can then be
/// sent back to sender for posting)
@ -225,7 +224,7 @@ where
parent_key_id,
);
debug!(LOGGER, "selected some coins - {}", coins.len());
debug!("selected some coins - {}", coins.len());
let fee = tx_fee(coins.len(), 2, 1, None);
let num_change_outputs = 1;

View file

@ -30,8 +30,8 @@ use libwallet::types::{
BlockFees, CbData, OutputData, OutputStatus, TxLogEntry, TxLogEntryType, WalletBackend,
WalletClient, WalletInfo,
};
use util;
use util::secp::pedersen;
use util::{self, LOGGER};
/// Retrieve all of the outputs (doesn't attempt to update from node)
pub fn retrieve_outputs<T: ?Sized, C, K>(
@ -201,14 +201,10 @@ where
// these changes as the chain is syncing, incorrect or forking
if height < last_confirmed_height {
warn!(
LOGGER,
"Not updating outputs as the height of the node's chain \
is less than the last reported wallet update height."
);
warn!(
LOGGER,
"Please wait for sync on node to complete or fork to resolve and try again."
);
warn!("Please wait for sync on node to complete or fork to resolve and try again.");
return Ok(());
}
let mut batch = wallet.batch()?;
@ -274,7 +270,7 @@ where
C: WalletClient,
K: Keychain,
{
debug!(LOGGER, "Refreshing wallet outputs");
debug!("Refreshing wallet outputs");
// build a local map of wallet outputs keyed by commit
// and a list of outputs we want to query the node for
@ -423,7 +419,6 @@ where
}
debug!(
LOGGER,
"receive_coinbase: built candidate output - {:?}, {}",
key_id.clone(),
key_id,
@ -432,7 +427,7 @@ where
let mut block_fees = block_fees.clone();
block_fees.key_id = Some(key_id.clone());
debug!(LOGGER, "receive_coinbase: {:?}", block_fees);
debug!("receive_coinbase: {:?}", block_fees);
let (out, kern) = reward::output(
wallet.keychain(),

View file

@ -26,7 +26,6 @@ use error::{Error, ErrorKind};
use failure::ResultExt;
use keychain::Keychain;
use util;
use util::LOGGER;
pub const SEED_FILE: &'static str = "wallet.seed";
@ -118,7 +117,7 @@ impl WalletSeed {
wallet_config.data_file_dir, MAIN_SEPARATOR, SEED_FILE,
);
debug!(LOGGER, "Generating wallet seed file at: {}", seed_file_path);
debug!("Generating wallet seed file at: {}", seed_file_path);
if Path::new(seed_file_path).exists() {
Err(ErrorKind::WalletSeedExists)?
@ -140,7 +139,7 @@ impl WalletSeed {
wallet_config.data_file_dir, MAIN_SEPARATOR, SEED_FILE,
);
debug!(LOGGER, "Using wallet seed file at: {}", seed_file_path,);
debug!("Using wallet seed file at: {}", seed_file_path,);
if Path::new(seed_file_path).exists() {
let mut file = File::open(seed_file_path).context(ErrorKind::IO)?;
@ -150,7 +149,6 @@ impl WalletSeed {
Ok(wallet_seed)
} else {
error!(
LOGGER,
"wallet seed file {} could not be opened (grin wallet init). \
Run \"grin wallet init\" to initialize a new wallet.",
seed_file_path

View file

@ -20,7 +20,7 @@ extern crate grin_util as util;
extern crate grin_wallet as wallet;
extern crate rand;
#[macro_use]
extern crate slog;
extern crate log;
extern crate chrono;
extern crate serde;
extern crate uuid;
@ -35,7 +35,6 @@ use std::time::Duration;
use core::global;
use core::global::ChainTypes;
use keychain::{ExtKeychain, Keychain};
use util::LOGGER;
use wallet::libwallet;
fn clean_output_dir(test_dir: &str) {
@ -69,7 +68,7 @@ fn accounts_test_impl(test_dir: &str) -> Result<(), libwallet::Error> {
// Set the wallet proxy listener running
thread::spawn(move || {
if let Err(e) = wallet_proxy.run() {
error!(LOGGER, "Wallet Proxy error: {}", e);
error!("Wallet Proxy error: {}", e);
}
});

View file

@ -28,8 +28,8 @@ use util::{Mutex, RwLock};
use common::api;
use common::serde_json;
use store;
use util;
use util::secp::pedersen::Commitment;
use util::{self, LOGGER};
use common::failure::ResultExt;
@ -146,7 +146,7 @@ where
thread::sleep(Duration::from_millis(10));
// read queue
let m = self.rx.recv().unwrap();
trace!(LOGGER, "Wallet Client Proxy Received: {:?}", m);
trace!("Wallet Client Proxy Received: {:?}", m);
let resp = match m.method.as_ref() {
"get_chain_height" => self.get_chain_height(m)?,
"get_outputs_from_node" => self.get_outputs_from_node(m)?,
@ -345,7 +345,7 @@ impl WalletClient for LocalWalletClient {
}
let r = self.rx.lock();
let m = r.recv().unwrap();
trace!(LOGGER, "Received send_tx_slate response: {:?}", m.clone());
trace!("Received send_tx_slate response: {:?}", m.clone());
Ok(
serde_json::from_str(&m.body).context(libwallet::ErrorKind::ClientCallback(
"Parsing send_tx_slate response",
@ -369,7 +369,7 @@ impl WalletClient for LocalWalletClient {
}
let r = self.rx.lock();
let m = r.recv().unwrap();
trace!(LOGGER, "Received post_tx response: {:?}", m.clone());
trace!("Received post_tx response: {:?}", m.clone());
Ok(())
}
@ -389,11 +389,7 @@ impl WalletClient for LocalWalletClient {
}
let r = self.rx.lock();
let m = r.recv().unwrap();
trace!(
LOGGER,
"Received get_chain_height response: {:?}",
m.clone()
);
trace!("Received get_chain_height response: {:?}", m.clone());
Ok(m.body
.parse::<u64>()
.context(libwallet::ErrorKind::ClientCallback(

View file

@ -20,7 +20,7 @@ extern crate grin_util as util;
extern crate grin_wallet as wallet;
extern crate rand;
#[macro_use]
extern crate slog;
extern crate log;
extern crate chrono;
extern crate serde;
extern crate uuid;
@ -35,7 +35,6 @@ use std::time::Duration;
use core::global;
use core::global::ChainTypes;
use keychain::{ExtKeychain, Identifier, Keychain};
use util::LOGGER;
use wallet::libtx::slate::Slate;
use wallet::libwallet;
use wallet::libwallet::types::AcctPathMapping;
@ -67,7 +66,7 @@ fn restore_wallet(base_dir: &str, wallet_dir: &str) -> Result<(), libwallet::Err
// Set the wallet proxy listener running
thread::spawn(move || {
if let Err(e) = wallet_proxy.run() {
error!(LOGGER, "Wallet Proxy error: {}", e);
error!("Wallet Proxy error: {}", e);
}
});
@ -121,7 +120,7 @@ fn compare_wallet_restore(
// Set the wallet proxy listener running
thread::spawn(move || {
if let Err(e) = wallet_proxy.run() {
error!(LOGGER, "Wallet Proxy error: {}", e);
error!("Wallet Proxy error: {}", e);
}
});
@ -218,7 +217,7 @@ fn setup_restore(test_dir: &str) -> Result<(), libwallet::Error> {
// Set the wallet proxy listener running
thread::spawn(move || {
if let Err(e) = wallet_proxy.run() {
error!(LOGGER, "Wallet Proxy error: {}", e);
error!("Wallet Proxy error: {}", e);
}
});

View file

@ -20,7 +20,7 @@ extern crate grin_util as util;
extern crate grin_wallet as wallet;
extern crate rand;
#[macro_use]
extern crate slog;
extern crate log;
extern crate chrono;
extern crate serde;
extern crate uuid;
@ -35,7 +35,6 @@ use std::time::Duration;
use core::global;
use core::global::ChainTypes;
use keychain::ExtKeychain;
use util::LOGGER;
use wallet::libtx::slate::Slate;
use wallet::libwallet;
use wallet::libwallet::types::OutputStatus;
@ -73,7 +72,7 @@ fn basic_transaction_api(test_dir: &str) -> Result<(), libwallet::Error> {
// Set the wallet proxy listener running
thread::spawn(move || {
if let Err(e) = wallet_proxy.run() {
error!(LOGGER, "Wallet Proxy error: {}", e);
error!("Wallet Proxy error: {}", e);
}
});
@ -87,10 +86,8 @@ fn basic_transaction_api(test_dir: &str) -> Result<(), libwallet::Error> {
wallet::controller::owner_single_use(wallet1.clone(), |api| {
let (wallet1_refreshed, wallet1_info) = api.retrieve_summary_info(true)?;
debug!(
LOGGER,
"Wallet 1 Info Pre-Transaction, after {} blocks: {:?}",
wallet1_info.last_confirmed_height,
wallet1_info
wallet1_info.last_confirmed_height, wallet1_info
);
assert!(wallet1_refreshed);
assert_eq!(
@ -166,10 +163,8 @@ fn basic_transaction_api(test_dir: &str) -> Result<(), libwallet::Error> {
wallet::controller::owner_single_use(wallet1.clone(), |api| {
let (wallet1_refreshed, wallet1_info) = api.retrieve_summary_info(true)?;
debug!(
LOGGER,
"Wallet 1 Info Post Transaction, after {} blocks: {:?}",
wallet1_info.last_confirmed_height,
wallet1_info
wallet1_info.last_confirmed_height, wallet1_info
);
let fee = wallet::libtx::tx_fee(
wallet1_info.last_confirmed_height as usize - 1 - cm as usize,
@ -207,7 +202,7 @@ fn basic_transaction_api(test_dir: &str) -> Result<(), libwallet::Error> {
// refresh wallets and retrieve info/tests for each wallet after maturity
wallet::controller::owner_single_use(wallet1.clone(), |api| {
let (wallet1_refreshed, wallet1_info) = api.retrieve_summary_info(true)?;
debug!(LOGGER, "Wallet 1 Info: {:?}", wallet1_info);
debug!("Wallet 1 Info: {:?}", wallet1_info);
assert!(wallet1_refreshed);
assert_eq!(
wallet1_info.total,
@ -318,7 +313,7 @@ fn tx_rollback(test_dir: &str) -> Result<(), libwallet::Error> {
// Set the wallet proxy listener running
thread::spawn(move || {
if let Err(e) = wallet_proxy.run() {
error!(LOGGER, "Wallet Proxy error: {}", e);
error!("Wallet Proxy error: {}", e);
}
});