diff --git a/Cargo.toml b/Cargo.toml
index 98a50ecc3..e28069eab 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -24,7 +24,7 @@ humansize = "1.1.0"
daemonize = "0.3"
serde = "1"
serde_json = "1"
-slog = { version = "~2.3", features = ["max_level_trace", "release_max_level_trace"] }
+log = "0.4"
term = "0.5"
grin_api = { path = "./api" }
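
Note on the dependency swap above: `slog` threads an explicit `Logger` handle through every call site, while the `log` facade routes its macros to a single global logger. That is why the `LOGGER` argument disappears from every macro invocation in the hunks below. A minimal sketch of the call-site difference follows; the `env_logger` backend is an illustrative assumption for this sketch only, not what this crate actually wires up:

```rust
#[macro_use]
extern crate log; // 2015-edition style, matching this codebase
extern crate env_logger; // assumed backend, for illustration only

fn main() {
    // Install a global logger once at startup; after that, the log macros
    // need no logger argument anywhere in the codebase.
    env_logger::init();

    // Before: debug!(LOGGER, "REST call: {} {}", method, path);
    // After:
    debug!("REST call: {} {}", "GET", "/v1/status");
    info!("Starting HTTP API server at {}.", "127.0.0.1:3413");
}
```
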
diff --git a/api/Cargo.toml b/api/Cargo.toml
index a944eb7a1..10cbadce0 100644
--- a/api/Cargo.toml
+++ b/api/Cargo.toml
@@ -15,7 +15,7 @@ ring = "0.13"
serde = "1"
serde_derive = "1"
serde_json = "1"
-slog = { version = "~2.3", features = ["max_level_trace", "release_max_level_trace"] }
+log = "0.4"
tokio = "0.1.7"
tokio-core = "0.1.17"
tokio-tcp = "0.1"
diff --git a/api/src/handlers.rs b/api/src/handlers.rs
index 8d4c08dfb..d684d4262 100644
--- a/api/src/handlers.rs
+++ b/api/src/handlers.rs
@@ -37,7 +37,6 @@ use types::*;
use url::form_urlencoded;
use util;
use util::secp::pedersen::Commitment;
-use util::LOGGER;
use web::*;
// All handlers use `Weak` references instead of `Arc` to avoid cycles that
@@ -206,12 +205,8 @@ impl OutputHandler {
}
debug!(
- LOGGER,
"outputs_block_batch: {}-{}, {:?}, {:?}",
- start_height,
- end_height,
- commitments,
- include_rp,
+ start_height, end_height, commitments, include_rp,
);
let mut return_vec = vec![];
@@ -745,7 +740,6 @@ impl PoolPushHandler {
identifier: "?.?.?.?".to_string(),
};
info!(
- LOGGER,
"Pushing transaction {} to pool (inputs: {}, outputs: {}, kernels: {})",
tx.hash(),
tx.inputs().len(),
@@ -759,7 +753,7 @@ impl PoolPushHandler {
tx_pool
.add_to_pool(source, tx, !fluff, &header)
.map_err(|e| {
- error!(LOGGER, "update_pool: failed with error: {:?}", e);
+ error!("update_pool: failed with error: {:?}", e);
ErrorKind::Internal(format!("Failed to update pool: {:?}", e)).into()
})
}),
@@ -808,7 +802,7 @@ pub fn start_rest_apis(
router.add_middleware(basic_auth_middleware);
}
- info!(LOGGER, "Starting HTTP API server at {}.", addr);
+ info!("Starting HTTP API server at {}.", addr);
let socket_addr: SocketAddr = addr.parse().expect("unable to parse socket address");
apis.start(socket_addr, router, tls_config).is_ok()
}
diff --git a/api/src/lib.rs b/api/src/lib.rs
index 5df900aaf..cde0399ef 100644
--- a/api/src/lib.rs
+++ b/api/src/lib.rs
@@ -33,7 +33,7 @@ extern crate serde;
extern crate serde_derive;
extern crate serde_json;
#[macro_use]
-extern crate slog;
+extern crate log;
extern crate futures;
extern crate http;
extern crate hyper_rustls;
diff --git a/api/src/rest.rs b/api/src/rest.rs
index f7738d0a5..54e7d2446 100644
--- a/api/src/rest.rs
+++ b/api/src/rest.rs
@@ -33,7 +33,6 @@ use std::sync::Arc;
use std::{io, thread};
use tokio_rustls::ServerConfigExt;
use tokio_tcp;
-use util::LOGGER;
/// Errors that can be returned by an ApiEndpoint implementation.
#[derive(Debug)]
@@ -243,13 +242,10 @@ impl ApiServer {
// TODO re-enable stop after investigation
//let tx = mem::replace(&mut self.shutdown_sender, None).unwrap();
//tx.send(()).expect("Failed to stop API server");
- info!(LOGGER, "API server has been stoped");
+ info!("API server has been stoped");
true
} else {
- error!(
- LOGGER,
- "Can't stop API server, it's not running or doesn't spport stop operation"
- );
+ error!("Can't stop API server, it's not running or doesn't spport stop operation");
false
}
}
@@ -263,7 +259,7 @@ impl Handler for LoggingMiddleware {
req: Request<Body>,
mut handlers: Box<Iterator<Item = HandlerObj>>,
) -> ResponseFuture {
- debug!(LOGGER, "REST call: {} {}", req.method(), req.uri().path());
+ debug!("REST call: {} {}", req.method(), req.uri().path());
handlers.next().unwrap().call(req, handlers)
}
}
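
With `util::LOGGER` gone, something still has to install a global logger before any of these macros fire; records emitted before installation are silently dropped. The diff does not show the replacement backend, so the following is only a minimal sketch of what any `log` 0.4 backend must provide, assuming a plain stdout sink:

```rust
use log::{Level, LevelFilter, Metadata, Record};

/// Hypothetical stdout backend; the real replacement lives elsewhere in the PR.
struct StdoutLogger;

impl log::Log for StdoutLogger {
    fn enabled(&self, metadata: &Metadata) -> bool {
        metadata.level() <= Level::Debug
    }

    fn log(&self, record: &Record) {
        if self.enabled(record.metadata()) {
            println!("{} [{}] {}", record.level(), record.target(), record.args());
        }
    }

    fn flush(&self) {}
}

static STDOUT_LOGGER: StdoutLogger = StdoutLogger;

pub fn init_logging() {
    // set_logger may only succeed once per process.
    log::set_logger(&STDOUT_LOGGER).expect("logger already installed");
    log::set_max_level(LevelFilter::Debug);
}
```
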
diff --git a/chain/Cargo.toml b/chain/Cargo.toml
index c59bc55cd..ff9456ed8 100644
--- a/chain/Cargo.toml
+++ b/chain/Cargo.toml
@@ -12,7 +12,7 @@ lmdb-zero = "0.4.4"
failure = "0.1"
failure_derive = "0.1"
croaring = "0.3"
-slog = { version = "~2.3", features = ["max_level_trace", "release_max_level_trace"] }
+log = "0.4"
serde = "1"
serde_derive = "1"
chrono = "0.4.4"
diff --git a/chain/src/chain.rs b/chain/src/chain.rs
index e2a144e4d..1f637afc3 100644
--- a/chain/src/chain.rs
+++ b/chain/src/chain.rs
@@ -38,7 +38,6 @@ use store;
use txhashset;
use types::{ChainAdapter, NoStatus, Options, Tip, TxHashSetRoots, TxHashsetWriteStatus};
use util::secp::pedersen::{Commitment, RangeProof};
-use util::LOGGER;
/// Orphan pool size is limited by MAX_ORPHAN_SIZE
pub const MAX_ORPHAN_SIZE: usize = 200;
@@ -184,7 +183,6 @@ impl Chain {
let head = store.head()?;
debug!(
- LOGGER,
"Chain init: {} @ {} [{}]",
head.total_difficulty.to_num(),
head.height,
@@ -261,7 +259,6 @@ impl Chain {
&self.orphans.add(orphan);
debug!(
- LOGGER,
"process_block: orphan: {:?}, # orphans {}{}",
block_hash,
self.orphans.len(),
@@ -275,7 +272,6 @@ impl Chain {
}
ErrorKind::Unfit(ref msg) => {
debug!(
- LOGGER,
"Block {} at {} is unfit at this time: {}",
b.hash(),
b.header.height,
@@ -285,7 +281,6 @@ impl Chain {
}
_ => {
info!(
- LOGGER,
"Rejected block {} at {}: {:?}",
b.hash(),
b.header.height,
@@ -360,7 +355,6 @@ impl Chain {
// Is there an orphan in our orphans that we can now process?
loop {
trace!(
- LOGGER,
"check_orphans: at {}, # orphans {}",
height,
self.orphans.len(),
@@ -373,7 +367,6 @@ impl Chain {
let orphans_len = orphans.len();
for (i, orphan) in orphans.into_iter().enumerate() {
debug!(
- LOGGER,
"check_orphans: get block {} at {}{}",
orphan.block.hash(),
height,
@@ -402,7 +395,6 @@ impl Chain {
if initial_height != height {
debug!(
- LOGGER,
"check_orphans: {} blocks accepted since height {}, remaining # orphans {}",
height - initial_height,
initial_height,
@@ -589,7 +581,6 @@ impl Chain {
txhashset: &txhashset::TxHashSet,
) -> Result<(), Error> {
debug!(
- LOGGER,
"chain: validate_kernel_history: rewinding and validating kernel history (readonly)"
);
@@ -606,8 +597,8 @@ impl Chain {
})?;
debug!(
- LOGGER,
- "chain: validate_kernel_history: validated kernel root on {} headers", count,
+ "chain: validate_kernel_history: validated kernel root on {} headers",
+ count,
);
Ok(())
@@ -682,10 +673,7 @@ impl Chain {
self.validate_kernel_history(&header, &txhashset)?;
// all good, prepare a new batch and update all the required records
- debug!(
- LOGGER,
- "chain: txhashset_write: rewinding a 2nd time (writeable)"
- );
+ debug!("chain: txhashset_write: rewinding a 2nd time (writeable)");
let mut batch = self.store.batch()?;
@@ -709,10 +697,7 @@ impl Chain {
Ok(())
})?;
- debug!(
- LOGGER,
- "chain: txhashset_write: finished validating and rebuilding"
- );
+ debug!("chain: txhashset_write: finished validating and rebuilding");
status.on_save();
@@ -727,10 +712,7 @@ impl Chain {
// Commit all the changes to the db.
batch.commit()?;
- debug!(
- LOGGER,
- "chain: txhashset_write: finished committing the batch (head etc.)"
- );
+ debug!("chain: txhashset_write: finished committing the batch (head etc.)");
// Replace the chain txhashset with the newly built one.
{
@@ -738,10 +720,7 @@ impl Chain {
*txhashset_ref = txhashset;
}
- debug!(
- LOGGER,
- "chain: txhashset_write: replaced our txhashset with the new one"
- );
+ debug!("chain: txhashset_write: replaced our txhashset with the new one");
// Check for any orphan blocks and process them based on the new chain state.
self.check_orphans(header.height + 1);
@@ -763,14 +742,11 @@ impl Chain {
/// therefore be called judiciously.
pub fn compact(&self) -> Result<(), Error> {
if self.archive_mode {
- debug!(
- LOGGER,
- "Blockchain compaction disabled, node running in archive mode."
- );
+ debug!("Blockchain compaction disabled, node running in archive mode.");
return Ok(());
}
- debug!(LOGGER, "Starting blockchain compaction.");
+ debug!("Starting blockchain compaction.");
// Compact the txhashset via the extension.
{
let mut txhashset = self.txhashset.write();
@@ -785,7 +761,7 @@ impl Chain {
// Now check we can still successfully validate the chain state after
// compacting, shouldn't be necessary once all of this is well-oiled
- debug!(LOGGER, "Validating state after compaction.");
+ debug!("Validating state after compaction.");
self.validate(true)?;
// we need to be careful here in testing as 20 blocks is not that long
@@ -798,7 +774,6 @@ impl Chain {
}
debug!(
- LOGGER,
"Compaction remove blocks older than {}.",
head.height - horizon
);
@@ -831,7 +806,7 @@ impl Chain {
}
}
batch.commit()?;
- debug!(LOGGER, "Compaction removed {} blocks, done.", count);
+ debug!("Compaction removed {} blocks, done.", count);
Ok(())
}
@@ -1052,7 +1027,6 @@ fn setup_head(
if header.height > 0 && extension.batch.get_block_sums(&header.hash()).is_err()
{
debug!(
- LOGGER,
"chain: init: building (missing) block sums for {} @ {}",
header.height,
header.hash()
@@ -1073,7 +1047,6 @@ fn setup_head(
}
debug!(
- LOGGER,
"chain: init: rewinding and validating before we start... {} at {}",
header.hash(),
header.height,
@@ -1110,7 +1083,7 @@ fn setup_head(
// Save the block_sums to the db for use later.
batch.save_block_sums(&genesis.hash(), &BlockSums::default())?;
- info!(LOGGER, "chain: init: saved genesis: {:?}", genesis.hash());
+ info!("chain: init: saved genesis: {:?}", genesis.hash());
}
Err(e) => return Err(ErrorKind::StoreErr(e, "chain init load head".to_owned()))?,
};
diff --git a/chain/src/lib.rs b/chain/src/lib.rs
index 83fc8faa9..82db548fb 100644
--- a/chain/src/lib.rs
+++ b/chain/src/lib.rs
@@ -30,7 +30,7 @@ extern crate serde;
#[macro_use]
extern crate serde_derive;
#[macro_use]
-extern crate slog;
+extern crate log;
extern crate chrono;
extern crate failure;
#[macro_use]
diff --git a/chain/src/pipe.rs b/chain/src/pipe.rs
index 82e1e143f..1eae38b53 100644
--- a/chain/src/pipe.rs
+++ b/chain/src/pipe.rs
@@ -35,7 +35,6 @@ use grin_store;
use store;
use txhashset;
use types::{Options, Tip};
-use util::LOGGER;
/// Contextual information required to process a new block and either reject or
/// accept it.
@@ -71,7 +70,6 @@ pub fn process_block(b: &Block, ctx: &mut BlockContext) -> Result<Option<Tip>, Error> {