slog-rs logging (#171)

* added global slog instance, changed all logging macro formats to include logger instance
* adding configuration to logging, allowing for multiple log outputs
* updates to test, changes to build docs
* rustfmt
* moving logging functions into util crate
This commit is contained in:
Yeastplume 2017-10-12 17:56:44 +01:00 committed by Ignotus Peverell
parent b85006ebe5
commit 8e382a7593
62 changed files with 973 additions and 1006 deletions

View file

@ -15,15 +15,15 @@ grin_grin = { path = "./grin" }
grin_config = { path = "./config" }
grin_core = { path = "./core" }
grin_pow = { path = "./pow"}
grin_util = { path = "./util"}
secp256k1zkp = { git = "https://github.com/mimblewimble/rust-secp256k1-zkp" }
blake2-rfc = "~0.2.17"
clap = "^2.23.3"
daemonize = "^0.2.3"
env_logger="^0.3.5"
log = "^0.3"
serde = "~1.0.8"
serde_derive = "~1.0.8"
serde_json = "~1.0.2"
slog = "^2.0.12"
# TODO - once "patch" is available we should be able to clean up the workspace dependencies
# [patch.crates-io]

View file

@ -12,8 +12,8 @@ grin_store = { path = "../store" }
grin_util = { path = "../util" }
secp256k1zkp = { git = "https://github.com/mimblewimble/rust-secp256k1-zkp" }
hyper = "~0.10.6"
slog = "^2.0.12"
iron = "~0.5.1"
log = "~0.3"
router = "~0.5.1"
serde = "~1.0.8"
serde_derive = "~1.0.8"

View file

@ -26,14 +26,12 @@ use rest::Error;
/// returns a JSON object. Handles request building, JSON deserialization and
/// response code checking.
pub fn get<'a, T>(url: &'a str) -> Result<T, Error>
where
for<'de> T: Deserialize<'de>,
where for<'de> T: Deserialize<'de>
{
let client = hyper::Client::new();
let res = check_error(client.get(url).send())?;
serde_json::from_reader(res).map_err(|e| {
Error::Internal(format!("Server returned invalid JSON: {}", e))
})
serde_json::from_reader(res)
.map_err(|e| Error::Internal(format!("Server returned invalid JSON: {}", e)))
}
/// Helper function to easily issue a HTTP POST request with the provided JSON
@ -41,18 +39,15 @@ where
/// building, JSON serialization and deserialization, and response code
/// checking.
pub fn post<'a, IN, OUT>(url: &'a str, input: &IN) -> Result<OUT, Error>
where
IN: Serialize,
for<'de> OUT: Deserialize<'de>,
where IN: Serialize,
for<'de> OUT: Deserialize<'de>
{
let in_json = serde_json::to_string(input).map_err(|e| {
Error::Internal(format!("Could not serialize data to JSON: {}", e))
})?;
let in_json = serde_json::to_string(input)
.map_err(|e| Error::Internal(format!("Could not serialize data to JSON: {}", e)))?;
let client = hyper::Client::new();
let res = check_error(client.post(url).body(&mut in_json.as_bytes()).send())?;
serde_json::from_reader(res).map_err(|e| {
Error::Internal(format!("Server returned invalid JSON: {}", e))
})
serde_json::from_reader(res)
.map_err(|e| Error::Internal(format!("Server returned invalid JSON: {}", e)))
}
// convert hyper error and check for non success response codes

View file

@ -24,6 +24,7 @@ use rest::*;
use types::*;
use secp::pedersen::Commitment;
use util;
use util::LOGGER;
/// ApiEndpoint implementation for the blockchain. Exposes the current chain
/// state as a simple JSON object.
@ -69,7 +70,7 @@ impl ApiEndpoint for OutputApi {
}
fn get(&self, id: String) -> ApiResult<Output> {
debug!("GET output {}", id);
debug!(LOGGER, "GET output {}", id);
let c = util::from_hex(id.clone()).map_err(|_| {
Error::Argument(format!("Not a valid commitment: {}", id))
})?;
@ -130,6 +131,7 @@ where
identifier: "?.?.?.?".to_string(),
};
info!(
LOGGER,
"Pushing transaction with {} inputs and {} outputs to pool.",
tx.inputs.len(),
tx.outputs.len()
@ -172,7 +174,7 @@ pub fn start_rest_apis<T>(
apis.register_endpoint("/pool".to_string(), PoolApi { tx_pool: tx_pool });
apis.start(&addr[..]).unwrap_or_else(|e| {
error!("Failed to start API HTTP server: {}.", e);
error!(LOGGER, "Failed to start API HTTP server: {}.", e);
});
});
}

View file

@ -21,7 +21,7 @@ extern crate secp256k1zkp as secp;
extern crate hyper;
#[macro_use]
extern crate log;
extern crate slog;
extern crate iron;
extern crate router;
extern crate serde;

View file

@ -36,6 +36,7 @@ use serde::de::DeserializeOwned;
use serde_json;
use store;
use util::LOGGER;
/// Errors that can be returned by an ApiEndpoint implementation.
#[derive(Debug)]
@ -171,13 +172,13 @@ impl<E> Handler for ApiWrapper<E>
Method::Get => {
let res = self.0.get(extract_param(req, "id")?)?;
let res_json = serde_json::to_string(&res)
.map_err(|e| IronError::new(e, status::InternalServerError))?;
.map_err(|e| IronError::new(e, status::InternalServerError))?;
Ok(Response::with((status::Ok, res_json)))
}
Method::Put => {
let id = extract_param(req, "id")?;
let t: E::T = serde_json::from_reader(req.body.by_ref())
.map_err(|e| IronError::new(e, status::BadRequest))?;
.map_err(|e| IronError::new(e, status::BadRequest))?;
self.0.update(id, t)?;
Ok(Response::with(status::NoContent))
}
@ -188,7 +189,7 @@ impl<E> Handler for ApiWrapper<E>
}
Method::Post => {
let t: E::T = serde_json::from_reader(req.body.by_ref())
.map_err(|e| IronError::new(e, status::BadRequest))?;
.map_err(|e| IronError::new(e, status::BadRequest))?;
let id = self.0.create(t)?;
Ok(Response::with((status::Created, id.to_string())))
}
@ -203,8 +204,7 @@ struct OpWrapper<E> {
}
impl<E> Handler for OpWrapper<E>
where
E: ApiEndpoint,
where E: ApiEndpoint
{
fn handle(&self, req: &mut Request) -> IronResult<Response> {
let t: E::OP_IN = serde_json::from_reader(req.body.by_ref()).map_err(|e| {
@ -219,7 +219,7 @@ where
Ok(Response::with((status::Ok, res_json)))
}
Err(e) => {
error!("API operation: {:?}", e);
error!(LOGGER, "API operation: {:?}", e);
Err(IronError::from(e))
}
}
@ -227,9 +227,8 @@ where
}
fn extract_param<ID>(req: &mut Request, param: &'static str) -> IronResult<ID>
where
ID: ToString + FromStr,
<ID as FromStr>::Err: Debug + Send + error::Error + 'static,
where ID: ToString + FromStr,
<ID as FromStr>::Err: Debug + Send + error::Error + 'static
{
let id = req.extensions
@ -237,9 +236,8 @@ where
.unwrap()
.find(param)
.unwrap_or("");
id.parse::<ID>().map_err(
|e| IronError::new(e, status::BadRequest),
)
id.parse::<ID>()
.map_err(|e| IronError::new(e, status::BadRequest))
}
/// HTTP server allowing the registration of ApiEndpoint implementations.
@ -279,9 +277,8 @@ impl ApiServer {
/// Register a new API endpoint, providing a relative URL for the new
/// endpoint.
pub fn register_endpoint<E>(&mut self, subpath: String, endpoint: E)
where
E: ApiEndpoint,
<<E as ApiEndpoint>::ID as FromStr>::Err: Debug + Send + error::Error,
where E: ApiEndpoint,
<<E as ApiEndpoint>::ID as FromStr>::Err: Debug + Send + error::Error
{
assert_eq!(subpath.chars().nth(0).unwrap(), '/');
@ -299,13 +296,9 @@ impl ApiServer {
endpoint: endpoint.clone(),
};
let full_path = format!("{}/{}", root.clone(), op_s.clone());
self.router.route(
op.to_method(),
full_path.clone(),
wrapper,
route_name,
);
info!("route: POST {}", full_path);
self.router
.route(op.to_method(), full_path.clone(), wrapper, route_name);
info!(LOGGER, "route: POST {}", full_path);
} else {
// regular REST operations
@ -317,21 +310,18 @@ impl ApiServer {
_ => panic!("unreachable"),
};
let wrapper = ApiWrapper(endpoint.clone());
self.router.route(
op.to_method(),
full_path.clone(),
wrapper,
route_name,
);
info!("route: {} {}", op.to_method(), full_path);
self.router
.route(op.to_method(), full_path.clone(), wrapper, route_name);
info!(LOGGER, "route: {} {}", op.to_method(), full_path);
}
}
// support for the HTTP Options method by differentiating what's on the
// root resource vs the id resource
let (root_opts, sub_opts) = endpoint.operations().iter().fold(
(vec![], vec![]),
|mut acc, op| {
let (root_opts, sub_opts) = endpoint
.operations()
.iter()
.fold((vec![], vec![]), |mut acc, op| {
let m = op.to_method();
if m == Method::Post {
acc.0.push(m);
@ -339,23 +329,18 @@ impl ApiServer {
acc.1.push(m);
}
acc
},
);
});
self.router.options(
root.clone(),
move |_: &mut Request| {
Ok(Response::with(
(status::Ok, Header(headers::Allow(root_opts.clone()))),
))
Ok(Response::with((status::Ok, Header(headers::Allow(root_opts.clone())))))
},
"option_".to_string() + route_postfix,
);
self.router.options(
root.clone() + "/:id",
move |_: &mut Request| {
Ok(Response::with(
(status::Ok, Header(headers::Allow(sub_opts.clone()))),
))
Ok(Response::with((status::Ok, Header(headers::Allow(sub_opts.clone())))))
},
"option_id_".to_string() + route_postfix,
);

View file

@ -58,10 +58,7 @@ impl Output {
pub fn from_output(output: &core::Output, block_header: &core::BlockHeader) -> Output {
let (output_type, lock_height) = match output.features {
x if x.contains(core::transaction::COINBASE_OUTPUT) => {
(
OutputType::Coinbase,
block_header.height + global::coinbase_maturity(),
)
(OutputType::Coinbase, block_header.height + global::coinbase_maturity())
}
_ => (OutputType::Transaction, 0),
};

View file

@ -7,13 +7,14 @@ workspace = ".."
[dependencies]
bitflags = "^0.7.0"
byteorder = "^0.5"
log = "^0.3"
slog = "^2.0.12"
serde = "~1.0.8"
serde_derive = "~1.0.8"
time = "^0.1"
grin_core = { path = "../core" }
grin_keychain = { path = "../keychain" }
grin_util = { path = "../util" }
grin_store = { path = "../store" }
secp256k1zkp = { git = "https://github.com/mimblewimble/rust-secp256k1-zkp" }

View file

@ -28,6 +28,7 @@ use pipe;
use store;
use sumtree;
use types::*;
use util::LOGGER;
use core::global::{MiningParameterMode, MINING_PARAMETER_MODE};
@ -69,12 +70,11 @@ impl Chain {
/// on the current chain head to make sure it exists and creates one based
/// on
/// the genesis block if necessary.
pub fn init(
db_root: String,
adapter: Arc<ChainAdapter>,
gen_block: Option<Block>,
pow_verifier: fn(&BlockHeader, u32) -> bool,
) -> Result<Chain, Error> {
pub fn init(db_root: String,
adapter: Arc<ChainAdapter>,
gen_block: Option<Block>,
pow_verifier: fn(&BlockHeader, u32) -> bool)
-> Result<Chain, Error> {
let chain_store = store::ChainKVStore::new(db_root.clone())?;
// check if we have a head in store, otherwise the genesis block is it
@ -92,7 +92,7 @@ impl Chain {
// saving a new tip based on genesis
let tip = Tip::new(gen.hash());
chain_store.save_head(&tip)?;
info!("Saved genesis block with hash {}", gen.hash());
info!(LOGGER, "Saved genesis block with hash {}", gen.hash());
tip
}
Err(e) => return Err(Error::StoreErr(e)),
@ -138,6 +138,7 @@ impl Chain {
}
Err(ref e) => {
info!(
LOGGER,
"Rejected block {} at {} : {:?}",
b.hash(),
b.header.height,
@ -151,11 +152,10 @@ impl Chain {
/// Attempt to add a new header to the header chain. Only necessary during
/// sync.
pub fn process_block_header(
&self,
bh: &BlockHeader,
opts: Options,
) -> Result<Option<Tip>, Error> {
pub fn process_block_header(&self,
bh: &BlockHeader,
opts: Options)
-> Result<Option<Tip>, Error> {
let head = self.store.get_header_head().map_err(&Error::StoreErr)?;
let ctx = self.ctx_from_head(head, opts);
@ -213,9 +213,9 @@ impl Chain {
let sumtrees = self.sumtrees.read().unwrap();
let is_unspent = sumtrees.is_unspent(output_ref)?;
if is_unspent {
self.store.get_output_by_commit(output_ref).map_err(
&Error::StoreErr,
)
self.store
.get_output_by_commit(output_ref)
.map_err(&Error::StoreErr)
} else {
Err(Error::OutputNotFound)
}
@ -266,16 +266,15 @@ impl Chain {
/// Gets the block header at the provided height
pub fn get_header_by_height(&self, height: u64) -> Result<BlockHeader, Error> {
self.store.get_header_by_height(height).map_err(
&Error::StoreErr,
)
self.store
.get_header_by_height(height)
.map_err(&Error::StoreErr)
}
/// Gets the block header by the provided output commitment
pub fn get_block_header_by_output_commit(
&self,
commit: &Commitment,
) -> Result<BlockHeader, Error> {
pub fn get_block_header_by_output_commit(&self,
commit: &Commitment)
-> Result<BlockHeader, Error> {
self.store
.get_block_header_by_output_commit(commit)
.map_err(&Error::StoreErr)

View file

@ -24,13 +24,14 @@
extern crate bitflags;
extern crate byteorder;
#[macro_use]
extern crate log;
extern crate slog;
extern crate serde;
#[macro_use]
extern crate serde_derive;
extern crate time;
extern crate grin_core as core;
extern crate grin_util as util;
extern crate grin_store;
extern crate secp256k1zkp as secp;

View file

@ -27,6 +27,7 @@ use types::*;
use store;
use sumtree;
use core::global;
use util::LOGGER;
/// Contextual information required to process a new block and either reject or
/// accept it.
@ -53,6 +54,7 @@ pub fn process_block(b: &Block, mut ctx: BlockContext) -> Result<Option<Tip>, Er
// spend resources reading the full block when its header is invalid
info!(
LOGGER,
"Starting validation pipeline for block {} at {} with {} inputs and {} outputs.",
b.hash(),
b.header.height,
@ -74,6 +76,7 @@ pub fn process_block(b: &Block, mut ctx: BlockContext) -> Result<Option<Tip>, Er
validate_block(b, &mut ctx, &mut extension)?;
debug!(
LOGGER,
"Block at {} with hash {} is valid, going to save and append.",
b.header.height,
b.hash()
@ -92,6 +95,7 @@ pub fn process_block(b: &Block, mut ctx: BlockContext) -> Result<Option<Tip>, Er
pub fn process_block_header(bh: &BlockHeader, mut ctx: BlockContext) -> Result<Option<Tip>, Error> {
info!(
LOGGER,
"Starting validation pipeline for block header {} at {}.",
bh.hash(),
bh.height
@ -135,6 +139,7 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), E
// check version, enforces scheduled hard fork
if !consensus::valid_header_version(header.height, header.version) {
error!(
LOGGER,
"Invalid block header version received ({}), maybe update Grin?",
header.version
);
@ -142,8 +147,7 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), E
}
if header.timestamp >
time::now_utc() + time::Duration::seconds(12 * (consensus::BLOCK_TIME_SEC as i64))
{
time::now_utc() + time::Duration::seconds(12 * (consensus::BLOCK_TIME_SEC as i64)) {
// refuse blocks more than 12 blocks intervals in future (as in bitcoin)
// TODO add warning in p2p code if local time is too different from peers
return Err(Error::InvalidBlockTime);
@ -155,16 +159,16 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), E
} else {
consensus::DEFAULT_SIZESHIFT
};
debug!("Validating block with cuckoo size {}", cycle_size);
debug!(LOGGER, "Validating block with cuckoo size {}", cycle_size);
if !(ctx.pow_verifier)(header, cycle_size as u32) {
return Err(Error::InvalidPow);
}
}
// first I/O cost, better as late as possible
let prev = try!(ctx.store.get_block_header(&header.previous).map_err(
&Error::StoreErr,
));
let prev = try!(ctx.store
.get_block_header(&header.previous)
.map_err(&Error::StoreErr));
if header.height != prev.height + 1 {
return Err(Error::InvalidBlockHeight);
@ -183,9 +187,8 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), E
}
let diff_iter = store::DifficultyIter::from(header.previous, ctx.store.clone());
let difficulty = consensus::next_difficulty(diff_iter).map_err(|e| {
Error::Other(e.to_string())
})?;
let difficulty = consensus::next_difficulty(diff_iter)
.map_err(|e| Error::Other(e.to_string()))?;
if header.difficulty < difficulty {
return Err(Error::DifficultyTooLow);
}
@ -195,11 +198,10 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), E
}
/// Fully validate the block content.
fn validate_block(
b: &Block,
ctx: &mut BlockContext,
ext: &mut sumtree::Extension,
) -> Result<(), Error> {
fn validate_block(b: &Block,
ctx: &mut BlockContext,
ext: &mut sumtree::Extension)
-> Result<(), Error> {
if b.header.height > ctx.head.height + 1 {
return Err(Error::Orphan);
}
@ -248,11 +250,7 @@ fn validate_block(
if forked_block.header.height > 0 {
let last_output = &forked_block.outputs[forked_block.outputs.len() - 1];
let last_kernel = &forked_block.kernels[forked_block.kernels.len() - 1];
ext.rewind(
forked_block.header.height,
last_output,
last_kernel,
)?;
ext.rewind(forked_block.header.height, last_output, last_kernel)?;
}
// apply all forked blocks, including this new one
@ -265,8 +263,7 @@ fn validate_block(
let (utxo_root, rproof_root, kernel_root) = ext.roots();
if utxo_root.hash != b.header.utxo_root || rproof_root.hash != b.header.range_proof_root ||
kernel_root.hash != b.header.kernel_root
{
kernel_root.hash != b.header.kernel_root {
return Err(Error::InvalidRoot);
}
@ -276,10 +273,8 @@ fn validate_block(
if let Ok(output) = ctx.store.get_output_by_commit(&input.commitment()) {
if output.features.contains(transaction::COINBASE_OUTPUT) {
if let Ok(output_header) =
ctx.store.get_block_header_by_output_commit(
&input.commitment(),
)
{
ctx.store
.get_block_header_by_output_commit(&input.commitment()) {
// TODO - make sure we are not off-by-1 here vs. the equivalent transaction
// validation rule
@ -333,7 +328,12 @@ fn update_head(b: &Block, ctx: &mut BlockContext) -> Result<Option<Tip>, Error>
// TODO if we're switching branch, make sure to backtrack the sum trees
ctx.head = tip.clone();
info!("Updated head to {} at {}.", b.hash(), b.header.height);
info!(
LOGGER,
"Updated head to {} at {}.",
b.hash(),
b.header.height
);
Ok(Some(tip))
} else {
Ok(None)
@ -352,6 +352,7 @@ fn update_header_head(bh: &BlockHeader, ctx: &mut BlockContext) -> Result<Option
ctx.head = tip.clone();
info!(
LOGGER,
"Updated block header head to {} at {}.",
bh.hash(),
bh.height

View file

@ -141,10 +141,8 @@ impl ChainStore for ChainKVStore {
// in this index.
//
fn get_block_header_by_output_commit(&self, commit: &Commitment) -> Result<BlockHeader, Error> {
let block_hash = self.db.get_ser(&to_key(
HEADER_BY_OUTPUT_PREFIX,
&mut commit.as_ref().to_vec(),
))?;
let block_hash = self.db
.get_ser(&to_key(HEADER_BY_OUTPUT_PREFIX, &mut commit.as_ref().to_vec()))?;
match block_hash {
Some(hash) => {
@ -213,10 +211,8 @@ impl ChainStore for ChainKVStore {
/// that is consistent with its height (everything prior to this will be
/// consistent)
fn setup_height(&self, bh: &BlockHeader) -> Result<(), Error> {
self.db.put_ser(
&u64_to_key(HEADER_HEIGHT_PREFIX, bh.height),
bh,
)?;
self.db
.put_ser(&u64_to_key(HEADER_HEIGHT_PREFIX, bh.height), bh)?;
if bh.height == 0 {
return Ok(());
}

View file

@ -36,16 +36,14 @@ const RANGE_PROOF_SUBDIR: &'static str = "rangeproof";
const KERNEL_SUBDIR: &'static str = "kernel";
struct PMMRHandle<T>
where
T: Summable + Clone,
where T: Summable + Clone
{
backend: PMMRBackend<T>,
last_pos: u64,
}
impl<T> PMMRHandle<T>
where
T: Summable + Clone,
where T: Summable + Clone
{
fn new(root_dir: String, file_name: &str) -> Result<PMMRHandle<T>, Error> {
let path = Path::new(&root_dir).join(SUMTREES_SUBDIR).join(file_name);
@ -107,8 +105,7 @@ impl SumTrees {
/// If the closure returns an error, modifications are canceled and the unit
/// of work is abandoned. Otherwise, the unit of work is permanently applied.
pub fn extending<'a, F, T>(trees: &'a mut SumTrees, inner: F) -> Result<T, Error>
where
F: FnOnce(&mut Extension) -> Result<T, Error>,
where F: FnOnce(&mut Extension) -> Result<T, Error>
{
let sizes: (u64, u64, u64);
@ -229,9 +226,9 @@ impl<'a> Extension<'a> {
self.new_output_commits.insert(out.commitment(), pos);
// push range proofs in their MMR
self.rproof_pmmr.push(NoSum(out.proof)).map_err(
&Error::SumTreeErr,
)?;
self.rproof_pmmr
.push(NoSum(out.proof))
.map_err(&Error::SumTreeErr)?;
}
for kernel in &b.kernels {
@ -239,9 +236,9 @@ impl<'a> Extension<'a> {
return Err(Error::DuplicateKernel(kernel.excess.clone()));
}
// push kernels in their MMR
let pos = self.kernel_pmmr.push(NoSum(kernel.clone())).map_err(
&Error::SumTreeErr,
)?;
let pos = self.kernel_pmmr
.push(NoSum(kernel.clone()))
.map_err(&Error::SumTreeErr)?;
self.new_kernel_excesses.insert(kernel.excess, pos);
}
Ok(())
@ -277,14 +274,9 @@ impl<'a> Extension<'a> {
/// Current root hashes and sums (if applicable) for the UTXO, range proof
/// and kernel sum trees.
pub fn roots(
&self,
) -> (HashSum<SumCommit>, HashSum<NoSum<RangeProof>>, HashSum<NoSum<TxKernel>>) {
(
self.output_pmmr.root(),
self.rproof_pmmr.root(),
self.kernel_pmmr.root(),
)
pub fn roots(&self)
-> (HashSum<SumCommit>, HashSum<NoSum<RangeProof>>, HashSum<NoSum<TxKernel>>) {
(self.output_pmmr.root(), self.rproof_pmmr.root(), self.kernel_pmmr.root())
}
/// Force the rollback of this extension, no matter the result
@ -294,10 +286,8 @@ impl<'a> Extension<'a> {
// Sizes of the sum trees, used by `extending` on rollback.
fn sizes(&self) -> (u64, u64, u64) {
(
self.output_pmmr.unpruned_size(),
self.rproof_pmmr.unpruned_size(),
self.kernel_pmmr.unpruned_size(),
)
(self.output_pmmr.unpruned_size(),
self.rproof_pmmr.unpruned_size(),
self.kernel_pmmr.unpruned_size())
}
}

View file

@ -209,10 +209,9 @@ pub trait ChainStore: Send + Sync {
fn get_output_by_commit(&self, commit: &Commitment) -> Result<Output, store::Error>;
/// Gets a block_header for the given input commit
fn get_block_header_by_output_commit(
&self,
commit: &Commitment,
) -> Result<BlockHeader, store::Error>;
fn get_block_header_by_output_commit(&self,
commit: &Commitment)
-> Result<BlockHeader, store::Error>;
/// Saves the position of an output, represented by its commitment, in the
/// UTXO MMR. Used as an index for spending and pruning.

View file

@ -11,5 +11,6 @@ toml = "0.4"
grin_grin = { path = "../grin" }
grin_pow = { path = "../pow"}
grin_util = { path = "../util" }
grin_p2p = { path = "../p2p" }
grin_wallet = { path = "../wallet"}

View file

@ -22,6 +22,7 @@ use std::fs::File;
use toml;
use grin::ServerConfig;
use pow::types::MinerConfig;
use util::LoggingConfig;
use types::{ConfigMembers, GlobalConfig, ConfigError};
/// The default file name to use when trying to derive
@ -37,6 +38,7 @@ impl Default for ConfigMembers {
ConfigMembers {
server: ServerConfig::default(),
mining: Some(MinerConfig::default()),
logging: Some(LoggingConfig::default()),
}
}
}

View file

@ -30,6 +30,7 @@ extern crate grin_grin as grin;
extern crate grin_p2p as p2p;
extern crate grin_wallet as wallet;
extern crate grin_pow as pow;
extern crate grin_util as util;
pub mod config;
pub mod types;

View file

@ -20,6 +20,7 @@ use std::fmt;
use grin::ServerConfig;
use pow::types::MinerConfig;
use util::LoggingConfig;
/// Error type wrapping config errors.
#[derive(Debug)]
@ -99,9 +100,12 @@ pub struct ConfigMembers {
pub server: ServerConfig,
/// Mining config
pub mining: Option<MinerConfig>,
//removing wallet from here for now,
//as its concerns are separate from the server's, really
//given it needs to manage keys. It should probably
//stay command line only for the time being
//pub wallet: Option<WalletConfig>
/// Logging config
pub logging: Option<LoggingConfig>,
//removing wallet from here for now,
//as its concerns are separate from the server's, really
//given it needs to manage keys. It should probably
//stay command line only for the time being
//pub wallet: Option<WalletConfig>
}

View file

@ -66,14 +66,14 @@ Provided all of the prerequisites were installed and there were no issues, there
By default, executing:
```
RUST_LOG=grin=debug cargo run
cargo run
```
from the build directory will run grin using the defaults in the grin.toml file, creating a new blockchain locally and mining using a simple version of the embedded miner.
For the time being, it's recommended just to put the built version of grin on your path, e.g. via:
```
export PATH=/path/to/grin/dir/target/debug/grin:$PATH
export PATH=/path/to/grin/dir/target/debug:$PATH
```
# Configuration
@ -100,11 +100,11 @@ For a basic example simulating a single node network, create a directory called
Before running your mining server, a wallet server needs to be set up and listening so that the mining server knows where to send mining rewards. Do this from the first node directory with the following command:
node1$ RUST_LOG=grin=info grin wallet -p "password" receive
node1$ grin wallet -p "password" receive
(Note you can substitute 'RUST_LOG=grin=debug' if you'd like to see more detailed debug output.) This will create a wallet server listening on the default port 13415 with the password "password". Next, in another terminal window in the 'node1' directory, run a full mining node with the following command:
This will create a wallet server listening on the default port 13415 with the password "password". Next, in another terminal window in the 'node1' directory, run a full mining node with the following command:
node1$ RUST_LOG=grin=info grin server -m run
node1$ grin server -m run
This creates a new .grin database directory in the current directory, and begins mining new blocks (with no transactions, for now). Note this starts two services listening on two default ports,
port 13414 for the peer-to-peer (P2P) service which keeps all nodes synchronised, and 13415 for the Rest API service used to verify transactions and post new transactions to the pool (for example). These ports can be configured via command line switches, or via a grin.toml file in the working directory.
@ -123,11 +123,11 @@ As before, node 1 will create the blockchain and begin mining. As we'll be runni
First, we run a wallet server to receive rewards on port 15000 (we'll log in debug mode for more information about what's happening)
node1$ RUST_LOG=grin=debug grin wallet -p "password" -r 15000 receive
node1$ grin wallet -p "password" -r 15000 receive
Then we start node 1 mining with its P2P server bound to port 10000 and its api server at 10001. We also provide our wallet address where we'll receive mining rewards. In another terminal:
node1$ RUST_LOG=grin=debug grin server -m -p 10000 -a 10001 -w "http://127.0.0.1:15000" run
node1$ grin server -m -p 10000 -a 10001 -w "http://127.0.0.1:15000" run
### Node 2: Regular Node (not mining)
@ -135,7 +135,7 @@ We'll set up Node 2 as a simple validating node (i.e. it won't mine,) but we'll
In a new terminal, tell node 2 to run a server using node 1's P2P address as a seed. Node 2's P2P server will run on port 20000 and its API server will run on port 20001.
node2$ RUST_LOG=grin=debug grin server -s "127.0.0.1:10000" -p 20000 -a 20001 run
node2$ grin server -s "127.0.0.1:10000" -p 20000 -a 20001 run
Node 2 will then sync and process and validate new blocks that node 1 may find.
@ -143,11 +143,11 @@ Node 2 will then sync and process and validate new blocks that node 1 may find.
Similar to Node 2, we'll set up node 3 as a non-mining node seeded with node 2 (node 1 could also be used). However, we'll also run another wallet in listener mode on this node:
node3$ RUST_LOG=grin=debug grin server -s "127.0.0.1:20000" -p 30000 -a 30001 run
node3$ grin server -s "127.0.0.1:20000" -p 30000 -a 30001 run
Node 3 is now running its P2P service on port 30000 and its API server on 30001. You should be able to see it syncing its blockchain and peer data with nodes 1 and 2. Now start up a wallet listener.
node3$ RUST_LOG=grin=debug grin wallet -p "password" -a "http://127.0.0.1:10001" -r 35000 receive
node3$ grin wallet -p "password" -a "http://127.0.0.1:10001" -r 35000 receive
In contrast to other blockchains, a feature of a MimbleWimble is that a transaction cannot just be directly posted to the blockchain. It first needs to be sent from the sender to the receiver,
who will add a blinding factor before posting it to the blockchain. The above command tells the wallet server to listen for transactions on port 35000, and, after applying its own blinding factor to the transaction, forward them on to the listening API server on node 1. (NB: we should theoretically be able to post transactions to node 3 or 2, but for some reason transactions posted to peers don't seem to propagate properly at present)
@ -159,7 +159,7 @@ With all of your servers happily running and your terminals scrolling away, let'
In yet another terminal in node 1's directory, create a new partial transaction spending 20000 coins and send them on to node 3's wallet listener. We'll also specify that we'll
use node 2's API listener to validate our transaction inputs before sending:
node1$ RUST_LOG=grin=debug grin wallet -p "password" -a "http://127.0.0.1:20001" send 20000 -d "http://127.0.0.1:35000"
node1$ grin wallet -p "password" -a "http://127.0.0.1:20001" send 20000 -d "http://127.0.0.1:35000"
Your terminal windows should all light up now. Node 1 will check its inputs against node 2, and then send a partial transaction to node 3's wallet listener. Node 3 has been configured to
send signed and finalised transactions to the api listener on node 1, which should then add the transaction to the next block and validate it via mining.

View file

@ -9,6 +9,10 @@
# -[user home]/.grin
#
#########################################
### SERVER CONFIGURATION ###
#########################################
#Server connection details
[server]
@ -48,6 +52,34 @@ capabilities = [7]
host = "127.0.0.1"
port = 13414
#########################################
### LOGGING CONFIGURATION ###
#########################################
[logging]
# Whether to log to stdout
log_to_stdout = true
# Log level for stdout: Critical, Error, Warning, Info, Debug, Trace
stdout_log_level = "Debug"
# Whether to log to a file
log_to_file = true
# Log level for file: Critical, Error, Warning, Info, Debug, Trace
file_log_level = "Trace"
# Log file path
log_file_path = "grin.log"
# Whether to append to the log file (true), or replace it on every run (false)
log_file_append = true
#########################################
### MINING CONFIGURATION ###
#########################################
#Mining details. This section is optional. If it's not here, the server
#will default to not mining.
[mining]
@ -83,7 +115,7 @@ wallet_receiver_url = "http://127.0.0.1:13416"
#whether to ignore the reward (mostly for testing)
burn_reward = true
burn_reward = false
#testing value, optional
#slow_down_in_millis = 30

View file

@ -17,11 +17,10 @@ grin_wallet = { path = "../wallet" }
grin_pow = { path = "../pow" }
secp256k1zkp = { git = "https://github.com/mimblewimble/rust-secp256k1-zkp" }
env_logger="^0.3.5"
futures = "^0.1.15"
futures-cpupool = "^0.1.3"
hyper = { git = "https://github.com/hyperium/hyper" }
log = "^0.3"
slog = "^2.0.12"
time = "^0.1"
serde = "~1.0.8"
serde_derive = "~1.0.8"

View file

@ -27,6 +27,7 @@ use secp::pedersen::Commitment;
use util::OneTime;
use store;
use sync;
use util::LOGGER;
use core::global::{MiningParameterMode, MINING_PARAMETER_MODE};
/// Implementation of the NetAdapter for the blockchain. Gets notified when new
@ -51,19 +52,23 @@ impl NetAdapter for NetToChainAdapter {
identifier: "?.?.?.?".to_string(),
};
if let Err(e) = self.tx_pool.write().unwrap().add_to_memory_pool(source, tx) {
error!("Transaction rejected: {:?}", e);
error!(LOGGER, "Transaction rejected: {:?}", e);
}
}
fn block_received(&self, b: core::Block) {
let bhash = b.hash();
debug!("Received block {} from network, going to process.", bhash);
debug!(
LOGGER,
"Received block {} from network, going to process.",
bhash
);
// pushing the new block through the chain pipeline
let res = self.chain.process_block(b, self.chain_opts());
if let Err(e) = res {
debug!("Block {} refused by chain: {:?}", bhash, e);
debug!(LOGGER, "Block {} refused by chain: {:?}", bhash, e);
}
if self.syncer.borrow().syncing() {
@ -82,6 +87,7 @@ impl NetAdapter for NetToChainAdapter {
}
Err(chain::Error::Unfit(s)) => {
info!(
LOGGER,
"Received unfit block header {} at {}: {}.",
bh.hash(),
bh.height,
@ -89,16 +95,25 @@ impl NetAdapter for NetToChainAdapter {
);
}
Err(chain::Error::StoreErr(e)) => {
error!("Store error processing block header {}: {:?}", bh.hash(), e);
error!(
LOGGER,
"Store error processing block header {}: {:?}",
bh.hash(),
e
);
return;
}
Err(e) => {
info!("Invalid block header {}: {:?}.", bh.hash(), e);
info!(LOGGER, "Invalid block header {}: {:?}.", bh.hash(), e);
// TODO penalize peer somehow
}
}
}
info!("Added {} headers to the header chain.", added_hs.len());
info!(
LOGGER,
"Added {} headers to the header chain.",
added_hs.len()
);
if self.syncer.borrow().syncing() {
self.syncer.borrow().headers_received(added_hs);
@ -118,7 +133,7 @@ impl NetAdapter for NetToChainAdapter {
return self.locate_headers(locator[1..].to_vec());
}
Err(e) => {
error!("Could not build header locator: {:?}", e);
error!(LOGGER, "Could not build header locator: {:?}", e);
return vec![];
}
};
@ -132,7 +147,7 @@ impl NetAdapter for NetToChainAdapter {
Ok(head) => headers.push(head),
Err(chain::Error::StoreErr(store::Error::NotFoundErr)) => break,
Err(e) => {
error!("Could not build header locator: {:?}", e);
error!(LOGGER, "Could not build header locator: {:?}", e);
return vec![];
}
}
@ -152,18 +167,15 @@ impl NetAdapter for NetToChainAdapter {
/// Find good peers we know with the provided capability and return their
/// addresses.
fn find_peer_addrs(&self, capab: p2p::Capabilities) -> Vec<SocketAddr> {
let peers = self.peer_store.find_peers(
State::Healthy,
capab,
p2p::MAX_PEER_ADDRS as usize,
);
debug!("Got {} peer addrs to send.", peers.len());
let peers = self.peer_store
.find_peers(State::Healthy, capab, p2p::MAX_PEER_ADDRS as usize);
debug!(LOGGER, "Got {} peer addrs to send.", peers.len());
map_vec!(peers, |p| p.addr)
}
/// A list of peers has been received from one of our peers.
fn peer_addrs_received(&self, peer_addrs: Vec<SocketAddr>) {
debug!("Received {} peer addrs, saving.", peer_addrs.len());
debug!(LOGGER, "Received {} peer addrs, saving.", peer_addrs.len());
for pa in peer_addrs {
if let Ok(e) = self.peer_store.exists_peer(pa) {
if e {
@ -177,14 +189,14 @@ impl NetAdapter for NetToChainAdapter {
flags: State::Healthy,
};
if let Err(e) = self.peer_store.save_peer(&peer) {
error!("Could not save received peer address: {:?}", e);
error!(LOGGER, "Could not save received peer address: {:?}", e);
}
}
}
/// Network successfully connected to a peer.
fn peer_connected(&self, pi: &p2p::PeerInfo) {
debug!("Saving newly connected peer {}.", pi.addr);
debug!(LOGGER, "Saving newly connected peer {}.", pi.addr);
let peer = PeerData {
addr: pi.addr,
capabilities: pi.capabilities,
@ -192,17 +204,16 @@ impl NetAdapter for NetToChainAdapter {
flags: State::Healthy,
};
if let Err(e) = self.peer_store.save_peer(&peer) {
error!("Could not save connected peer: {:?}", e);
error!(LOGGER, "Could not save connected peer: {:?}", e);
}
}
}
impl NetToChainAdapter {
pub fn new(
chain_ref: Arc<chain::Chain>,
tx_pool: Arc<RwLock<pool::TransactionPool<PoolToChainAdapter>>>,
peer_store: Arc<PeerStore>,
) -> NetToChainAdapter {
pub fn new(chain_ref: Arc<chain::Chain>,
tx_pool: Arc<RwLock<pool::TransactionPool<PoolToChainAdapter>>>,
peer_store: Arc<PeerStore>)
-> NetToChainAdapter {
NetToChainAdapter {
chain: chain_ref,
peer_store: peer_store,
@ -216,15 +227,15 @@ impl NetToChainAdapter {
pub fn start_sync(&self, sync: sync::Syncer) {
let arc_sync = Arc::new(sync);
self.syncer.init(arc_sync.clone());
let spawn_result = thread::Builder::new().name("syncer".to_string()).spawn(
move || {
let spawn_result = thread::Builder::new()
.name("syncer".to_string())
.spawn(move || {
let sync_run_result = arc_sync.run();
match sync_run_result {
Ok(_) => {}
Err(_) => {}
}
},
);
});
match spawn_result {
Ok(_) => {}
Err(_) => {}
@ -261,6 +272,7 @@ impl ChainAdapter for ChainToPoolAndNetAdapter {
{
if let Err(e) = self.tx_pool.write().unwrap().reconcile_block(b) {
error!(
LOGGER,
"Pool could not update itself at block {}: {:?}",
b.hash(),
e
@ -272,9 +284,8 @@ impl ChainAdapter for ChainToPoolAndNetAdapter {
}
impl ChainToPoolAndNetAdapter {
pub fn new(
tx_pool: Arc<RwLock<pool::TransactionPool<PoolToChainAdapter>>>,
) -> ChainToPoolAndNetAdapter {
pub fn new(tx_pool: Arc<RwLock<pool::TransactionPool<PoolToChainAdapter>>>)
-> ChainToPoolAndNetAdapter {
ChainToPoolAndNetAdapter {
tx_pool: tx_pool,
p2p: OneTime::new(),
@ -306,19 +317,19 @@ impl PoolToChainAdapter {
impl pool::BlockChain for PoolToChainAdapter {
fn get_unspent(&self, output_ref: &Commitment) -> Result<Output, pool::PoolError> {
self.chain.borrow().get_unspent(output_ref).map_err(
|e| match e {
self.chain
.borrow()
.get_unspent(output_ref)
.map_err(|e| match e {
chain::types::Error::OutputNotFound => pool::PoolError::OutputNotFound,
chain::types::Error::OutputSpent => pool::PoolError::OutputSpent,
_ => pool::PoolError::GenericPoolError,
},
)
})
}
fn get_block_header_by_output_commit(
&self,
commit: &Commitment,
) -> Result<BlockHeader, pool::PoolError> {
fn get_block_header_by_output_commit(&self,
commit: &Commitment)
-> Result<BlockHeader, pool::PoolError> {
self.chain
.borrow()
.get_block_header_by_output_commit(commit)
@ -326,8 +337,9 @@ impl pool::BlockChain for PoolToChainAdapter {
}
fn head_header(&self) -> Result<BlockHeader, pool::PoolError> {
self.chain.borrow().head_header().map_err(|_| {
pool::PoolError::GenericPoolError
})
self.chain
.borrow()
.head_header()
.map_err(|_| pool::PoolError::GenericPoolError)
}
}

View file

@ -22,8 +22,7 @@
#![warn(missing_docs)]
#[macro_use]
extern crate log;
extern crate env_logger;
extern crate slog;
extern crate futures;
extern crate futures_cpupool as cpupool;
extern crate hyper;

View file

@ -35,6 +35,7 @@ use pow::MiningWorker;
use pow::types::MinerConfig;
use core::ser;
use core::ser::AsFixedBytes;
use util::LOGGER;
// use core::genesis;
@ -81,10 +82,8 @@ impl Default for HeaderPartWriter {
impl HeaderPartWriter {
pub fn parts_as_hex_strings(&self) -> (String, String) {
(
String::from(format!("{:02x}", self.pre_nonce.iter().format(""))),
String::from(format!("{:02x}", self.post_nonce.iter().format(""))),
)
(String::from(format!("{:02x}", self.pre_nonce.iter().format(""))),
String::from(format!("{:02x}", self.post_nonce.iter().format(""))))
}
}
@ -129,11 +128,10 @@ pub struct Miner {
impl Miner {
/// Creates a new Miner. Needs references to the chain state and its
/// storage.
pub fn new(
config: MinerConfig,
chain_ref: Arc<chain::Chain>,
tx_pool: Arc<RwLock<pool::TransactionPool<PoolToChainAdapter>>>,
) -> Miner {
pub fn new(config: MinerConfig,
chain_ref: Arc<chain::Chain>,
tx_pool: Arc<RwLock<pool::TransactionPool<PoolToChainAdapter>>>)
-> Miner {
Miner {
config: config,
chain: chain_ref,
@ -149,18 +147,18 @@ impl Miner {
}
/// Inner part of the mining loop for cuckoo-miner async mode
pub fn inner_loop_async(
&self,
plugin_miner: &mut PluginMiner,
difficulty: Difficulty,
b: &mut Block,
cuckoo_size: u32,
head: &BlockHeader,
latest_hash: &Hash,
attempt_time_per_block: u32,
) -> Option<Proof> {
pub fn inner_loop_async(&self,
plugin_miner: &mut PluginMiner,
difficulty: Difficulty,
b: &mut Block,
cuckoo_size: u32,
head: &BlockHeader,
latest_hash: &Hash,
attempt_time_per_block: u32)
-> Option<Proof> {
debug!(
LOGGER,
"(Server ID: {}) Mining at Cuckoo{} for at most {} secs at height {} and difficulty {}.",
self.debug_output_id,
cuckoo_size,
@ -201,7 +199,7 @@ impl Miner {
if let Some(s) = job_handle.get_solution() {
sol = Some(Proof::new(s.solution_nonces.to_vec()));
b.header.nonce = s.get_nonce_as_u64();
// debug!("Nonce: {}", b.header.nonce);
// debug!(LOGGER, "Nonce: {}", b.header.nonce);
break;
}
if time::get_time().sec > next_stat_output {
@ -213,6 +211,7 @@ impl Miner {
let last_solution_time_secs = s.last_solution_time as f64 / 1000.0;
let last_hashes_per_sec = 1.0 / last_solution_time_secs;
debug!(
LOGGER,
"Mining: Plugin {} - Device {} ({}): Last Solution time: {}s; \
Solutions per second: {:.*} - Total Attempts: {}",
i,
@ -228,7 +227,7 @@ impl Miner {
}
}
}
debug!("Total solutions per second: {}", sps_total);
debug!(LOGGER, "Total solutions per second: {}", sps_total);
next_stat_output = time::get_time().sec + stat_output_interval;
}
}
@ -238,6 +237,7 @@ impl Miner {
}
if sol == None {
debug!(
LOGGER,
"(Server ID: {}) No solution found after {} seconds, continuing...",
self.debug_output_id,
attempt_time_per_block
@ -250,22 +250,23 @@ impl Miner {
}
/// The inner part of mining loop for cuckoo miner sync mode
pub fn inner_loop_sync_plugin(
&self,
plugin_miner: &mut PluginMiner,
b: &mut Block,
cuckoo_size: u32,
head: &BlockHeader,
attempt_time_per_block: u32,
latest_hash: &mut Hash,
) -> Option<Proof> {
// look for a pow for at most 2 sec on the same block (to give a chance to new
pub fn inner_loop_sync_plugin(&self,
plugin_miner: &mut PluginMiner,
b: &mut Block,
cuckoo_size: u32,
head: &BlockHeader,
attempt_time_per_block: u32,
latest_hash: &mut Hash)
-> Option<Proof> {
// look for a pow for at most attempt_time_per_block sec on the same block (to
// give a chance to new
// transactions) and as long as the head hasn't changed
let deadline = time::get_time().sec + attempt_time_per_block as i64;
let stat_check_interval = 3;
let mut next_stat_check = time::get_time().sec + stat_check_interval;
debug!(
LOGGER,
"(Server ID: {}) Mining at Cuckoo{} for {} secs (will wait for last solution) \
on block {} at difficulty {}.",
self.debug_output_id,
@ -278,6 +279,7 @@ impl Miner {
if self.config.slow_down_in_millis != None && self.config.slow_down_in_millis.unwrap() > 0 {
debug!(
LOGGER,
"(Server ID: {}) Artificially slowing down loop by {}ms per iteration.",
self.debug_output_id,
self.config.slow_down_in_millis.unwrap()
@ -301,7 +303,8 @@ impl Miner {
for s in stats_vec.into_iter() {
let last_solution_time_secs = s.last_solution_time as f64 / 1000.0;
let last_hashes_per_sec = 1.0 / last_solution_time_secs;
println!(
debug!(
LOGGER,
"Plugin 0 - Device {} ({}) - Last Solution time: {}; Solutions per second: {:.*}",
s.device_id,
s.device_name,
@ -319,8 +322,7 @@ impl Miner {
// Artificial slow down
if self.config.slow_down_in_millis != None &&
self.config.slow_down_in_millis.unwrap() > 0
{
self.config.slow_down_in_millis.unwrap() > 0 {
thread::sleep(std::time::Duration::from_millis(
self.config.slow_down_in_millis.unwrap(),
));
@ -329,6 +331,7 @@ impl Miner {
if sol == None {
debug!(
LOGGER,
"(Server ID: {}) No solution found after {} iterations, continuing...",
self.debug_output_id,
iter_count
@ -339,20 +342,20 @@ impl Miner {
}
/// The inner part of mining loop for the internal miner
pub fn inner_loop_sync_internal<T: MiningWorker>(
&self,
miner: &mut T,
b: &mut Block,
cuckoo_size: u32,
head: &BlockHeader,
attempt_time_per_block: u32,
latest_hash: &mut Hash,
) -> Option<Proof> {
pub fn inner_loop_sync_internal<T: MiningWorker>(&self,
miner: &mut T,
b: &mut Block,
cuckoo_size: u32,
head: &BlockHeader,
attempt_time_per_block: u32,
latest_hash: &mut Hash)
-> Option<Proof> {
// look for a pow for at most 2 sec on the same block (to give a chance to new
// transactions) and as long as the head hasn't changed
let deadline = time::get_time().sec + attempt_time_per_block as i64;
debug!(
LOGGER,
"(Server ID: {}) Mining at Cuckoo{} for at most {} secs on block {} at difficulty {}.",
self.debug_output_id,
cuckoo_size,
@ -364,6 +367,7 @@ impl Miner {
if self.config.slow_down_in_millis != None && self.config.slow_down_in_millis.unwrap() > 0 {
debug!(
LOGGER,
"(Server ID: {}) Artificially slowing down loop by {}ms per iteration.",
self.debug_output_id,
self.config.slow_down_in_millis.unwrap()
@ -388,8 +392,7 @@ impl Miner {
// Artificial slow down
if self.config.slow_down_in_millis != None &&
self.config.slow_down_in_millis.unwrap() > 0
{
self.config.slow_down_in_millis.unwrap() > 0 {
thread::sleep(std::time::Duration::from_millis(
self.config.slow_down_in_millis.unwrap(),
));
@ -398,6 +401,7 @@ impl Miner {
if sol == None {
debug!(
LOGGER,
"(Server ID: {}) No solution found after {} iterations, continuing...",
self.debug_output_id,
iter_count
@ -410,22 +414,18 @@ impl Miner {
/// chain anytime required and looking for PoW solution.
pub fn run_loop(&self, miner_config: MinerConfig, cuckoo_size: u32, proof_size: usize) {
info!("(Server ID: {}) Starting miner loop.", self.debug_output_id);
info!(
LOGGER,
"(Server ID: {}) Starting miner loop.",
self.debug_output_id
);
let mut plugin_miner = None;
let mut miner = None;
if miner_config.use_cuckoo_miner {
plugin_miner = Some(PluginMiner::new(
consensus::EASINESS,
cuckoo_size,
proof_size,
));
plugin_miner = Some(PluginMiner::new(consensus::EASINESS, cuckoo_size, proof_size));
plugin_miner.as_mut().unwrap().init(miner_config.clone());
} else {
miner = Some(cuckoo::Miner::new(
consensus::EASINESS,
cuckoo_size,
proof_size,
));
miner = Some(cuckoo::Miner::new(consensus::EASINESS, cuckoo_size, proof_size));
}
// to prevent the wallet from generating a new HD key derivation for each
@ -434,7 +434,7 @@ impl Miner {
let mut pubkey = None;
loop {
debug!("in miner loop...");
debug!(LOGGER, "in miner loop...");
// get the latest chain state and build a block on top of it
let head = self.chain.head_header().unwrap();
@ -484,6 +484,7 @@ impl Miner {
// if we found a solution, push our block out
if let Some(proof) = sol {
info!(
LOGGER,
"(Server ID: {}) Found valid proof of work, adding block {}.",
self.debug_output_id,
b.hash()
@ -497,15 +498,20 @@ impl Miner {
let res = self.chain.process_block(b, opts);
if let Err(e) = res {
error!(
LOGGER,
"(Server ID: {}) Error validating mined block: {:?}",
self.debug_output_id,
e
);
}
debug!("resetting pubkey in miner to None");
debug!(LOGGER, "resetting pubkey in miner to None");
pubkey = None;
} else {
debug!("setting pubkey in miner to pubkey from block_fees - {:?}", block_fees);
debug!(
LOGGER,
"setting pubkey in miner to pubkey from block_fees - {:?}",
block_fees
);
pubkey = block_fees.pubkey();
}
}
@ -513,11 +519,10 @@ impl Miner {
/// Builds a new block with the chain head as previous and eligible
/// transactions from the pool.
fn build_block(
&self,
head: &core::BlockHeader,
pubkey: Option<Identifier>,
) -> (core::Block, BlockFees) {
fn build_block(&self,
head: &core::BlockHeader,
pubkey: Option<Identifier>)
-> (core::Block, BlockFees) {
// prepare the block header timestamp
let mut now_sec = time::get_time().sec;
let head_sec = head.timestamp.to_timespec().sec;
@ -530,9 +535,10 @@ impl Miner {
let difficulty = consensus::next_difficulty(diff_iter).unwrap();
// extract current transaction from the pool
let txs_box = self.tx_pool.read().unwrap().prepare_mineable_transactions(
MAX_TX,
);
let txs_box = self.tx_pool
.read()
.unwrap()
.prepare_mineable_transactions(MAX_TX);
let txs: Vec<&Transaction> = txs_box.iter().map(|tx| tx.as_ref()).collect();
// build the coinbase and the block itself
@ -543,6 +549,7 @@ impl Miner {
let (output, kernel, block_fees) = self.get_coinbase(block_fees);
let mut b = core::Block::with_reward(head, txs, output, kernel).unwrap();
debug!(
LOGGER,
"(Server ID: {}) Built new block with {} inputs and {} outputs, difficulty: {}",
self.debug_output_id,
b.inputs.len(),
@ -558,16 +565,13 @@ impl Miner {
b.header.nonce = rng.gen();
b.header.difficulty = difficulty;
b.header.timestamp = time::at_utc(time::Timespec::new(now_sec, 0));
self.chain.set_sumtree_roots(&mut b).expect(
"Error setting sum tree roots",
);
self.chain
.set_sumtree_roots(&mut b)
.expect("Error setting sum tree roots");
(b, block_fees)
}
fn get_coinbase(
&self,
block_fees: BlockFees,
) -> (core::Output, core::TxKernel, BlockFees) {
fn get_coinbase(&self, block_fees: BlockFees) -> (core::Output, core::TxKernel, BlockFees) {
if self.config.burn_reward {
let keychain = Keychain::from_random_seed().unwrap();
let pubkey = keychain.derive_pubkey(1).unwrap();
@ -598,10 +602,10 @@ impl Miner {
let pubkey = ser::deserialize(&mut &pubkey_bin[..]).unwrap();
let block_fees = BlockFees {
pubkey: Some(pubkey),
.. block_fees
..block_fees
};
debug!("block_fees here: {:?}", block_fees);
debug!(LOGGER, "block_fees here: {:?}", block_fees);
(output, kernel, block_fees)
}

View file

@ -31,6 +31,7 @@ use tokio_core::reactor;
use tokio_timer::Timer;
use p2p;
use util::LOGGER;
const PEER_MAX_COUNT: u32 = 25;
const PEER_PREFERRED_COUNT: u32 = 8;
@ -44,11 +45,10 @@ pub struct Seeder {
}
impl Seeder {
pub fn new(
capabilities: p2p::Capabilities,
peer_store: Arc<p2p::PeerStore>,
p2p: Arc<p2p::Server>,
) -> Seeder {
pub fn new(capabilities: p2p::Capabilities,
peer_store: Arc<p2p::PeerStore>,
p2p: Arc<p2p::Server>)
-> Seeder {
Seeder {
peer_store: peer_store,
p2p: p2p,
@ -56,31 +56,27 @@ impl Seeder {
}
}
pub fn connect_and_monitor(
&self,
h: reactor::Handle,
seed_list: Box<Future<Item = Vec<SocketAddr>, Error = String>>,
) {
pub fn connect_and_monitor(&self,
h: reactor::Handle,
seed_list: Box<Future<Item = Vec<SocketAddr>, Error = String>>) {
// open a channel with a listener that connects every peer address sent below
// max peer count
let (tx, rx) = futures::sync::mpsc::unbounded();
h.spawn(self.listen_for_addrs(h.clone(), rx));
// check seeds and start monitoring connections
let seeder = self.connect_to_seeds(tx.clone(), seed_list).join(
self.monitor_peers(tx.clone()),
);
let seeder = self.connect_to_seeds(tx.clone(), seed_list)
.join(self.monitor_peers(tx.clone()));
h.spawn(seeder.map(|_| ()).map_err(|e| {
error!("Seeding or peer monitoring error: {}", e);
error!(LOGGER, "Seeding or peer monitoring error: {}", e);
()
}));
}
fn monitor_peers(
&self,
tx: mpsc::UnboundedSender<SocketAddr>,
) -> Box<Future<Item = (), Error = String>> {
fn monitor_peers(&self,
tx: mpsc::UnboundedSender<SocketAddr>)
-> Box<Future<Item = (), Error = String>> {
let peer_store = self.peer_store.clone();
let p2p_server = self.p2p.clone();
@ -95,9 +91,9 @@ impl Seeder {
let disconnected = p2p_server.clean_peers();
for p in disconnected {
if p.is_banned() {
debug!("Marking peer {} as banned.", p.info.addr);
let update_result =
peer_store.update_state(p.info.addr, p2p::State::Banned);
debug!(LOGGER, "Marking peer {} as banned.", p.info.addr);
let update_result = peer_store
.update_state(p.info.addr, p2p::State::Banned);
match update_result {
Ok(()) => {}
Err(_) => {}
@ -114,7 +110,11 @@ impl Seeder {
);
peers.retain(|p| !p2p_server.is_known(p.addr));
if peers.len() > 0 {
debug!("Got {} more peers from db, trying to connect.", peers.len());
debug!(
LOGGER,
"Got {} more peers from db, trying to connect.",
peers.len()
);
thread_rng().shuffle(&mut peers[..]);
let sz = min(PEER_PREFERRED_COUNT as usize, peers.len());
for p in &peers[0..sz] {
@ -131,11 +131,10 @@ impl Seeder {
// Check if we have any pre-existing peer in db. If so, start with those,
// otherwise use the seeds provided.
fn connect_to_seeds(
&self,
tx: mpsc::UnboundedSender<SocketAddr>,
seed_list: Box<Future<Item = Vec<SocketAddr>, Error = String>>,
) -> Box<Future<Item = (), Error = String>> {
fn connect_to_seeds(&self,
tx: mpsc::UnboundedSender<SocketAddr>,
seed_list: Box<Future<Item = Vec<SocketAddr>, Error = String>>)
-> Box<Future<Item = (), Error = String>> {
let peer_store = self.peer_store.clone();
// a thread pool is required so we don't block the event loop with a
@ -164,11 +163,11 @@ impl Seeder {
// connect to this first set of addresses
let sz = min(PEER_PREFERRED_COUNT as usize, peer_addrs.len());
for addr in &peer_addrs[0..sz] {
debug!("Connecting to seed: {}.", addr);
debug!(LOGGER, "Connecting to seed: {}.", addr);
tx.unbounded_send(*addr).unwrap();
}
if peer_addrs.len() == 0 {
warn!("No seeds were retrieved.");
warn!(LOGGER, "No seeds were retrieved.");
}
Ok(())
});
@ -179,17 +178,16 @@ impl Seeder {
/// addresses to and initiate a connection if the max peer count isn't
/// exceeded. A request for more peers is also automatically sent after
/// connection.
fn listen_for_addrs(
&self,
h: reactor::Handle,
rx: mpsc::UnboundedReceiver<SocketAddr>,
) -> Box<Future<Item = (), Error = ()>> {
fn listen_for_addrs(&self,
h: reactor::Handle,
rx: mpsc::UnboundedReceiver<SocketAddr>)
-> Box<Future<Item = (), Error = ()>> {
let capab = self.capabilities;
let p2p_store = self.peer_store.clone();
let p2p_server = self.p2p.clone();
let listener = rx.for_each(move |peer_addr| {
debug!("New peer address to connect to: {}.", peer_addr);
debug!(LOGGER, "New peer address to connect to: {}.", peer_addr);
let inner_h = h.clone();
if p2p_server.peer_count() < PEER_MAX_COUNT {
connect_and_req(
@ -226,8 +224,10 @@ pub fn web_seeds(h: reactor::Handle) -> Box<Future<Item = Vec<SocketAddr>, Error
})
.and_then(|res| {
// collect all chunks and split around whitespace to get a list of SocketAddr
res.body().collect().map_err(|e| e.to_string()).and_then(
|chunks| {
res.body()
.collect()
.map_err(|e| e.to_string())
.and_then(|chunks| {
let res = chunks.iter().fold("".to_string(), |acc, ref chunk| {
acc + str::from_utf8(&chunk[..]).unwrap()
});
@ -235,8 +235,7 @@ pub fn web_seeds(h: reactor::Handle) -> Box<Future<Item = Vec<SocketAddr>, Error
.map(|s| s.parse().unwrap())
.collect::<Vec<_>>();
Ok(addrs)
},
)
})
})
});
Box::new(seeds)
@ -244,9 +243,8 @@ pub fn web_seeds(h: reactor::Handle) -> Box<Future<Item = Vec<SocketAddr>, Error
/// Convenience function when the seed list is immediately known. Mostly used
/// for tests.
pub fn predefined_seeds(
addrs_str: Vec<String>,
) -> Box<Future<Item = Vec<SocketAddr>, Error = String>> {
pub fn predefined_seeds(addrs_str: Vec<String>)
-> Box<Future<Item = Vec<SocketAddr>, Error = String>> {
let seeds = future::ok(()).and_then(move |_| {
Ok(
addrs_str
@ -258,13 +256,12 @@ pub fn predefined_seeds(
Box::new(seeds)
}
fn connect_and_req(
capab: p2p::Capabilities,
peer_store: Arc<p2p::PeerStore>,
p2p: Arc<p2p::Server>,
h: reactor::Handle,
addr: SocketAddr,
) -> Box<Future<Item = (), Error = ()>> {
fn connect_and_req(capab: p2p::Capabilities,
peer_store: Arc<p2p::PeerStore>,
p2p: Arc<p2p::Server>,
h: reactor::Handle,
addr: SocketAddr)
-> Box<Future<Item = (), Error = ()>> {
let fut = p2p.connect_peer(addr, h).then(move |p| {
match p {
Ok(Some(p)) => {
@ -275,7 +272,7 @@ fn connect_and_req(
}
}
Err(e) => {
error!("Peer request error: {:?}", e);
error!(LOGGER, "Peer request error: {:?}", e);
let update_result = peer_store.update_state(addr, p2p::State::Defunct);
match update_result {
Ok(()) => {}

View file

@ -35,6 +35,7 @@ use seed;
use sync;
use types::*;
use pow;
use util::LOGGER;
use core::global;
@ -66,7 +67,7 @@ impl Server {
let forever = Timer::default()
.interval(time::Duration::from_secs(60))
.for_each(move |_| {
debug!("event loop running");
debug!(LOGGER, "event loop running");
Ok(())
})
.map_err(|_| ());
@ -79,9 +80,10 @@ impl Server {
pub fn future(mut config: ServerConfig, evt_handle: &reactor::Handle) -> Result<Server, Error> {
let pool_adapter = Arc::new(PoolToChainAdapter::new());
let tx_pool = Arc::new(RwLock::new(
pool::TransactionPool::new(config.pool_config.clone(), pool_adapter.clone()),
));
let tx_pool = Arc::new(RwLock::new(pool::TransactionPool::new(
config.pool_config.clone(),
pool_adapter.clone(),
)));
let chain_adapter = Arc::new(ChainToPoolAndNetAdapter::new(tx_pool.clone()));
@ -131,7 +133,7 @@ impl Server {
evt_handle.spawn(p2p_server.start(evt_handle.clone()).map_err(|_| ()));
info!("Starting rest apis at: {}", &config.api_http_addr);
info!(LOGGER, "Starting rest apis at: {}", &config.api_http_addr);
api::start_rest_apis(
config.api_http_addr.clone(),
@ -139,7 +141,7 @@ impl Server {
tx_pool.clone(),
);
warn!("Grin server started.");
warn!(LOGGER, "Grin server started.");
Ok(Server {
config: config,
evt_handle: evt_handle.clone(),
@ -174,9 +176,7 @@ impl Server {
let mut miner = miner::Miner::new(config.clone(), self.chain.clone(), self.tx_pool.clone());
miner.set_debug_output_id(format!("Port {}", self.config.p2p_config.unwrap().port));
thread::spawn(move || {
miner.run_loop(config.clone(), cuckoo_size as u32, proof_size);
});
thread::spawn(move || { miner.run_loop(config.clone(), cuckoo_size as u32, proof_size); });
}
/// The chain head

View file

@ -28,6 +28,7 @@ use core::core::hash::{Hash, Hashed};
use chain;
use p2p;
use types::Error;
use util::LOGGER;
pub struct Syncer {
chain: Arc<chain::Chain>,
@ -58,7 +59,7 @@ impl Syncer {
/// Checks the local chain state, comparing it with our peers and triggers
/// syncing if required.
pub fn run(&self) -> Result<(), Error> {
debug!("Starting syncer.");
debug!(LOGGER, "Starting syncer.");
let start = Instant::now();
loop {
let pc = self.p2p.peer_count();
@ -76,7 +77,7 @@ impl Syncer {
// main syncing loop, requests more headers and bodies periodically as long
// as a peer with higher difficulty exists and we're not fully caught up
info!("Starting sync loop.");
info!(LOGGER, "Starting sync loop.");
loop {
let tip = self.chain.get_header_head()?;
// TODO do something better (like trying to get more) if we lose peers
@ -107,7 +108,7 @@ impl Syncer {
thread::sleep(Duration::from_secs(2));
}
info!("Sync done.");
info!(LOGGER, "Sync done.");
Ok(())
}
@ -130,6 +131,7 @@ impl Syncer {
}
debug!(
LOGGER,
"Added {} full block hashes to download.",
blocks_to_download.len()
);
@ -162,6 +164,7 @@ impl Syncer {
blocks_downloading.push((h, Instant::now()));
}
debug!(
LOGGER,
"Requesting more full block hashes to download, total: {}.",
blocks_to_download.len()
);
@ -187,6 +190,7 @@ impl Syncer {
let locator = self.get_locator(&tip)?;
if let Some(p) = peer {
debug!(
LOGGER,
"Asking peer {} for more block headers starting from {} at {}.",
p.info.addr,
tip.last_block_h,
@ -194,7 +198,7 @@ impl Syncer {
);
p.send_header_request(locator)?;
} else {
warn!("Could not get most worked peer to request headers.");
warn!(LOGGER, "Could not get most worked peer to request headers.");
}
Ok(())
}

View file

@ -23,7 +23,6 @@ extern crate grin_pow as pow;
extern crate secp256k1zkp as secp;
extern crate blake2_rfc as blake2;
extern crate env_logger;
extern crate futures;
extern crate tokio_core;
extern crate tokio_timer;

View file

@ -21,7 +21,6 @@ extern crate grin_wallet as wallet;
extern crate grin_pow as pow;
extern crate secp256k1zkp as secp;
extern crate env_logger;
extern crate futures;
extern crate tokio_core;
extern crate tokio_timer;
@ -49,7 +48,6 @@ use framework::{LocalServerContainer, LocalServerContainerConfig, LocalServerCon
/// Block and mining into a wallet for a bit
#[test]
fn basic_genesis_mine() {
let _ = env_logger::init();
global::set_mining_mode(MiningParameterMode::AutomatedTesting);
let test_name_dir = "genesis_mine";
@ -80,7 +78,6 @@ fn basic_genesis_mine() {
/// messages they all end up connected.
#[test]
fn simulate_seeding() {
let _ = env_logger::init();
global::set_mining_mode(MiningParameterMode::AutomatedTesting);
let test_name_dir = "simulate_seeding";
@ -134,7 +131,6 @@ fn simulate_seeding() {
//#[test]
#[allow(dead_code)]
fn simulate_parallel_mining() {
let _ = env_logger::init();
global::set_mining_mode(MiningParameterMode::AutomatedTesting);
let test_name_dir = "simulate_parallel_mining";
@ -190,7 +186,6 @@ fn simulate_parallel_mining() {
/// gets propagated to all.
#[test]
fn a_simulate_block_propagation() {
env_logger::init();
global::set_mining_mode(MiningParameterMode::AutomatedTesting);
let test_name_dir = "grin-prop";
@ -262,7 +257,6 @@ fn a_simulate_block_propagation() {
/// them and check that the 2nd gets all the blocks
#[test]
fn simulate_full_sync() {
env_logger::init();
global::set_mining_mode(MiningParameterMode::AutomatedTesting);
let test_name_dir = "grin-sync";

View file

@ -8,7 +8,7 @@ workspace = ".."
bitflags = "^0.7.0"
byteorder = "^0.5"
futures = "^0.1.15"
log = "^0.3"
slog = "^2.0.12"
net2 = "0.2.0"
rand = "^0.3"
serde = "~1.0.8"
@ -24,6 +24,3 @@ num = "^0.1.36"
grin_core = { path = "../core" }
grin_store = { path = "../store" }
grin_util = { path = "../util" }
[dev-dependencies]
env_logger = "^0.3"

View file

@ -34,6 +34,7 @@ use core::ser;
use msg::*;
use types::Error;
use rate_limit::*;
use util::LOGGER;
/// Handler to provide to the connection, will be called back anytime a message
/// is received. The provided sender can be use to immediately send back
@ -42,26 +43,22 @@ pub trait Handler: Sync + Send {
/// Handle function to implement to process incoming messages. A sender to
/// reply immediately as well as the message header and its unparsed body
/// are provided.
fn handle(
&self,
sender: UnboundedSender<Vec<u8>>,
header: MsgHeader,
body: Vec<u8>,
) -> Result<Option<Hash>, ser::Error>;
fn handle(&self,
sender: UnboundedSender<Vec<u8>>,
header: MsgHeader,
body: Vec<u8>)
-> Result<Option<Hash>, ser::Error>;
}
impl<F> Handler for F
where
F: Fn(UnboundedSender<Vec<u8>>, MsgHeader, Vec<u8>)
-> Result<Option<Hash>, ser::Error>,
F: Sync + Send,
where F: Fn(UnboundedSender<Vec<u8>>, MsgHeader, Vec<u8>) -> Result<Option<Hash>, ser::Error>,
F: Sync + Send
{
fn handle(
&self,
sender: UnboundedSender<Vec<u8>>,
header: MsgHeader,
body: Vec<u8>,
) -> Result<Option<Hash>, ser::Error> {
fn handle(&self,
sender: UnboundedSender<Vec<u8>>,
header: MsgHeader,
body: Vec<u8>)
-> Result<Option<Hash>, ser::Error> {
self(sender, header, body)
}
}
@ -91,12 +88,10 @@ impl Connection {
/// Start listening on the provided connection and wraps it. Does not hang
/// the current thread, instead just returns a future and the Connection
/// itself.
pub fn listen<F>(
conn: TcpStream,
handler: F,
) -> (Connection, Box<Future<Item = (), Error = Error>>)
where
F: Handler + 'static,
pub fn listen<F>(conn: TcpStream,
handler: F)
-> (Connection, Box<Future<Item = (), Error = Error>>)
where F: Handler + 'static
{
let (reader, writer) = conn.split();
@ -111,9 +106,9 @@ impl Connection {
// same for closing the connection
let (close_tx, close_rx) = futures::sync::mpsc::channel(1);
let close_conn = close_rx.for_each(|_| Ok(())).map_err(
|_| Error::ConnectionClose,
);
let close_conn = close_rx
.for_each(|_| Ok(()))
.map_err(|_| Error::ConnectionClose);
let me = Connection {
outbound_chan: tx.clone(),
@ -143,13 +138,11 @@ impl Connection {
/// Prepares the future that gets message data produced by our system and
/// sends it to the peer connection
fn write_msg<W>(
&self,
rx: UnboundedReceiver<Vec<u8>>,
writer: W,
) -> Box<Future<Item = W, Error = Error>>
where
W: AsyncWrite + 'static,
fn write_msg<W>(&self,
rx: UnboundedReceiver<Vec<u8>>,
writer: W)
-> Box<Future<Item = W, Error = Error>>
where W: AsyncWrite + 'static
{
let sent_bytes = self.sent_bytes.clone();
@ -170,15 +163,13 @@ impl Connection {
/// Prepares the future reading from the peer connection, parsing each
/// message and forwarding them appropriately based on their type
fn read_msg<F, R>(
&self,
sender: UnboundedSender<Vec<u8>>,
reader: R,
handler: F,
) -> Box<Future<Item = R, Error = Error>>
where
F: Handler + 'static,
R: AsyncRead + 'static,
fn read_msg<F, R>(&self,
sender: UnboundedSender<Vec<u8>>,
reader: R,
handler: F)
-> Box<Future<Item = R, Error = Error>>
where F: Handler + 'static,
R: AsyncRead + 'static
{
// infinite iterator stream so we repeat the message reading logic until the
@ -215,7 +206,7 @@ impl Connection {
// and handle the different message types
let msg_type = header.msg_type;
if let Err(e) = handler.handle(sender_inner.clone(), header, buf) {
debug!("Invalid {:?} message: {}", msg_type, e);
debug!(LOGGER, "Invalid {:?} message: {}", msg_type, e);
return Err(Error::Serialization(e));
}
@ -232,15 +223,12 @@ impl Connection {
let mut body_data = vec![];
try!(ser::serialize(&mut body_data, body));
let mut data = vec![];
try!(ser::serialize(
&mut data,
&MsgHeader::new(t, body_data.len() as u64),
));
try!(ser::serialize(&mut data, &MsgHeader::new(t, body_data.len() as u64)));
data.append(&mut body_data);
self.outbound_chan.unbounded_send(data).map_err(
|_| Error::ConnectionClose,
)
self.outbound_chan
.unbounded_send(data)
.map_err(|_| Error::ConnectionClose)
}
/// Bytes sent and received by this peer to the remote peer.
@ -261,12 +249,10 @@ pub struct TimeoutConnection {
impl TimeoutConnection {
/// Same as Connection
pub fn listen<F>(
conn: TcpStream,
handler: F,
) -> (TimeoutConnection, Box<Future<Item = (), Error = Error>>)
where
F: Handler + 'static,
pub fn listen<F>(conn: TcpStream,
handler: F)
-> (TimeoutConnection, Box<Future<Item = (), Error = Error>>)
where F: Handler + 'static
{
let expects = Arc::new(Mutex::new(vec![]));
@ -310,21 +296,17 @@ impl TimeoutConnection {
underlying: conn,
expected_responses: expects,
};
(
me,
Box::new(fut.select(timer).map(|_| ()).map_err(|(e1, _)| e1)),
)
(me, Box::new(fut.select(timer).map(|_| ()).map_err(|(e1, _)| e1)))
}
/// Sends a request and registers a timer on the provided message type and
/// optionally the hash of the sent data.
pub fn send_request<W: ser::Writeable>(
&self,
t: Type,
rt: Type,
body: &W,
expect_h: Option<(Hash)>,
) -> Result<(), Error> {
pub fn send_request<W: ser::Writeable>(&self,
t: Type,
rt: Type,
body: &W,
expect_h: Option<(Hash)>)
-> Result<(), Error> {
let _sent = try!(self.underlying.send_msg(t, body));
let mut expects = self.expected_responses.lock().unwrap();

View file

@ -26,6 +26,7 @@ use core::ser;
use msg::*;
use types::*;
use protocol::ProtocolV1;
use util::LOGGER;
const NONCES_CAP: usize = 100;
@ -47,13 +48,12 @@ impl Handshake {
}
/// Handles connecting to a new remote peer, starting the version handshake.
pub fn connect(
&self,
capab: Capabilities,
total_difficulty: Difficulty,
self_addr: SocketAddr,
conn: TcpStream,
) -> Box<Future<Item = (TcpStream, ProtocolV1, PeerInfo), Error = Error>> {
pub fn connect(&self,
capab: Capabilities,
total_difficulty: Difficulty,
self_addr: SocketAddr,
conn: TcpStream)
-> Box<Future<Item = (TcpStream, ProtocolV1, PeerInfo), Error = Error>> {
// prepare the first part of the hanshake
let nonce = self.next_nonce();
let hand = Hand {
@ -85,7 +85,7 @@ impl Handshake {
total_difficulty: shake.total_difficulty,
};
info!("Connected to peer {:?}", peer_info);
info!(LOGGER, "Connected to peer {:?}", peer_info);
// when more than one protocol version is supported, choosing should go here
Ok((conn, ProtocolV1::new(), peer_info))
}
@ -95,12 +95,11 @@ impl Handshake {
/// Handles receiving a connection from a new remote peer that started the
/// version handshake.
pub fn handshake(
&self,
capab: Capabilities,
total_difficulty: Difficulty,
conn: TcpStream,
) -> Box<Future<Item = (TcpStream, ProtocolV1, PeerInfo), Error = Error>> {
pub fn handshake(&self,
capab: Capabilities,
total_difficulty: Difficulty,
conn: TcpStream)
-> Box<Future<Item = (TcpStream, ProtocolV1, PeerInfo), Error = Error>> {
let nonces = self.nonces.clone();
Box::new(
read_msg::<Hand>(conn)
@ -139,7 +138,7 @@ impl Handshake {
Ok((conn, shake, peer_info))
})
.and_then(|(conn, shake, peer_info)| {
debug!("Success handshake with {}.", peer_info.addr);
debug!(LOGGER, "Success handshake with {}.", peer_info.addr);
write_msg(conn, shake, Type::Shake)
// when more than one protocol version is supported, choosing should go here
.map(|conn| (conn, ProtocolV1::new(), peer_info))

View file

@ -29,7 +29,7 @@ extern crate grin_core as core;
extern crate grin_store;
extern crate grin_util as util;
#[macro_use]
extern crate log;
extern crate slog;
extern crate futures;
extern crate tokio_core;
extern crate tokio_io;

View file

@ -70,8 +70,7 @@ enum_from_primitive! {
/// the header first, handles its validation and then reads the Readable body,
/// allocating buffers of the right size.
pub fn read_msg<T>(conn: TcpStream) -> Box<Future<Item = (TcpStream, T), Error = Error>>
where
T: Readable + 'static,
where T: Readable + 'static
{
let read_header = read_exact(conn, vec![0u8; HEADER_LEN as usize])
.from_err()
@ -99,13 +98,11 @@ where
/// Future combinator to write a full message from a Writeable payload.
/// Serializes the payload first and then sends the message header and that
/// payload.
pub fn write_msg<T>(
conn: TcpStream,
msg: T,
msg_type: Type,
) -> Box<Future<Item = TcpStream, Error = Error>>
where
T: Writeable + 'static,
pub fn write_msg<T>(conn: TcpStream,
msg: T,
msg_type: Type)
-> Box<Future<Item = TcpStream, Error = Error>>
where T: Writeable + 'static
{
let write_msg = ok((conn)).and_then(move |conn| {
// prepare the body first so we know its serialized length
@ -226,9 +223,7 @@ impl Readable for Hand {
let receiver_addr = try!(SockAddr::read(reader));
let ua = try!(reader.read_vec());
let user_agent = try!(String::from_utf8(ua).map_err(|_| ser::Error::CorruptedData));
let capabilities = try!(Capabilities::from_bits(capab).ok_or(
ser::Error::CorruptedData,
));
let capabilities = try!(Capabilities::from_bits(capab).ok_or(ser::Error::CorruptedData));
Ok(Hand {
version: version,
capabilities: capabilities,
@ -275,9 +270,7 @@ impl Readable for Shake {
let total_diff = try!(Difficulty::read(reader));
let ua = try!(reader.read_vec());
let user_agent = try!(String::from_utf8(ua).map_err(|_| ser::Error::CorruptedData));
let capabilities = try!(Capabilities::from_bits(capab).ok_or(
ser::Error::CorruptedData,
));
let capabilities = try!(Capabilities::from_bits(capab).ok_or(ser::Error::CorruptedData));
Ok(Shake {
version: version,
capabilities: capabilities,
@ -302,9 +295,7 @@ impl Writeable for GetPeerAddrs {
impl Readable for GetPeerAddrs {
fn read(reader: &mut Reader) -> Result<GetPeerAddrs, ser::Error> {
let capab = try!(reader.read_u32());
let capabilities = try!(Capabilities::from_bits(capab).ok_or(
ser::Error::CorruptedData,
));
let capabilities = try!(Capabilities::from_bits(capab).ok_or(ser::Error::CorruptedData));
Ok(GetPeerAddrs { capabilities: capabilities })
}
}
@ -361,9 +352,7 @@ impl Writeable for PeerError {
impl Readable for PeerError {
fn read(reader: &mut Reader) -> Result<PeerError, ser::Error> {
let (code, msg) = ser_multiread!(reader, read_u32, read_vec);
let message = try!(String::from_utf8(msg).map_err(
|_| ser::Error::CorruptedData,
));
let message = try!(String::from_utf8(msg).map_err(|_| ser::Error::CorruptedData));
Ok(PeerError {
code: code,
message: message,

View file

@ -23,6 +23,7 @@ use core::core::hash::Hash;
use core::core::target::Difficulty;
use handshake::Handshake;
use types::*;
use util::LOGGER;
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum State {
@ -42,58 +43,48 @@ unsafe impl Send for Peer {}
impl Peer {
/// Initiates the handshake with another peer.
pub fn connect(
conn: TcpStream,
capab: Capabilities,
total_difficulty: Difficulty,
self_addr: SocketAddr,
hs: &Handshake,
) -> Box<Future<Item = (TcpStream, Peer), Error = Error>> {
pub fn connect(conn: TcpStream,
capab: Capabilities,
total_difficulty: Difficulty,
self_addr: SocketAddr,
hs: &Handshake)
-> Box<Future<Item = (TcpStream, Peer), Error = Error>> {
let connect_peer = hs.connect(capab, total_difficulty, self_addr, conn)
.and_then(|(conn, proto, info)| {
Ok((
conn,
Peer {
info: info,
proto: Box::new(proto),
state: Arc::new(RwLock::new(State::Connected)),
},
))
Ok((conn,
Peer {
info: info,
proto: Box::new(proto),
state: Arc::new(RwLock::new(State::Connected)),
}))
});
Box::new(connect_peer)
}
/// Accept a handshake initiated by another peer.
pub fn accept(
conn: TcpStream,
capab: Capabilities,
total_difficulty: Difficulty,
hs: &Handshake,
) -> Box<Future<Item = (TcpStream, Peer), Error = Error>> {
let hs_peer = hs.handshake(capab, total_difficulty, conn).and_then(
|(conn,
proto,
info)| {
Ok((
conn,
Peer {
info: info,
proto: Box::new(proto),
state: Arc::new(RwLock::new(State::Connected)),
},
))
},
);
pub fn accept(conn: TcpStream,
capab: Capabilities,
total_difficulty: Difficulty,
hs: &Handshake)
-> Box<Future<Item = (TcpStream, Peer), Error = Error>> {
let hs_peer = hs.handshake(capab, total_difficulty, conn)
.and_then(|(conn, proto, info)| {
Ok((conn,
Peer {
info: info,
proto: Box::new(proto),
state: Arc::new(RwLock::new(State::Connected)),
}))
});
Box::new(hs_peer)
}
/// Main peer loop listening for messages and forwarding to the rest of the
/// system.
pub fn run(
&self,
conn: TcpStream,
na: Arc<NetAdapter>,
) -> Box<Future<Item = (), Error = Error>> {
pub fn run(&self,
conn: TcpStream,
na: Arc<NetAdapter>)
-> Box<Future<Item = (), Error = Error>> {
let addr = self.info.addr;
let state = self.state.clone();
@ -103,17 +94,17 @@ impl Peer {
match res {
Ok(_) => {
*state = State::Disconnected;
info!("Client {} disconnected.", addr);
info!(LOGGER, "Client {} disconnected.", addr);
Ok(())
}
Err(Error::Serialization(e)) => {
*state = State::Banned;
info!("Client {} corrupted, ban.", addr);
info!(LOGGER, "Client {} corrupted, ban.", addr);
Err(Error::Serialization(e))
}
Err(_) => {
*state = State::Disconnected;
info!("Client {} connection lost.", addr);
info!(LOGGER, "Client {} connection lost.", addr);
Ok(())
}
}
@ -153,12 +144,17 @@ impl Peer {
}
pub fn send_block_request(&self, h: Hash) -> Result<(), Error> {
debug!("Requesting block {} from peer {}.", h, self.info.addr);
debug!(
LOGGER,
"Requesting block {} from peer {}.",
h,
self.info.addr
);
self.proto.send_block_request(h)
}
pub fn send_peer_request(&self, capab: Capabilities) -> Result<(), Error> {
debug!("Asking {} for more peers.", self.info.addr);
debug!(LOGGER, "Asking {} for more peers.", self.info.addr);
self.proto.send_peer_request(capab)
}

View file

@ -24,6 +24,7 @@ use core::ser;
use conn::TimeoutConnection;
use msg::*;
use types::*;
use util::LOGGER;
use util::OneTime;
#[allow(dead_code)]
@ -44,11 +45,10 @@ impl ProtocolV1 {
impl Protocol for ProtocolV1 {
/// Sets up the protocol reading, writing and closing logic.
fn handle(
&self,
conn: TcpStream,
adapter: Arc<NetAdapter>,
) -> Box<Future<Item = (), Error = Error>> {
fn handle(&self,
conn: TcpStream,
adapter: Arc<NetAdapter>)
-> Box<Future<Item = (), Error = Error>> {
let (conn, listener) = TimeoutConnection::listen(conn, move |sender, header, data| {
let adapt = adapter.as_ref();
@ -114,23 +114,21 @@ impl ProtocolV1 {
self.conn.borrow().send_msg(t, body)
}
fn send_request<W: ser::Writeable>(
&self,
t: Type,
rt: Type,
body: &W,
expect_resp: Option<Hash>,
) -> Result<(), Error> {
fn send_request<W: ser::Writeable>(&self,
t: Type,
rt: Type,
body: &W,
expect_resp: Option<Hash>)
-> Result<(), Error> {
self.conn.borrow().send_request(t, rt, body, expect_resp)
}
}
fn handle_payload(
adapter: &NetAdapter,
sender: UnboundedSender<Vec<u8>>,
header: MsgHeader,
buf: Vec<u8>,
) -> Result<Option<Hash>, ser::Error> {
fn handle_payload(adapter: &NetAdapter,
sender: UnboundedSender<Vec<u8>>,
header: MsgHeader,
buf: Vec<u8>)
-> Result<Option<Hash>, ser::Error> {
match header.msg_type {
Type::Ping => {
let data = ser::ser_vec(&MsgHeader::new(Type::Pong, 0))?;
@ -173,10 +171,7 @@ fn handle_payload(
// serialize and send all the headers over
let mut body_data = vec![];
try!(ser::serialize(
&mut body_data,
&Headers { headers: headers },
));
try!(ser::serialize(&mut body_data, &Headers { headers: headers }));
let mut data = vec![];
try!(ser::serialize(
&mut data,
@ -220,7 +215,7 @@ fn handle_payload(
Ok(None)
}
_ => {
debug!("unknown message type {:?}", header.msg_type);
debug!(LOGGER, "unknown message type {:?}", header.msg_type);
Ok(None)
}
}

View file

@ -77,10 +77,7 @@ impl<R: AsyncRead> io::Read for ThrottledReader<R> {
// Check if Allowed
if self.allowed < 1 {
return Err(io::Error::new(
io::ErrorKind::WouldBlock,
"Reached Allowed Read Limit",
));
return Err(io::Error::new(io::ErrorKind::WouldBlock, "Reached Allowed Read Limit"));
}
// Read Max Allowed
@ -158,10 +155,7 @@ impl<W: AsyncWrite> io::Write for ThrottledWriter<W> {
// Check if Allowed
if self.allowed < 1 {
return Err(io::Error::new(
io::ErrorKind::WouldBlock,
"Reached Allowed Write Limit",
));
return Err(io::Error::new(io::ErrorKind::WouldBlock, "Reached Allowed Write Limit"));
}
// Write max allowed

View file

@ -34,6 +34,7 @@ use core::core::target::Difficulty;
use handshake::Handshake;
use peer::Peer;
use types::*;
use util::LOGGER;
/// A no-op network adapter used for testing.
pub struct DummyAdapter {}
@ -88,7 +89,7 @@ impl Server {
pub fn start(&self, h: reactor::Handle) -> Box<Future<Item = (), Error = Error>> {
let addr = SocketAddr::new(self.config.host, self.config.port);
let socket = TcpListener::bind(&addr, &h.clone()).unwrap();
warn!("P2P server started on {}", addr);
warn!(LOGGER, "P2P server started on {}", addr);
let hs = Arc::new(Handshake::new());
let peers = self.peers.clone();
@ -118,7 +119,7 @@ impl Server {
let server = peers.for_each(move |peer| {
hs.spawn(peer.then(|res| {
match res {
Err(e) => info!("Client error: {:?}", e),
Err(e) => info!(LOGGER, "Client error: {:?}", e),
_ => {}
}
futures::finished(())
@ -143,11 +144,10 @@ impl Server {
}
/// Asks the server to connect to a new peer.
pub fn connect_peer(
&self,
addr: SocketAddr,
h: reactor::Handle,
) -> Box<Future<Item = Option<Arc<Peer>>, Error = Error>> {
pub fn connect_peer(&self,
addr: SocketAddr,
h: reactor::Handle)
-> Box<Future<Item = Option<Arc<Peer>>, Error = Error>> {
if let Some(p) = self.get_peer(addr) {
// if we're already connected to the addr, just return the peer
return Box::new(future::ok(Some(p)));
@ -164,7 +164,7 @@ impl Server {
let capab = self.capabilities.clone();
let self_addr = SocketAddr::new(self.config.host, self.config.port);
debug!("{} connecting to {}", self_addr, addr);
debug!(LOGGER, "{} connecting to {}", self_addr, addr);
let socket = TcpStream::connect(&addr, &h).map_err(|e| Error::Connection(e));
let h2 = h.clone();
@ -182,7 +182,7 @@ impl Server {
})
.and_then(move |(socket, peer)| {
h2.spawn(peer.run(socket, adapter2).map_err(|e| {
error!("Peer error: {:?}", e);
error!(LOGGER, "Peer error: {:?}", e);
()
}));
Ok(Some(peer))
@ -264,7 +264,7 @@ impl Server {
for p in peers.deref() {
if p.is_connected() {
if let Err(e) = p.send_block(b) {
debug!("Error sending block to peer: {:?}", e);
debug!(LOGGER, "Error sending block to peer: {:?}", e);
}
}
}
@ -286,13 +286,11 @@ impl Server {
}
// Adds the peer built by the provided future in the peers map
fn add_to_peers<A>(
peers: Arc<RwLock<Vec<Arc<Peer>>>>,
adapter: Arc<NetAdapter>,
peer_fut: A,
) -> Box<Future<Item = Result<(TcpStream, Arc<Peer>), ()>, Error = Error>>
where
A: IntoFuture<Item = (TcpStream, Peer), Error = Error> + 'static,
fn add_to_peers<A>(peers: Arc<RwLock<Vec<Arc<Peer>>>>,
adapter: Arc<NetAdapter>,
peer_fut: A)
-> Box<Future<Item = Result<(TcpStream, Arc<Peer>), ()>, Error = Error>>
where A: IntoFuture<Item = (TcpStream, Peer), Error = Error> + 'static
{
let peer_add = peer_fut.into_future().map(move |(conn, peer)| {
adapter.peer_connected(&peer.info);
@ -305,17 +303,15 @@ where
}
// Adds a timeout to a future
fn with_timeout<T: 'static>(
fut: Box<Future<Item = Result<T, ()>, Error = Error>>,
h: &reactor::Handle,
) -> Box<Future<Item = T, Error = Error>> {
fn with_timeout<T: 'static>(fut: Box<Future<Item = Result<T, ()>, Error = Error>>,
h: &reactor::Handle)
-> Box<Future<Item = T, Error = Error>> {
let timeout = reactor::Timeout::new(Duration::new(5, 0), h).unwrap();
let timed = fut.select(timeout.map(Err).from_err()).then(
|res| match res {
let timed = fut.select(timeout.map(Err).from_err())
.then(|res| match res {
Ok((Ok(inner), _timeout)) => Ok(inner),
Ok((_, _accept)) => Err(Error::Timeout),
Err((e, _other)) => Err(e),
},
);
});
Box::new(timed)
}

View file

@ -67,10 +67,10 @@ impl Readable for PeerData {
fn read(reader: &mut Reader) -> Result<PeerData, ser::Error> {
let addr = SockAddr::read(reader)?;
let (capab, ua, fl) = ser_multiread!(reader, read_u32, read_vec, read_u8);
let user_agent = String::from_utf8(ua).map_err(|_| ser::Error::CorruptedData)?;
let capabilities = Capabilities::from_bits(capab).ok_or(
ser::Error::CorruptedData,
)?;
let user_agent = String::from_utf8(ua)
.map_err(|_| ser::Error::CorruptedData)?;
let capabilities = Capabilities::from_bits(capab)
.ok_or(ser::Error::CorruptedData)?;
match State::from_u8(fl) {
Some(flags) => {
Ok(PeerData {
@ -109,22 +109,18 @@ impl PeerStore {
}
pub fn exists_peer(&self, peer_addr: SocketAddr) -> Result<bool, Error> {
self.db.exists(
&to_key(PEER_PREFIX, &mut format!("{}", peer_addr).into_bytes())[..],
)
self.db
.exists(&to_key(PEER_PREFIX, &mut format!("{}", peer_addr).into_bytes())[..])
}
pub fn delete_peer(&self, peer_addr: SocketAddr) -> Result<(), Error> {
self.db.delete(
&to_key(PEER_PREFIX, &mut format!("{}", peer_addr).into_bytes())[..],
)
self.db
.delete(&to_key(PEER_PREFIX, &mut format!("{}", peer_addr).into_bytes())[..])
}
pub fn find_peers(&self, state: State, cap: Capabilities, count: usize) -> Vec<PeerData> {
let peers_iter = self.db.iter::<PeerData>(&to_key(
PEER_PREFIX,
&mut "".to_string().into_bytes(),
));
let peers_iter = self.db
.iter::<PeerData>(&to_key(PEER_PREFIX, &mut "".to_string().into_bytes()));
let mut peers = Vec::with_capacity(count);
for p in peers_iter {
if p.flags == state && p.capabilities.contains(cap) {

View file

@ -118,7 +118,7 @@ pub trait Protocol {
/// block so needs to be called withing a coroutine. Should also be called
/// only once.
fn handle(&self, conn: TcpStream, na: Arc<NetAdapter>)
-> Box<Future<Item = (), Error = Error>>;
-> Box<Future<Item = (), Error = Error>>;
/// Sends a ping message to the remote peer.
fn send_ping(&self) -> Result<(), Error>;

View file

@ -14,7 +14,6 @@
extern crate grin_core as core;
extern crate grin_p2p as p2p;
extern crate env_logger;
extern crate futures;
extern crate tokio_core;
@ -33,7 +32,6 @@ use p2p::Peer;
// followed by a ping/pong exchange to make sure the connection is live.
#[test]
fn peer_handshake() {
env_logger::init().unwrap();
let mut evtlp = Core::new().unwrap();
let handle = evtlp.handle();

View file

@ -9,9 +9,9 @@ grin_core = { path = "../core" }
grin_keychain = { path = "../keychain" }
grin_store = { path = "../store" }
grin_p2p = { path = "../p2p" }
grin_util = { path = "../util" }
secp256k1zkp = { git = "https://github.com/mimblewimble/rust-secp256k1-zkp" }
serde = "~1.0.8"
serde_derive = "~1.0.8"
time = "^0.1"
rand = "0.3"
log = "0.3"

View file

@ -31,7 +31,6 @@ extern crate rand;
extern crate serde;
#[macro_use]
extern crate serde_derive;
extern crate log;
extern crate blake2_rfc as blake2;
extern crate grin_core as core;
extern crate grin_keychain as keychain;

View file

@ -8,17 +8,17 @@ workspace = ".."
blake2-rfc = "~0.2.17"
rand = "^0.3"
time = "^0.1"
env_logger="^0.3.5"
log = "^0.3"
slog = "^2.0.12"
lazy_static = "~0.2.8"
serde = "~1.0.8"
serde_derive = "~1.0.8"
grin_core = { path = "../core" }
grin_util = { path = "../util" }
[dependencies.cuckoo_miner]
git = "https://github.com/mimblewimble/cuckoo-miner"
tag="grin_integration_13"
tag="barrier_test"
#path = "../../cuckoo-miner"
#uncomment this feature to turn off plugin builds
#features=["no-plugin-build"]

View file

@ -314,8 +314,8 @@ impl Miner {
/// Utility to transform a 8 bytes of a byte array into a u64.
fn u8_to_u64(p: &[u8], i: usize) -> u64 {
(p[i] as u64) | (p[i + 1] as u64) << 8 | (p[i + 2] as u64) << 16 |
(p[i + 3] as u64) << 24 | (p[i + 4] as u64) << 32 | (p[i + 5] as u64) << 40 |
(p[i + 6] as u64) << 48 | (p[i + 7] as u64) << 56
(p[i + 3] as u64) << 24 | (p[i + 4] as u64) << 32 | (p[i + 5] as u64) << 40 |
(p[i + 6] as u64) << 48 | (p[i + 7] as u64) << 56
}
#[cfg(test)]
@ -324,183 +324,32 @@ mod test {
use core::core::Proof;
static V1: [u32; 42] = [
0x1fe9,
0x2050,
0x4581,
0x6322,
0x65ab,
0xb3c1,
0xc1a4,
0xe257,
0x106ae,
0x17b11,
0x202d4,
0x2705d,
0x2deb2,
0x2f80e,
0x32298,
0x34782,
0x35c5a,
0x37458,
0x38f28,
0x406b2,
0x40e34,
0x40fc6,
0x42220,
0x42d13,
0x46c0f,
0x4fd47,
0x55ad2,
0x598f7,
0x5aa8f,
0x62aa3,
0x65725,
0x65dcb,
0x671c7,
0x6eb20,
0x752fe,
0x7594f,
0x79b9c,
0x7f775,
0x81635,
0x8401c,
0x844e5,
0x89fa8,
];
static V2: [u32; 42] = [
0x2a37,
0x7557,
0xa3c3,
0xfce6,
0x1248e,
0x15837,
0x1827f,
0x18a93,
0x1a7dd,
0x1b56b,
0x1ceb4,
0x1f962,
0x1fe2a,
0x29cb9,
0x2f30e,
0x2f771,
0x336bf,
0x34355,
0x391d7,
0x39495,
0x3be0c,
0x463be,
0x4d0c2,
0x4eead,
0x50214,
0x520de,
0x52a86,
0x53818,
0x53b3b,
0x54c0b,
0x572fa,
0x5d79c,
0x5e3c2,
0x6769e,
0x6a0fe,
0x6d835,
0x6fc7c,
0x70f03,
0x79d4a,
0x7b03e,
0x81e09,
0x9bd44,
];
static V3: [u32; 42] = [
0x8158,
0x9f18,
0xc4ba,
0x108c7,
0x11caa,
0x13b82,
0x1618f,
0x1c83b,
0x1ec89,
0x24354,
0x28864,
0x2a0fb,
0x2ce50,
0x2e8fa,
0x32b36,
0x343e6,
0x34dc9,
0x36881,
0x3ffca,
0x40f79,
0x42721,
0x43b8c,
0x44b9d,
0x47ed3,
0x4cd34,
0x5278a,
0x5ab64,
0x5b4d4,
0x5d842,
0x5fa33,
0x6464e,
0x676ee,
0x685d6,
0x69df0,
0x6a5fd,
0x6bda3,
0x72544,
0x77974,
0x7908c,
0x80e67,
0x81ef4,
0x8d882,
];
static V1: [u32; 42] = [0x1fe9, 0x2050, 0x4581, 0x6322, 0x65ab, 0xb3c1, 0xc1a4, 0xe257,
0x106ae, 0x17b11, 0x202d4, 0x2705d, 0x2deb2, 0x2f80e, 0x32298,
0x34782, 0x35c5a, 0x37458, 0x38f28, 0x406b2, 0x40e34, 0x40fc6,
0x42220, 0x42d13, 0x46c0f, 0x4fd47, 0x55ad2, 0x598f7, 0x5aa8f,
0x62aa3, 0x65725, 0x65dcb, 0x671c7, 0x6eb20, 0x752fe, 0x7594f,
0x79b9c, 0x7f775, 0x81635, 0x8401c, 0x844e5, 0x89fa8];
static V2: [u32; 42] = [0x2a37, 0x7557, 0xa3c3, 0xfce6, 0x1248e, 0x15837, 0x1827f, 0x18a93,
0x1a7dd, 0x1b56b, 0x1ceb4, 0x1f962, 0x1fe2a, 0x29cb9, 0x2f30e,
0x2f771, 0x336bf, 0x34355, 0x391d7, 0x39495, 0x3be0c, 0x463be,
0x4d0c2, 0x4eead, 0x50214, 0x520de, 0x52a86, 0x53818, 0x53b3b,
0x54c0b, 0x572fa, 0x5d79c, 0x5e3c2, 0x6769e, 0x6a0fe, 0x6d835,
0x6fc7c, 0x70f03, 0x79d4a, 0x7b03e, 0x81e09, 0x9bd44];
static V3: [u32; 42] = [0x8158, 0x9f18, 0xc4ba, 0x108c7, 0x11caa, 0x13b82, 0x1618f, 0x1c83b,
0x1ec89, 0x24354, 0x28864, 0x2a0fb, 0x2ce50, 0x2e8fa, 0x32b36,
0x343e6, 0x34dc9, 0x36881, 0x3ffca, 0x40f79, 0x42721, 0x43b8c,
0x44b9d, 0x47ed3, 0x4cd34, 0x5278a, 0x5ab64, 0x5b4d4, 0x5d842,
0x5fa33, 0x6464e, 0x676ee, 0x685d6, 0x69df0, 0x6a5fd, 0x6bda3,
0x72544, 0x77974, 0x7908c, 0x80e67, 0x81ef4, 0x8d882];
// cuckoo28 at 50% edges of letter 'u'
static V4: [u32; 42] = [
0x1CBBFD,
0x2C5452,
0x520338,
0x6740C5,
0x8C6997,
0xC77150,
0xFD4972,
0x1060FA7,
0x11BFEA0,
0x1343E8D,
0x14CE02A,
0x1533515,
0x1715E61,
0x1996D9B,
0x1CB296B,
0x1FCA180,
0x209A367,
0x20AD02E,
0x23CD2E4,
0x2A3B360,
0x2DD1C0C,
0x333A200,
0x33D77BC,
0x3620C78,
0x3DD7FB8,
0x3FBFA49,
0x41BDED2,
0x4A86FD9,
0x570DE24,
0x57CAB86,
0x594B886,
0x5C74C94,
0x5DE7572,
0x60ADD6F,
0x635918B,
0x6C9E120,
0x6EFA583,
0x7394ACA,
0x7556A23,
0x77F70AA,
0x7CF750A,
0x7F60790,
];
static V4: [u32; 42] = [0x1CBBFD, 0x2C5452, 0x520338, 0x6740C5, 0x8C6997, 0xC77150, 0xFD4972,
0x1060FA7, 0x11BFEA0, 0x1343E8D, 0x14CE02A, 0x1533515, 0x1715E61,
0x1996D9B, 0x1CB296B, 0x1FCA180, 0x209A367, 0x20AD02E, 0x23CD2E4,
0x2A3B360, 0x2DD1C0C, 0x333A200, 0x33D77BC, 0x3620C78, 0x3DD7FB8,
0x3FBFA49, 0x41BDED2, 0x4A86FD9, 0x570DE24, 0x57CAB86, 0x594B886,
0x5C74C94, 0x5DE7572, 0x60ADD6F, 0x635918B, 0x6C9E120, 0x6EFA583,
0x7394ACA, 0x7556A23, 0x77F70AA, 0x7CF750A, 0x7F60790];
/// Find a 42-cycle on Cuckoo20 at 75% easiness and verifiy against a few
/// known cycle proofs
@ -519,51 +368,28 @@ mod test {
#[test]
fn validate20_vectors() {
assert!(Cuckoo::new(&[49], 20).verify(
Proof::new(V1.to_vec().clone()),
75,
));
assert!(Cuckoo::new(&[50], 20).verify(
Proof::new(V2.to_vec().clone()),
70,
));
assert!(Cuckoo::new(&[51], 20).verify(
Proof::new(V3.to_vec().clone()),
70,
));
assert!(Cuckoo::new(&[49], 20).verify(Proof::new(V1.to_vec().clone()), 75));
assert!(Cuckoo::new(&[50], 20).verify(Proof::new(V2.to_vec().clone()), 70));
assert!(Cuckoo::new(&[51], 20).verify(Proof::new(V3.to_vec().clone()), 70));
}
#[test]
fn validate28_vectors() {
let mut test_header = [0; 32];
test_header[0] = 24;
assert!(Cuckoo::new(&test_header, 28).verify(
Proof::new(V4.to_vec().clone()),
50,
));
assert!(Cuckoo::new(&test_header, 28).verify(Proof::new(V4.to_vec().clone()), 50));
}
#[test]
fn validate_fail() {
// edge checks
assert!(!Cuckoo::new(&[49], 20).verify(Proof::new(vec![0; 42]), 75));
assert!(!Cuckoo::new(&[49], 20).verify(
Proof::new(vec![0xffff; 42]),
75,
));
assert!(!Cuckoo::new(&[49], 20).verify(Proof::new(vec![0xffff; 42]), 75));
// wrong data for proof
assert!(!Cuckoo::new(&[50], 20).verify(
Proof::new(V1.to_vec().clone()),
75,
));
assert!(!Cuckoo::new(&[50], 20).verify(Proof::new(V1.to_vec().clone()), 75));
let mut test_header = [0; 32];
test_header[0] = 24;
assert!(!Cuckoo::new(&test_header, 20).verify(
Proof::new(
V4.to_vec().clone(),
),
50,
));
assert!(!Cuckoo::new(&test_header, 20).verify(Proof::new(V4.to_vec().clone()), 50));
}

View file

@ -34,13 +34,13 @@ extern crate time;
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate log;
extern crate env_logger;
extern crate slog;
extern crate serde;
#[macro_use]
extern crate serde_derive;
extern crate grin_core as core;
extern crate grin_util as util;
extern crate cuckoo_miner;
@ -63,9 +63,7 @@ use cuckoo::{Cuckoo, Error};
pub trait MiningWorker {
/// This only sets parameters and does initialisation work now
fn new(ease: u32, sizeshift: u32, proof_size: usize) -> Self
where
Self: Sized;
fn new(ease: u32, sizeshift: u32, proof_size: usize) -> Self where Self: Sized;
/// Actually perform a mining attempt on the given input and
/// return a proof if found
@ -85,11 +83,10 @@ pub fn verify_size(bh: &BlockHeader, cuckoo_sz: u32) -> bool {
/// Uses the much easier Cuckoo20 (mostly for
/// tests).
pub fn pow20<T: MiningWorker>(
miner: &mut T,
bh: &mut BlockHeader,
diff: Difficulty,
) -> Result<(), Error> {
pub fn pow20<T: MiningWorker>(miner: &mut T,
bh: &mut BlockHeader,
diff: Difficulty)
-> Result<(), Error> {
pow_size(miner, bh, diff, 20)
}
@ -99,7 +96,7 @@ pub fn pow20<T: MiningWorker>(
///
pub fn mine_genesis_block(miner_config: Option<types::MinerConfig>) -> Option<core::core::Block> {
info!("Starting miner loop for Genesis Block");
info!(util::LOGGER, "Starting miner loop for Genesis Block");
let mut gen = genesis::genesis();
let diff = gen.header.difficulty.clone();
@ -127,12 +124,11 @@ pub fn mine_genesis_block(miner_config: Option<types::MinerConfig>) -> Option<co
/// Mining Worker,
/// until the required difficulty target is reached. May take a while for a low
/// target...
pub fn pow_size<T: MiningWorker + ?Sized>(
miner: &mut T,
bh: &mut BlockHeader,
diff: Difficulty,
_: u32,
) -> Result<(), Error> {
pub fn pow_size<T: MiningWorker + ?Sized>(miner: &mut T,
bh: &mut BlockHeader,
diff: Difficulty,
_: u32)
-> Result<(), Error> {
let start_nonce = bh.nonce;
// if we're in production mode, try the pre-mined solution first

View file

@ -26,6 +26,7 @@ use core::global;
use core::core::Proof;
use types::MinerConfig;
use util::LOGGER;
use std::sync::Mutex;
@ -91,7 +92,7 @@ impl PluginMiner {
// Load from here instead
if let Some(ref c) = *loaded_config_ref {
debug!("Not re-loading plugin or directory.");
debug!(LOGGER, "Not re-loading plugin or directory.");
// this will load the associated plugin
let result = CuckooMiner::new(c.clone());
self.miner = Some(result.unwrap());
@ -104,6 +105,7 @@ impl PluginMiner {
if let Err(_) = result {
error!(
LOGGER,
"Unable to load cuckoo-miner plugin directory, either from configuration or [exe_path]/plugins."
);
panic!("Unable to load plugin directory... Please check configuration values");
@ -123,7 +125,12 @@ impl PluginMiner {
let mut config = CuckooMinerConfig::new();
info!("Mining plugin {} - {}", index, caps[0].full_path.clone());
info!(
LOGGER,
"Mining plugin {} - {}",
index,
caps[0].full_path.clone()
);
config.plugin_full_path = caps[0].full_path.clone();
if let Some(l) = miner_config.clone().cuckoo_miner_plugin_config {
if let Some(lp) = l[index].parameter_list.clone() {
@ -140,8 +147,8 @@ impl PluginMiner {
// this will load the associated plugin
let result = CuckooMiner::new(cuckoo_configs.clone());
if let Err(e) = result {
error!("Error initializing mining plugin: {:?}", e);
// error!("Accepted values are: {:?}", caps[0].parameters);
error!(LOGGER, "Error initializing mining plugin: {:?}", e);
// error!(LOGGER, "Accepted values are: {:?}", caps[0].parameters);
panic!("Unable to init mining plugin.");
}
@ -155,7 +162,7 @@ impl PluginMiner {
// this will load the associated plugin
let result = CuckooMiner::new(self.config.clone());
if let Err(e) = result {
error!("Error initializing mining plugin: {:?}", e);
error!(LOGGER, "Error initializing mining plugin: {:?}", e);
panic!("Unable to init mining plugin.");
}
result.unwrap()

View file

@ -14,11 +14,10 @@
//! Main for building the binary of a Grin peer-to-peer node.
#[macro_use]
extern crate slog;
extern crate clap;
extern crate daemonize;
#[macro_use]
extern crate log;
extern crate env_logger;
extern crate serde;
extern crate serde_json;
extern crate blake2_rfc as blake2;
@ -29,6 +28,7 @@ extern crate grin_wallet as wallet;
extern crate grin_keychain as keychain;
extern crate grin_config as config;
extern crate grin_core as core;
extern crate grin_util as util;
use std::thread;
use std::io::Read;
@ -42,9 +42,11 @@ use config::GlobalConfig;
use wallet::WalletConfig;
use core::global;
use keychain::Keychain;
use util::{LOGGER,init_logger};
fn start_from_config_file(mut global_config: GlobalConfig) {
info!(
LOGGER,
"Starting the Grin server from configuration file at {}",
global_config.config_file_path.unwrap().to_str().unwrap()
);
@ -67,7 +69,6 @@ fn start_from_config_file(mut global_config: GlobalConfig) {
}
fn main() {
env_logger::init().unwrap();
// First, load a global config object,
// then modify that object with any switches
@ -84,7 +85,10 @@ fn main() {
});
if global_config.using_config_file {
//initialise the logger
init_logger(global_config.members.as_mut().unwrap().logging.clone());
info!(
LOGGER,
"Using configuration file at: {}",
global_config
.config_file_path
@ -255,7 +259,7 @@ fn main() {
/// arguments
/// to build a proper configuration and runs Grin with that configuration.
fn server_command(server_args: &ArgMatches, global_config: GlobalConfig) {
info!("Starting the Grin server...");
info!(LOGGER, "Starting the Grin server...");
// just get defaults from the global config
let mut server_config = global_config.members.unwrap().server;
@ -305,8 +309,8 @@ fn server_command(server_args: &ArgMatches, global_config: GlobalConfig) {
}
});
match daemonize.start() {
Ok(_) => info!("Grin server succesfully started."),
Err(e) => error!("Error starting: {}", e),
Ok(_) => info!(LOGGER, "Grin server succesfully started."),
Err(e) => error!(LOGGER, "Error starting: {}", e),
}
}
("stop", _) => println!("TODO, just 'kill $pid' for now."),
@ -351,6 +355,7 @@ fn wallet_command(wallet_args: &ArgMatches) {
wallet::receive_json_tx(&wallet_config, &keychain, contents.as_str()).unwrap();
} else {
info!(
LOGGER,
"Starting the Grin wallet receiving daemon at {}...",
wallet_config.api_http_addr
);
@ -363,7 +368,7 @@ fn wallet_command(wallet_args: &ArgMatches) {
},
);
apis.start(wallet_config.api_http_addr).unwrap_or_else(|e| {
error!("Failed to start Grin wallet receiver: {}.", e);
error!(LOGGER, "Failed to start Grin wallet receiver: {}.", e);
});
}
}

View file

@ -7,13 +7,13 @@ workspace = ".."
[dependencies]
byteorder = "^0.5"
env_logger="^0.3.5"
log = "^0.3"
slog = "^2.0.12"
libc = "^0.2"
memmap = { git = "https://github.com/danburkert/memmap-rs" }
rocksdb = "^0.7.0"
grin_core = { path = "../core" }
grin_util = { path = "../util" }
[dev-dependencies]
env_logger="^0.3.5"
time = "^0.1"

View file

@ -22,9 +22,10 @@
extern crate byteorder;
extern crate grin_core as core;
extern crate grin_util as util;
extern crate libc;
#[macro_use]
extern crate log;
extern crate slog;
extern crate env_logger;
extern crate memmap;
extern crate rocksdb;
@ -110,9 +111,9 @@ impl Store {
/// Gets a value from the db, provided its key
pub fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>, Error> {
let db = self.rdb.read().unwrap();
db.get(key).map(|r| r.map(|o| o.to_vec())).map_err(
From::from,
)
db.get(key)
.map(|r| r.map(|o| o.to_vec()))
.map_err(From::from)
}
/// Gets a `Readable` value from the db, provided its key. Encapsulates
@ -124,11 +125,10 @@ impl Store {
/// Gets a `Readable` value from the db, provided its key, allowing to
/// extract only partial data. The underlying Readable size must align
/// accordingly. Encapsulates serialization.
pub fn get_ser_limited<T: ser::Readable>(
&self,
key: &[u8],
len: usize,
) -> Result<Option<T>, Error> {
pub fn get_ser_limited<T: ser::Readable>(&self,
key: &[u8],
len: usize)
-> Result<Option<T>, Error> {
let data = try!(self.get(key));
match data {
Some(val) => {
@ -213,16 +213,14 @@ impl<'a> Batch<'a> {
/// An iterator thad produces Readable instances back. Wraps the lower level
/// DBIterator and deserializes the returned values.
pub struct SerIterator<T>
where
T: ser::Readable,
where T: ser::Readable
{
iter: DBIterator,
_marker: PhantomData<T>,
}
impl<T> Iterator for SerIterator<T>
where
T: ser::Readable,
where T: ser::Readable
{
type Item = T;

View file

@ -29,6 +29,7 @@ use libc::{off_t as off64_t, ftruncate as ftruncate64};
use core::core::pmmr::{self, Summable, Backend, HashSum, VecBackend};
use core::ser;
use util::LOGGER;
const PMMR_DATA_FILE: &'static str = "pmmr_dat.bin";
const PMMR_RM_LOG_FILE: &'static str = "pmmr_rm_log.bin";
@ -263,8 +264,7 @@ fn include_tuple(v: &Vec<(u64, u32)>, e: u64) -> bool {
/// * A remove log tracks the positions that need to be pruned from the
/// main storage file.
pub struct PMMRBackend<T>
where
T: Summable + Clone,
where T: Summable + Clone
{
data_dir: String,
hashsum_file: AppendOnlyFile,
@ -279,16 +279,13 @@ where
}
impl<T> Backend<T> for PMMRBackend<T>
where
T: Summable + Clone,
where T: Summable + Clone
{
/// Append the provided HashSums to the backend storage.
#[allow(unused_variables)]
fn append(&mut self, position: u64, data: Vec<HashSum<T>>) -> Result<(), String> {
self.buffer.append(
position - (self.buffer_index as u64),
data.clone(),
)?;
self.buffer
.append(position - (self.buffer_index as u64), data.clone())?;
Ok(())
}
@ -322,6 +319,7 @@ where
Ok(hashsum) => Some(hashsum),
Err(e) => {
error!(
LOGGER,
"Corrupted storage, could not read an entry from sum tree store: {:?}",
e
);
@ -332,9 +330,9 @@ where
fn rewind(&mut self, position: u64, index: u32) -> Result<(), String> {
assert!(self.buffer.len() == 0, "Rewind on non empty buffer.");
self.remove_log.truncate(index).map_err(|e| {
format!("Could not truncate remove log: {}", e)
})?;
self.remove_log
.truncate(index)
.map_err(|e| format!("Could not truncate remove log: {}", e))?;
self.rewind = Some((position, index, self.buffer_index));
self.buffer_index = position as usize;
Ok(())
@ -346,21 +344,19 @@ where
for position in &positions {
let pos_sz = *position as usize;
if pos_sz > self.buffer_index &&
pos_sz - 1 < self.buffer_index + self.buffer.len()
{
pos_sz - 1 < self.buffer_index + self.buffer.len() {
self.buffer.remove(vec![*position], index).unwrap();
}
}
}
self.remove_log.append(positions, index).map_err(|e| {
format!("Could not write to log storage, disk full? {:?}", e)
})
self.remove_log
.append(positions, index)
.map_err(|e| format!("Could not write to log storage, disk full? {:?}", e))
}
}
impl<T> PMMRBackend<T>
where
T: Summable + Clone,
where T: Summable + Clone
{
/// Instantiates a new PMMR backend that will use the provided directly to
/// store its files.
@ -445,8 +441,7 @@ where
/// position index in db
pub fn check_compact(&mut self, max_len: usize) -> io::Result<()> {
if !(max_len > 0 && self.remove_log.len() > max_len ||
max_len == 0 && self.remove_log.len() > RM_LOG_MAX_NODES)
{
max_len == 0 && self.remove_log.len() > RM_LOG_MAX_NODES) {
return Ok(());
}
@ -456,6 +451,7 @@ where
if let None = self.pruned_nodes.pruned_pos(pos.0) {
// TODO we likely can recover from this by directly jumping to 3
error!(
LOGGER,
"The remove log contains nodes that are already in the pruned \
list, a previous compaction likely failed."
);
@ -475,11 +471,8 @@ where
(pos - 1 - shift.unwrap()) * record_len
})
.collect();
self.hashsum_file.save_prune(
tmp_prune_file.clone(),
to_rm,
record_len,
)?;
self.hashsum_file
.save_prune(tmp_prune_file.clone(), to_rm, record_len)?;
// 2. update the prune list and save it in place
for &(rm_pos, _) in &self.remove_log.removed[..] {
@ -508,8 +501,7 @@ where
// Read an ordered vector of scalars from a file.
fn read_ordered_vec<T>(path: String) -> io::Result<Vec<T>>
where
T: ser::Readable + cmp::Ord,
where T: ser::Readable + cmp::Ord
{
let file_path = Path::new(&path);
@ -551,16 +543,16 @@ where
}
fn write_vec<T>(path: String, v: &Vec<T>) -> io::Result<()>
where
T: ser::Writeable,
where T: ser::Writeable
{
let mut file_path = File::create(&path)?;
ser::serialize(&mut file_path, v).map_err(|_| {
io::Error::new(
io::ErrorKind::InvalidInput,
format!("Failed to serialize data when writing to {}", path),
)
})?;
ser::serialize(&mut file_path, v)
.map_err(|_| {
io::Error::new(
io::ErrorKind::InvalidInput,
format!("Failed to serialize data when writing to {}", path),
)
})?;
Ok(())
}

View file

@ -1,5 +1,15 @@
[package]
name = "grin_util"
version = "0.1.0"
authors = ["Ignotus Peverell <igno.peverell@protonmail.com>"]
authors = ["Ignotus Peverell <igno.peverell@protonmail.com>",
"Yeastplume <yeastplume@protonmail.com>"]
workspace = ".."
[dependencies]
slog = "^2.0.12"
slog-term = "^2.2.0"
slog-async = "^2.1.0"
lazy_static = "~0.2.8"
serde = "~1.0.8"
serde_derive = "~1.0.8"

View file

@ -12,9 +12,35 @@
// See the License for the specific language governing permissions and
// limitations under the License.
/// Various low-level utilities that factor Rust patterns that are frequent
/// within the grin codebase.
//! Logging, as well as various low-level utilities that factor Rust
//! patterns that are frequent within the grin codebase.
#![deny(non_upper_case_globals)]
#![deny(non_camel_case_types)]
#![deny(non_snake_case)]
#![deny(unused_mut)]
#![warn(missing_docs)]
#[macro_use]
extern crate slog;
extern crate slog_term;
extern crate slog_async;
#[macro_use]
extern crate lazy_static;
extern crate serde;
#[macro_use]
extern crate serde_derive;
// Logging related
pub mod logger;
pub use logger::{LOGGER, init_logger};
pub mod types;
pub use types::LoggingConfig;
// other utils
use std::cell::{RefCell, Ref};
#[allow(unused_imports)]
use std::ops::Deref;
@ -22,12 +48,13 @@ use std::ops::Deref;
mod hex;
pub use hex::*;
// Encapsulation of a RefCell<Option<T>> for one-time initialization after
// construction. This implementation will purposefully fail hard if not used
// properly, for example if it's not initialized before being first used
// (borrowed).
/// Encapsulation of a RefCell<Option<T>> for one-time initialization after
/// construction. This implementation will purposefully fail hard if not used
/// properly, for example if it's not initialized before being first used
/// (borrowed).
#[derive(Clone)]
pub struct OneTime<T> {
/// inner
inner: RefCell<Option<T>>,
}

87
util/src/logger.rs Normal file
View file

@ -0,0 +1,87 @@
// Copyright 2017 The Grin Developers
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Logging wrapper to be used throughout all crates in the workspace
use std::fs::OpenOptions;
use std::sync::Mutex;
use slog::{Logger, Drain, Level, LevelFilter, Duplicate, Discard};
use slog_term;
use slog_async;
use types::{LogLevel, LoggingConfig};
/// Maps our serializable `LogLevel` onto the corresponding slog `Level`.
fn convert_log_level(level: &LogLevel) -> Level {
	match *level {
		LogLevel::Critical => Level::Critical,
		LogLevel::Error => Level::Error,
		LogLevel::Warning => Level::Warning,
		LogLevel::Info => Level::Info,
		LogLevel::Debug => Level::Debug,
		LogLevel::Trace => Level::Trace,
	}
}
lazy_static! {
	/// Static logging configuration; should only be set once (via `init_logger`),
	/// before the first logging call, since `LOGGER` below is built lazily from it
	static ref LOGGING_CONFIG: Mutex<LoggingConfig> = Mutex::new(LoggingConfig::default());
	/// And a static reference to the logger itself, accessible from all crates
	pub static ref LOGGER: Logger = {
		let config = LOGGING_CONFIG.lock().unwrap();
		// map the serializable config levels onto slog's own levels
		let slog_level_stdout = convert_log_level(&config.stdout_log_level);
		let slog_level_file = convert_log_level(&config.file_log_level);
		// Terminal output drain: full-format terminal output, filtered to the
		// configured stdout level, wrapped in an async drain so logging calls
		// don't block the caller
		let terminal_decorator = slog_term::TermDecorator::new().build();
		let terminal_drain = slog_term::FullFormat::new(terminal_decorator).build().fuse();
		let terminal_drain = LevelFilter::new(terminal_drain, slog_level_stdout).fuse();
		let mut terminal_drain = slog_async::Async::new(terminal_drain).build().fuse();
		if !config.log_to_stdout {
			// stdout logging disabled: swap in a drain that discards everything
			terminal_drain = slog_async::Async::new(Discard{}).build().fuse();
		}
		// File drain defaults to discarding; only replaced when file logging is enabled
		let mut file_drain_final = slog_async::Async::new(Discard{}).build().fuse();
		if config.log_to_file {
			// File drain: plain (undecorated) full-format output to the configured
			// path, filtered to the configured file level
			// NOTE(review): truncate is always false, so when log_file_append is
			// false the previous log contents are still kept — confirm intended
			let file = OpenOptions::new()
				.create(true)
				.write(true)
				.append(config.log_file_append)
				.truncate(false)
				.open(&config.log_file_path)
				.unwrap();
			let file_decorator = slog_term::PlainDecorator::new(file);
			let file_drain = slog_term::FullFormat::new(file_decorator).build().fuse();
			let file_drain = LevelFilter::new(file_drain, slog_level_file).fuse();
			file_drain_final = slog_async::Async::new(file_drain).build().fuse();
		}
		// Compose file and terminal drains; every record is sent to both
		let composite_drain = Duplicate::new(terminal_drain, file_drain_final).fuse();
		let log = Logger::root(composite_drain, o!());
		log
	};
}
/// Initialises the global logging configuration.
///
/// Must be called before the first use of `LOGGER` (which is built lazily
/// from this configuration on first access); calls made afterwards have no
/// effect on the already-constructed logger. Passing `None` leaves the
/// default configuration in place.
pub fn init_logger(config: Option<LoggingConfig>) {
	if let Some(c) = config {
		let mut config_ref = LOGGING_CONFIG.lock().unwrap();
		// `c` is owned here, so move it in directly (the original cloned it
		// redundantly)
		*config_ref = c;
	}
}

62
util/src/types.rs Normal file
View file

@ -0,0 +1,62 @@
// Copyright 2017 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Logging configuration types
/// Log level types, mirroring slog's levels one-to-one; needed because slog's
/// `Level` doesn't implement serde's Serialize/Deserialize, so this enum is
/// what appears in (de)serialized configuration
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum LogLevel {
	/// Critical
	Critical,
	/// Error
	Error,
	/// Warning
	Warning,
	/// Info
	Info,
	/// Debug
	Debug,
	/// Trace
	Trace,
}
/// Logging configuration: controls two independent output sinks (stdout and a
/// log file), each with its own level filter
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LoggingConfig {
	/// whether to log to stdout
	pub log_to_stdout: bool,
	/// logging level for stdout
	pub stdout_log_level: LogLevel,
	/// whether to log to file
	pub log_to_file: bool,
	/// log file level
	pub file_log_level: LogLevel,
	/// Log file path
	pub log_file_path: String,
	/// Whether to append to the existing log file or replace it on startup
	pub log_file_append: bool,
}
impl Default for LoggingConfig {
	/// Default configuration: debug-level logging to stdout only; file
	/// logging disabled (would go to "grin.log" at trace level if enabled).
	fn default() -> LoggingConfig {
		Self {
			// stdout sink
			log_to_stdout: true,
			stdout_log_level: LogLevel::Debug,
			// file sink
			log_to_file: false,
			file_log_level: LogLevel::Trace,
			log_file_path: "grin.log".to_string(),
			log_file_append: false,
		}
	}
}

View file

@ -10,7 +10,7 @@ authors = [
[dependencies]
byteorder = "1"
log = "^0.3"
slog = "^2.0.12"
rand = "^0.3"
blake2-rfc = "~0.2.17"
serde = "~1.0.8"

View file

@ -20,7 +20,6 @@ use types::*;
use keychain::Keychain;
use util;
fn refresh_output(out: &mut OutputData, api_out: Option<api::Output>, tip: &api::Tip) {
if let Some(api_out) = api_out {
out.height = api_out.height;
@ -40,26 +39,21 @@ fn refresh_output(out: &mut OutputData, api_out: Option<api::Output>, tip: &api:
/// Goes through the list of outputs that haven't been spent yet and check
/// with a node whether their status has changed.
pub fn refresh_outputs(
config: &WalletConfig,
keychain: &Keychain,
) -> Result<(), Error>{
pub fn refresh_outputs(config: &WalletConfig, keychain: &Keychain) -> Result<(), Error> {
let tip = get_tip_from_node(config)?;
WalletData::with_wallet(&config.data_file_dir, |wallet_data| {
// check each output that's not spent
for mut out in wallet_data.outputs
.values_mut()
.filter(|out| {
out.status != OutputStatus::Spent
})
{
for mut out in wallet_data
.outputs
.values_mut()
.filter(|out| out.status != OutputStatus::Spent) {
// TODO check the pool for unconfirmed
match get_output_from_node(config, keychain, out.value, out.n_child) {
Ok(api_out) => refresh_output(&mut out, api_out, &tip),
Err(_) => {
// TODO find error with connection and return
// error!("Error contacting server node at {}. Is it running?",
// error!(LOGGER, "Error contacting server node at {}. Is it running?",
// config.check_node_api_http_addr);
}
}

View file

@ -16,10 +16,7 @@ use checker;
use keychain::Keychain;
use types::{WalletConfig, WalletData};
pub fn show_info(
config: &WalletConfig,
keychain: &Keychain,
) {
pub fn show_info(config: &WalletConfig, keychain: &Keychain) {
let fingerprint = keychain.clone().fingerprint();
let _ = checker::refresh_outputs(&config, &keychain);
@ -30,7 +27,8 @@ pub fn show_info(
println!("identifier, height, lock_height, status, value");
println!("----------------------------------");
let mut outputs = wallet_data.outputs
let mut outputs = wallet_data
.outputs
.values()
.filter(|out| out.fingerprint == fingerprint)
.collect::<Vec<_>>();

View file

@ -17,7 +17,7 @@
extern crate byteorder;
extern crate blake2_rfc as blake2;
#[macro_use]
extern crate log;
extern crate slog;
extern crate rand;
extern crate serde;
#[macro_use]

View file

@ -56,6 +56,7 @@ use api::{self, ApiEndpoint, Operation, ApiResult};
use keychain::{BlindingFactor, Keychain};
use types::*;
use util;
use util::LOGGER;
/// Dummy wrapper for the hex-encoded serialized transaction.
#[derive(Serialize, Deserialize)]
@ -66,11 +67,10 @@ pub struct TxWrapper {
/// Receive an already well formed JSON transaction issuance and finalize the
/// transaction, adding our receiving output, to broadcast to the rest of the
/// network.
pub fn receive_json_tx(
config: &WalletConfig,
keychain: &Keychain,
partial_tx_str: &str,
) -> Result<(), Error> {
pub fn receive_json_tx(config: &WalletConfig,
keychain: &Keychain,
partial_tx_str: &str)
-> Result<(), Error> {
let (amount, blinding, partial_tx) = partial_tx_from_json(keychain, partial_tx_str)?;
let final_tx = receive_transaction(config, keychain, amount, blinding, partial_tx)?;
let tx_hex = util::to_hex(ser::ser_vec(&final_tx).unwrap());
@ -96,10 +96,8 @@ impl ApiEndpoint for WalletReceiver {
type OP_OUT = CbData;
fn operations(&self) -> Vec<Operation> {
vec![
Operation::Custom("coinbase".to_string()),
Operation::Custom("receive_json_tx".to_string()),
]
vec![Operation::Custom("coinbase".to_string()),
Operation::Custom("receive_json_tx".to_string())]
}
fn operation(&self, op: String, input: WalletReceiveRequest) -> ApiResult<CbData> {
@ -107,29 +105,31 @@ impl ApiEndpoint for WalletReceiver {
"coinbase" => {
match input {
WalletReceiveRequest::Coinbase(cb_fees) => {
debug!("Operation {} with fees {:?}", op, cb_fees);
let (out, kern, block_fees) =
receive_coinbase(
&self.config,
&self.keychain,
&cb_fees,
).map_err(|e| {
debug!(LOGGER, "Operation {} with fees {:?}", op, cb_fees);
let (out, kern, block_fees) = receive_coinbase(
&self.config,
&self.keychain,
&cb_fees,
).map_err(|e| {
api::Error::Internal(format!("Error building coinbase: {:?}", e))
})?;
let out_bin =
ser::ser_vec(&out).map_err(|e| {
let out_bin = ser::ser_vec(&out)
.map_err(|e| {
api::Error::Internal(format!("Error serializing output: {:?}", e))
})?;
let kern_bin =
ser::ser_vec(&kern).map_err(|e| {
let kern_bin = ser::ser_vec(&kern)
.map_err(|e| {
api::Error::Internal(format!("Error serializing kernel: {:?}", e))
})?;
let pubkey_bin = match block_fees.pubkey {
Some(pubkey) => {
ser::ser_vec(&pubkey).map_err(|e| {
api::Error::Internal(format!("Error serializing kernel: {:?}", e))
})?
},
ser::ser_vec(&pubkey)
.map_err(|e| {
api::Error::Internal(
format!("Error serializing kernel: {:?}", e),
)
})?
}
None => vec![],
};
@ -139,29 +139,34 @@ impl ApiEndpoint for WalletReceiver {
pubkey: util::to_hex(pubkey_bin),
})
}
_ => Err(api::Error::Argument(
format!("Incorrect request data: {}", op),
)),
_ => Err(api::Error::Argument(format!("Incorrect request data: {}", op))),
}
}
"receive_json_tx" => {
match input {
WalletReceiveRequest::PartialTransaction(partial_tx_str) => {
debug!("Operation {} with transaction {}", op, &partial_tx_str);
receive_json_tx(&self.config, &self.keychain, &partial_tx_str).map_err(|e| {
api::Error::Internal(format!("Error processing partial transaction: {:?}", e))
}).unwrap();
debug!(
LOGGER,
"Operation {} with transaction {}",
op,
&partial_tx_str
);
receive_json_tx(&self.config, &self.keychain, &partial_tx_str)
.map_err(|e| {
api::Error::Internal(
format!("Error processing partial transaction: {:?}", e),
)
})
.unwrap();
//TODO: Return emptiness for now, should be a proper enum return type
// TODO: Return emptiness for now, should be a proper enum return type
Ok(CbData {
output: String::from(""),
kernel: String::from(""),
pubkey: String::from(""),
})
}
_ => Err(api::Error::Argument(
format!("Incorrect request data: {}", op),
)),
_ => Err(api::Error::Argument(format!("Incorrect request data: {}", op))),
}
}
_ => Err(api::Error::Argument(format!("Unknown operation: {}", op))),
@ -170,11 +175,10 @@ impl ApiEndpoint for WalletReceiver {
}
/// Build a coinbase output and the corresponding kernel
fn receive_coinbase(
config: &WalletConfig,
keychain: &Keychain,
block_fees: &BlockFees,
) -> Result<(Output, TxKernel, BlockFees), Error> {
fn receive_coinbase(config: &WalletConfig,
keychain: &Keychain,
block_fees: &BlockFees)
-> Result<(Output, TxKernel, BlockFees), Error> {
let fingerprint = keychain.fingerprint();
// operate within a lock on wallet data
@ -184,7 +188,7 @@ fn receive_coinbase(
Some(pubkey) => {
let derivation = keychain.derivation_from_pubkey(&pubkey)?;
(pubkey.clone(), derivation)
},
}
None => {
let derivation = wallet_data.next_child(fingerprint.clone());
let pubkey = keychain.derive_pubkey(derivation)?;
@ -203,15 +207,20 @@ fn receive_coinbase(
lock_height: 0,
});
debug!("Received coinbase and built candidate output - {}, {}, {}",
fingerprint.clone(), pubkey.fingerprint(), derivation);
debug!(
LOGGER,
"Received coinbase and built candidate output - {}, {}, {}",
fingerprint.clone(),
pubkey.fingerprint(),
derivation
);
debug!("block_fees - {:?}", block_fees);
debug!(LOGGER, "block_fees - {:?}", block_fees);
let mut block_fees = block_fees.clone();
block_fees.pubkey = Some(pubkey.clone());
debug!("block_fees updated - {:?}", block_fees);
debug!(LOGGER, "block_fees updated - {:?}", block_fees);
let (out, kern) = Block::reward_output(
&keychain,
@ -246,14 +255,16 @@ fn receive_transaction(
let fee_amount = tx_fee(partial.inputs.len(), partial.outputs.len() + 1, None);
let out_amount = amount - fee_amount;
let (tx_final, _) = build::transaction(vec![
build::initial_tx(partial),
build::with_excess(blinding),
build::output(out_amount, pubkey.clone()),
build::with_fee(fee_amount),
], keychain)?;
let (tx_final, _) = build::transaction(
vec![build::initial_tx(partial),
build::with_excess(blinding),
build::output(out_amount, pubkey.clone()),
build::with_fee(fee_amount)],
keychain,
)?;
// make sure the resulting transaction is valid (could have been lied to on excess)
// make sure the resulting transaction is valid (could have been lied to on
// excess)
tx_final.validate(&keychain.secp())?;
// track the new output and return the finalized transaction to broadcast
@ -266,9 +277,13 @@ fn receive_transaction(
height: 0,
lock_height: 0,
});
debug!("Received txn and built output - {}, {}, {}",
fingerprint.clone(), pubkey.fingerprint(), derivation);
debug!(
LOGGER,
"Received txn and built output - {}, {}, {}",
fingerprint.clone(),
pubkey.fingerprint(),
derivation
);
Ok(tx_final)
})?
}

View file

@ -19,12 +19,14 @@ use core::ser;
use keychain::{BlindingFactor, Keychain, Fingerprint, Identifier};
use receiver::TxWrapper;
use types::*;
use util::LOGGER;
use util;
/// Issue a new transaction to the provided sender by spending some of our
/// wallet
/// UTXOs. The destination can be "stdout" (for command line) or a URL to the
/// recipients wallet receiver (to be implemented).
pub fn issue_send_tx(
config: &WalletConfig,
keychain: &Keychain,
@ -43,10 +45,12 @@ pub fn issue_send_tx(
println!("{}", json_tx);
} else if &dest[..4] == "http" {
let url = format!("{}/v1/receive/receive_json_tx", &dest);
debug!("Posting partial transaction to {}", url);
debug!(LOGGER, "Posting partial transaction to {}", url);
let request = WalletReceiveRequest::PartialTransaction(json_tx);
let _: CbData = api::client::post(url.as_str(), &request)
.expect(&format!("Wallet receiver at {} unreachable, could not send transaction. Is it running?", url));
let _: CbData = api::client::post(url.as_str(), &request).expect(&format!(
"Wallet receiver at {} unreachable, could not send transaction. Is it running?",
url
));
} else {
panic!("dest not in expected format: {}", dest);
}
@ -175,15 +179,9 @@ mod test {
let keychain = Keychain::from_random_seed().unwrap();
let pk1 = keychain.derive_pubkey(1).unwrap();
let (tx, _) = transaction(
vec![output(105, pk1.clone())],
&keychain,
).unwrap();
let (tx, _) = transaction(vec![output(105, pk1.clone())], &keychain).unwrap();
let (tx2, _) = transaction(
vec![input(105, pk1.clone())],
&keychain,
).unwrap();
let (tx2, _) = transaction(vec![input(105, pk1.clone())], &keychain).unwrap();
assert_eq!(tx.outputs[0].commitment(), tx2.inputs[0].commitment());
}

View file

@ -28,6 +28,7 @@ use core::core::{Transaction, transaction};
use core::ser;
use keychain;
use util;
use util::LOGGER;
const DAT_FILE: &'static str = "wallet.dat";
const LOCK_FILE: &'static str = "wallet.lock";
@ -195,12 +196,11 @@ impl WalletData {
/// across operating systems, this just creates a lock file with a "should
/// not exist" option.
pub fn with_wallet<T, F>(data_file_dir: &str, f: F) -> Result<T, Error>
where
F: FnOnce(&mut WalletData) -> T,
where F: FnOnce(&mut WalletData) -> T
{
// create directory if it doesn't exist
fs::create_dir_all(data_file_dir).unwrap_or_else(|why| {
info!("! {:?}", why.kind());
info!(LOGGER, "! {:?}", why.kind());
});
let data_file_path = &format!("{}{}{}", data_file_dir, MAIN_SEPARATOR, DAT_FILE);
@ -229,6 +229,7 @@ impl WalletData {
return Err(e);
}
debug!(
LOGGER,
"failed to obtain wallet.lock, retries - {}, sleeping",
retries
);
@ -266,25 +267,25 @@ impl WalletData {
/// Read the wallet data from disk.
fn read(data_file_path: &str) -> Result<WalletData, Error> {
let data_file = File::open(data_file_path).map_err(|e| {
Error::WalletData(format!("Could not open {}: {}", data_file_path, e))
})?;
serde_json::from_reader(data_file).map_err(|e| {
Error::WalletData(format!("Error reading {}: {}", data_file_path, e))
})
let data_file =
File::open(data_file_path)
.map_err(|e| Error::WalletData(format!("Could not open {}: {}", data_file_path, e)))?;
serde_json::from_reader(data_file)
.map_err(|e| Error::WalletData(format!("Error reading {}: {}", data_file_path, e)))
}
/// Write the wallet data to disk.
fn write(&self, data_file_path: &str) -> Result<(), Error> {
let mut data_file = File::create(data_file_path).map_err(|e| {
Error::WalletData(format!("Could not create {}: {}", data_file_path, e))
})?;
let res_json = serde_json::to_vec_pretty(self).map_err(|e| {
Error::WalletData(format!("Error serializing wallet data: {}", e))
})?;
data_file.write_all(res_json.as_slice()).map_err(|e| {
Error::WalletData(format!("Error writing {}: {}", data_file_path, e))
})
let mut data_file =
File::create(data_file_path)
.map_err(|e| {
Error::WalletData(format!("Could not create {}: {}", data_file_path, e))
})?;
let res_json = serde_json::to_vec_pretty(self)
.map_err(|e| Error::WalletData(format!("Error serializing wallet data: {}", e)))?;
data_file
.write_all(res_json.as_slice())
.map_err(|e| Error::WalletData(format!("Error writing {}: {}", data_file_path, e)))
}
/// Append a new output data to the wallet data.
@ -306,11 +307,10 @@ impl WalletData {
/// Select a subset of unspent outputs to spend in a transaction
/// transferring the provided amount.
pub fn select(
&self,
fingerprint: keychain::Fingerprint,
amount: u64,
) -> (Vec<OutputData>, i64) {
pub fn select(&self,
fingerprint: keychain::Fingerprint,
amount: u64)
-> (Vec<OutputData>, i64) {
let mut to_spend = vec![];
let mut input_total = 0;
@ -352,11 +352,10 @@ struct JSONPartialTx {
/// Encodes the information for a partial transaction (not yet completed by the
/// receiver) into JSON.
pub fn partial_tx_to_json(
receive_amount: u64,
blind_sum: keychain::BlindingFactor,
tx: Transaction,
) -> String {
pub fn partial_tx_to_json(receive_amount: u64,
blind_sum: keychain::BlindingFactor,
tx: Transaction)
-> String {
let partial_tx = JSONPartialTx {
amount: receive_amount,
blind_sum: util::to_hex(blind_sum.secret_key().as_ref().to_vec()),
@ -367,10 +366,9 @@ pub fn partial_tx_to_json(
/// Reads a partial transaction encoded as JSON into the amount, sum of blinding
/// factors and the transaction itself.
pub fn partial_tx_from_json(
keychain: &keychain::Keychain,
json_str: &str,
) -> Result<(u64, keychain::BlindingFactor, Transaction), Error> {
pub fn partial_tx_from_json(keychain: &keychain::Keychain,
json_str: &str)
-> Result<(u64, keychain::BlindingFactor, Transaction), Error> {
let partial_tx: JSONPartialTx = serde_json::from_str(json_str)?;
let blind_bin = util::from_hex(partial_tx.blind_sum)?;
@ -380,11 +378,10 @@ pub fn partial_tx_from_json(
let blinding = keychain::BlindingFactor::from_slice(keychain.secp(), &blind_bin[..])?;
let tx_bin = util::from_hex(partial_tx.tx)?;
let tx = ser::deserialize(&mut &tx_bin[..]).map_err(|_| {
Error::Format(
"Could not deserialize transaction, invalid format.".to_string(),
)
})?;
let tx = ser::deserialize(&mut &tx_bin[..])
.map_err(|_| {
Error::Format("Could not deserialize transaction, invalid format.".to_string())
})?;
Ok((partial_tx.amount, blinding, tx))
}