Clean up HTTP APIs, update ports to avoid gap, rustfmt

Moved the HTTP APIs away from the REST endpoint abstraction to
simpler Iron handlers. Re-established all routes under v1.
Changed the wallet receiver port to 13415 to avoid a gap in the
port numbering.

Finally, rustfmt seems to have ignored the specific file arguments
it was given and ran on everything.
Ignotus Peverell 2017-10-31 19:32:33 -04:00
parent 05d22cb632
commit e4ebb7c7cb
No known key found for this signature in database
GPG key ID: 99CD25F39F8F8211
78 changed files with 1705 additions and 1928 deletions
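
For orientation before the diffs: the new style drops the generic REST endpoint
machinery and registers each route as a plain Iron handler on a single router
mounted under "/v1". A standalone sketch of that pattern follows (not grin code;
the crate set, handler name and listen address are assumptions for illustration):

    #[macro_use]
    extern crate router;
    extern crate iron;
    extern crate mount;

    use iron::prelude::*;
    use iron::{status, Handler};
    use mount::Mount;

    struct TipHandler;

    impl Handler for TipHandler {
        fn handle(&self, _req: &mut Request) -> IronResult<Response> {
            // A real handler serializes a struct to JSON here (see the
            // json_response utility in the handlers diff below).
            Ok(Response::with((status::Ok, "{\"height\": 0}")))
        }
    }

    fn main() {
        // One router holds every route; mounting it under "/v1" mirrors
        // ApiServer::new("/v1") plus register_handler(router) below.
        let router = router!(chain_tip: get "/chain" => TipHandler);
        let mut mount = Mount::new();
        mount.mount("/v1", router);
        Iron::new(mount).http("127.0.0.1:13413").unwrap();
    }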

@@ -17,7 +17,7 @@
 use hyper;
 use hyper::client::Response;
 use hyper::status::{StatusClass, StatusCode};
-use serde::{Serialize, Deserialize};
+use serde::{Deserialize, Serialize};
 use serde_json;

 use rest::Error;

@@ -26,12 +26,14 @@ use rest::Error;
 /// returns a JSON object. Handles request building, JSON deserialization and
 /// response code checking.
 pub fn get<'a, T>(url: &'a str) -> Result<T, Error>
-    where for<'de> T: Deserialize<'de>
+where
+    for<'de> T: Deserialize<'de>,
 {
     let client = hyper::Client::new();
     let res = check_error(client.get(url).send())?;
-    serde_json::from_reader(res)
-        .map_err(|e| Error::Internal(format!("Server returned invalid JSON: {}", e)))
+    serde_json::from_reader(res).map_err(|e| {
+        Error::Internal(format!("Server returned invalid JSON: {}", e))
+    })
 }

 /// Helper function to easily issue a HTTP POST request with the provided JSON
@@ -39,15 +41,18 @@ pub fn get<'a, T>(url: &'a str) -> Result<T, Error>
 /// building, JSON serialization and deserialization, and response code
 /// checking.
 pub fn post<'a, IN, OUT>(url: &'a str, input: &IN) -> Result<OUT, Error>
-    where IN: Serialize,
-          for<'de> OUT: Deserialize<'de>
+where
+    IN: Serialize,
+    for<'de> OUT: Deserialize<'de>,
 {
-    let in_json = serde_json::to_string(input)
-        .map_err(|e| Error::Internal(format!("Could not serialize data to JSON: {}", e)))?;
+    let in_json = serde_json::to_string(input).map_err(|e| {
+        Error::Internal(format!("Could not serialize data to JSON: {}", e))
+    })?;
     let client = hyper::Client::new();
     let res = check_error(client.post(url).body(&mut in_json.as_bytes()).send())?;
-    serde_json::from_reader(res)
-        .map_err(|e| Error::Internal(format!("Server returned invalid JSON: {}", e)))
+    serde_json::from_reader(res).map_err(|e| {
+        Error::Internal(format!("Server returned invalid JSON: {}", e))
+    })
 }

 // convert hyper error and check for non success response codes
@@ -59,13 +64,11 @@ fn check_error(res: hyper::Result<Response>) -> Result<Response, Error> {
     match response.status.class() {
         StatusClass::Success => Ok(response),
         StatusClass::ServerError => Err(Error::Internal(format!("Server error."))),
-        StatusClass::ClientError => {
-            if response.status == StatusCode::NotFound {
-                Err(Error::NotFound)
-            } else {
-                Err(Error::Argument(format!("Argument error")))
-            }
-        }
+        StatusClass::ClientError => if response.status == StatusCode::NotFound {
+            Err(Error::NotFound)
+        } else {
+            Err(Error::Argument(format!("Argument error")))
+        },
         _ => Err(Error::Internal(format!("Unrecognized error."))),
     }
 }
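
For illustration, a caller uses these helpers by pointing them at a node and
letting serde do the rest (a sketch; the Tip fields and the node address are
assumptions, not part of this diff):

    extern crate grin_api as api;
    extern crate serde;
    #[macro_use]
    extern crate serde_derive;

    // Illustrative shape of the GET /v1/chain response body.
    #[derive(Deserialize)]
    struct Tip {
        height: u64,
    }

    fn main() {
        // get() builds the request, maps non-2xx statuses to rest::Error
        // and deserializes the JSON body.
        let tip: Result<Tip, api::Error> = api::client::get("http://127.0.0.1:13413/v1/chain");
        match tip {
            Ok(t) => println!("chain tip at height {}", t.height),
            Err(e) => println!("API error: {:?}", e),
        }
    }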

@@ -1,127 +0,0 @@
-// Copyright 2016 The Grin Developers
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-use std::sync::{Arc, RwLock};
-use std::thread;
-
-use chain;
-use core::core::Transaction;
-use core::ser;
-use pool;
-use handlers::{UtxoHandler, ChainHandler, SumTreeHandler};
-use rest::*;
-use types::*;
-use util;
-use util::LOGGER;
-
-/// ApiEndpoint implementation for the transaction pool, to check its status
-/// and size as well as push new transactions.
-#[derive(Clone)]
-pub struct PoolApi<T> {
-    tx_pool: Arc<RwLock<pool::TransactionPool<T>>>,
-}
-
-impl<T> ApiEndpoint for PoolApi<T>
-where
-    T: pool::BlockChain + Clone + Send + Sync + 'static,
-{
-    type ID = String;
-    type T = PoolInfo;
-    type OP_IN = TxWrapper;
-    type OP_OUT = ();
-
-    fn operations(&self) -> Vec<Operation> {
-        vec![Operation::Get, Operation::Custom("push".to_string())]
-    }
-
-    fn get(&self, _: String) -> ApiResult<PoolInfo> {
-        let pool = self.tx_pool.read().unwrap();
-        Ok(PoolInfo {
-            pool_size: pool.pool_size(),
-            orphans_size: pool.orphans_size(),
-            total_size: pool.total_size(),
-        })
-    }
-
-    fn operation(&self, _: String, input: TxWrapper) -> ApiResult<()> {
-        let tx_bin = util::from_hex(input.tx_hex).map_err(|_| {
-            Error::Argument(format!("Invalid hex in transaction wrapper."))
-        })?;
-
-        let tx: Transaction = ser::deserialize(&mut &tx_bin[..]).map_err(|_| {
-            Error::Argument(
-                "Could not deserialize transaction, invalid format.".to_string(),
-            )
-        })?;
-
-        let source = pool::TxSource {
-            debug_name: "push-api".to_string(),
-            identifier: "?.?.?.?".to_string(),
-        };
-        info!(
-            LOGGER,
-            "Pushing transaction with {} inputs and {} outputs to pool.",
-            tx.inputs.len(),
-            tx.outputs.len()
-        );
-        self.tx_pool
-            .write()
-            .unwrap()
-            .add_to_memory_pool(source, tx)
-            .map_err(|e| {
-                Error::Internal(format!("Addition to transaction pool failed: {:?}", e))
-            })?;
-
-        Ok(())
-    }
-}
-
-/// Dummy wrapper for the hex-encoded serialized transaction.
-#[derive(Serialize, Deserialize)]
-pub struct TxWrapper {
-    tx_hex: String,
-}
-
-/// Start all server REST APIs. Just register all of them on a ApiServer
-/// instance and runs the corresponding HTTP server.
-pub fn start_rest_apis<T>(
-    addr: String,
-    chain: Arc<chain::Chain>,
-    tx_pool: Arc<RwLock<pool::TransactionPool<T>>>,
-) where
-    T: pool::BlockChain + Clone + Send + Sync + 'static,
-{
-    thread::spawn(move || {
-        let mut apis = ApiServer::new("/v1".to_string());
-        apis.register_endpoint("/pool".to_string(), PoolApi {tx_pool: tx_pool});
-
-        // register a nested router at "/v2" for flexibility
-        // so we can experiment with raw iron handlers
-        let utxo_handler = UtxoHandler {chain: chain.clone()};
-        let chain_tip_handler = ChainHandler {chain: chain.clone()};
-        let sumtree_handler = SumTreeHandler {chain: chain.clone()};
-        let router = router!(
-            chain_tip: get "/chain" => chain_tip_handler,
-            chain_utxos: get "/chain/utxos" => utxo_handler,
-            sumtree_roots: get "/sumtrees/*" => sumtree_handler,
-        );
-        apis.register_handler("/v2", router);
-
-        apis.start(&addr[..]).unwrap_or_else(|e| {
-            error!(LOGGER, "Failed to start API HTTP server: {}.", e);
-        });
-    });
-}

@@ -12,37 +12,45 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-use std::sync::Arc;
+use std::io::Read;
+use std::sync::{Arc, RwLock};
+use std::thread;

 use iron::prelude::*;
 use iron::Handler;
 use iron::status;
 use urlencoded::UrlEncodedQuery;
+use serde::Serialize;
 use serde_json;

 use chain;
+use core::core::Transaction;
+use core::ser;
+use pool;
 use rest::*;
-use types::*;
 use util::secp::pedersen::Commitment;
+use types::*;
 use util;
 use util::LOGGER;

-pub struct UtxoHandler {
-    pub chain: Arc<chain::Chain>,
+// Supports retrieval of multiple outputs in a single request -
+// GET /v1/chain/utxos?id=xxx,yyy,zzz
+// GET /v1/chain/utxos?id=xxx&id=yyy&id=zzz
+struct UtxoHandler {
+    chain: Arc<chain::Chain>,
 }

 impl UtxoHandler {
     fn get_utxo(&self, id: &str) -> Result<Output, Error> {
         debug!(LOGGER, "getting utxo: {}", id);
-        let c = util::from_hex(String::from(id))
-            .map_err(|_| {
-                Error::Argument(format!("Not a valid commitment: {}", id))
-            })?;
+        let c = util::from_hex(String::from(id)).map_err(|_| {
+            Error::Argument(format!("Not a valid commitment: {}", id))
+        })?;
         let commit = Commitment::from_vec(c);

-        let out = self.chain.get_unspent(&commit)
+        let out = self.chain
+            .get_unspent(&commit)
             .map_err(|_| Error::NotFound)?;

         let header = self.chain

@@ -53,11 +61,6 @@ impl UtxoHandler {
     }
 }

-//
-// Supports retrieval of multiple outputs in a single request -
-// GET /v2/chain/utxos?id=xxx,yyy,zzz
-// GET /v2/chain/utxos?id=xxx&id=yyy&id=zzz
-//
 impl Handler for UtxoHandler {
     fn handle(&self, req: &mut Request) -> IronResult<Response> {
         let mut commitments: Vec<&str> = vec![];

@@ -72,60 +75,49 @@ impl Handler for UtxoHandler {
         }

         let mut utxos: Vec<Output> = vec![];
         for commit in commitments {
             if let Ok(out) = self.get_utxo(commit) {
                 utxos.push(out);
             }
         }

-        match serde_json::to_string(&utxos) {
-            Ok(json) => Ok(Response::with((status::Ok, json))),
-            Err(_) => Ok(Response::with((status::BadRequest, ""))),
-        }
+        json_response(&utxos)
     }
 }

-// Sum tree handler
-pub struct SumTreeHandler {
-    pub chain: Arc<chain::Chain>,
+// Sum tree handler. Retrieve the roots:
+// GET /v1/sumtrees/roots
+//
+// Last inserted nodes::
+// GET /v1/sumtrees/lastutxos (gets last 10)
+// GET /v1/sumtrees/lastutxos?n=5
+// GET /v1/sumtrees/lastrangeproofs
+// GET /v1/sumtrees/lastkernels
+struct SumTreeHandler {
+    chain: Arc<chain::Chain>,
 }

 impl SumTreeHandler {
-    //gets roots
+    // gets roots
     fn get_roots(&self) -> SumTrees {
         SumTrees::from_head(self.chain.clone())
     }

     // gets last n utxos inserted in to the tree
-    fn get_last_n_utxo(&self, distance:u64) -> Vec<SumTreeNode> {
+    fn get_last_n_utxo(&self, distance: u64) -> Vec<SumTreeNode> {
         SumTreeNode::get_last_n_utxo(self.chain.clone(), distance)
     }

     // gets last n utxos inserted in to the tree
-    fn get_last_n_rangeproof(&self, distance:u64) -> Vec<SumTreeNode> {
+    fn get_last_n_rangeproof(&self, distance: u64) -> Vec<SumTreeNode> {
         SumTreeNode::get_last_n_rangeproof(self.chain.clone(), distance)
     }

     // gets last n utxos inserted in to the tree
-    fn get_last_n_kernel(&self, distance:u64) -> Vec<SumTreeNode> {
+    fn get_last_n_kernel(&self, distance: u64) -> Vec<SumTreeNode> {
         SumTreeNode::get_last_n_kernel(self.chain.clone(), distance)
     }
 }

-//
-// Retrieve the roots:
-// GET /v2/sumtrees/roots
-//
-// Last inserted nodes::
-// GET /v2/sumtrees/lastutxos (gets last 10)
-// GET /v2/sumtrees/lastutxos?n=5
-// GET /v2/sumtrees/lastrangeproofs
-// GET /v2/sumtrees/lastkernels
-//
 impl Handler for SumTreeHandler {
     fn handle(&self, req: &mut Request) -> IronResult<Response> {
         let url = req.url.clone();

@@ -133,40 +125,29 @@ impl Handler for SumTreeHandler {
         if *path_elems.last().unwrap() == "" {
             path_elems.pop();
         }
-        //TODO: probably need to set a reasonable max limit here
-        let mut last_n=10;
+        // TODO: probably need to set a reasonable max limit here
+        let mut last_n = 10;
         if let Ok(params) = req.get_ref::<UrlEncodedQuery>() {
             if let Some(nums) = params.get("n") {
                 for num in nums {
                     if let Ok(n) = str::parse(num) {
-                        last_n=n;
+                        last_n = n;
                     }
                 }
             }
         }
-        match *path_elems.last().unwrap(){
-            "roots" => match serde_json::to_string_pretty(&self.get_roots()) {
-                Ok(json) => Ok(Response::with((status::Ok, json))),
-                Err(_) => Ok(Response::with((status::BadRequest, ""))),
-            },
-            "lastutxos" => match serde_json::to_string_pretty(&self.get_last_n_utxo(last_n)) {
-                Ok(json) => Ok(Response::with((status::Ok, json))),
-                Err(_) => Ok(Response::with((status::BadRequest, ""))),
-            },
-            "lastrangeproofs" => match serde_json::to_string_pretty(&self.get_last_n_rangeproof(last_n)) {
-                Ok(json) => Ok(Response::with((status::Ok, json))),
-                Err(_) => Ok(Response::with((status::BadRequest, ""))),
-            },
-            "lastkernels" => match serde_json::to_string_pretty(&self.get_last_n_kernel(last_n)) {
-                Ok(json) => Ok(Response::with((status::Ok, json))),
-                Err(_) => Ok(Response::with((status::BadRequest, ""))),
-            },_ => Ok(Response::with((status::BadRequest, "")))
+        match *path_elems.last().unwrap() {
+            "roots" => json_response(&self.get_roots()),
+            "lastutxos" => json_response(&self.get_last_n_utxo(last_n)),
+            "lastrangeproofs" => json_response(&self.get_last_n_rangeproof(last_n)),
+            "lastkernels" => json_response(&self.get_last_n_kernel(last_n)),
+            _ => Ok(Response::with((status::BadRequest, ""))),
         }
     }
 }

-// Chain Handler
+// Chain handler. Get the head details.
+// GET /v1/chain
 pub struct ChainHandler {
     pub chain: Arc<chain::Chain>,
 }

@@ -177,16 +158,134 @@ impl ChainHandler {
     }
 }

-//
-// Get the head details
-// GET /v2/chain
-//
 impl Handler for ChainHandler {
     fn handle(&self, _req: &mut Request) -> IronResult<Response> {
-        match serde_json::to_string_pretty(&self.get_tip()) {
-            Ok(json) => Ok(Response::with((status::Ok, json))),
-            Err(_) => Ok(Response::with((status::BadRequest, ""))),
-        }
+        json_response(&self.get_tip())
     }
 }
+
+// Get basic information about the transaction pool.
+struct PoolInfoHandler<T> {
+    tx_pool: Arc<RwLock<pool::TransactionPool<T>>>,
+}
+
+impl<T> Handler for PoolInfoHandler<T>
+where
+    T: pool::BlockChain + Send + Sync + 'static,
+{
+    fn handle(&self, _req: &mut Request) -> IronResult<Response> {
+        let pool = self.tx_pool.read().unwrap();
+        json_response(&PoolInfo {
+            pool_size: pool.pool_size(),
+            orphans_size: pool.orphans_size(),
+            total_size: pool.total_size(),
+        })
+    }
+}
+
+/// Dummy wrapper for the hex-encoded serialized transaction.
+#[derive(Serialize, Deserialize)]
+struct TxWrapper {
+    tx_hex: String,
+}
+
+// Push new transactions to our transaction pool, that should broadcast it
+// to the network if valid.
+struct PoolPushHandler<T> {
+    tx_pool: Arc<RwLock<pool::TransactionPool<T>>>,
+}
+
+impl<T> Handler for PoolPushHandler<T>
+where
+    T: pool::BlockChain + Send + Sync + 'static,
+{
+    fn handle(&self, req: &mut Request) -> IronResult<Response> {
+        let wrapper: TxWrapper = serde_json::from_reader(req.body.by_ref())
+            .map_err(|e| IronError::new(e, status::BadRequest))?;
+
+        let tx_bin = util::from_hex(wrapper.tx_hex).map_err(|_| {
+            Error::Argument(format!("Invalid hex in transaction wrapper."))
+        })?;
+
+        let tx: Transaction = ser::deserialize(&mut &tx_bin[..]).map_err(|_| {
+            Error::Argument("Could not deserialize transaction, invalid format.".to_string())
+        })?;
+
+        let source = pool::TxSource {
+            debug_name: "push-api".to_string(),
+            identifier: "?.?.?.?".to_string(),
+        };
+        info!(
+            LOGGER,
+            "Pushing transaction with {} inputs and {} outputs to pool.",
+            tx.inputs.len(),
+            tx.outputs.len()
+        );
+
+        self.tx_pool
+            .write()
+            .unwrap()
+            .add_to_memory_pool(source, tx)
+            .map_err(|e| {
+                Error::Internal(format!("Addition to transaction pool failed: {:?}", e))
+            })?;
+
+        Ok(Response::with(status::Ok))
+    }
+}
+
+// Utility to serialize a struct into JSON and produce a sensible IronResult
+// out of it.
+fn json_response<T>(s: &T) -> IronResult<Response>
+where
+    T: Serialize,
+{
+    match serde_json::to_string_pretty(s) {
+        Ok(json) => Ok(Response::with((status::Ok, json))),
+        Err(_) => Ok(Response::with((status::InternalServerError, ""))),
+    }
+}
+
+/// Start all server HTTP handlers. Register all of them with Iron
+/// and runs the corresponding HTTP server.
+pub fn start_rest_apis<T>(
+    addr: String,
+    chain: Arc<chain::Chain>,
+    tx_pool: Arc<RwLock<pool::TransactionPool<T>>>,
+) where
+    T: pool::BlockChain + Send + Sync + 'static,
+{
+    thread::spawn(move || {
+        // build handlers and register them under the appropriate endpoint
+        let utxo_handler = UtxoHandler {
+            chain: chain.clone(),
+        };
+        let chain_tip_handler = ChainHandler {
+            chain: chain.clone(),
+        };
+        let sumtree_handler = SumTreeHandler {
+            chain: chain.clone(),
+        };
+        let pool_info_handler = PoolInfoHandler {
+            tx_pool: tx_pool.clone(),
+        };
+        let pool_push_handler = PoolPushHandler {
+            tx_pool: tx_pool.clone(),
+        };
+
+        let router = router!(
+            chain_tip: get "/chain" => chain_tip_handler,
+            chain_utxos: get "/chain/utxos" => utxo_handler,
+            sumtree_roots: get "/sumtrees/*" => sumtree_handler,
+            pool_info: get "/pool" => pool_info_handler,
+            pool_push: post "/pool/push" => pool_push_handler,
+        );

+        let mut apis = ApiServer::new("/v1".to_string());
+        apis.register_handler(router);
+
+        info!(LOGGER, "Starting HTTP API server at {}.", addr);
+        apis.start(&addr[..]).unwrap_or_else(|e| {
+            error!(LOGGER, "Failed to start API HTTP server: {}.", e);
+        });
+    });
+}
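
Correspondingly, pushing a transaction is now one POST of the TxWrapper JSON to
the pool route (a sketch using plain hyper, the same client library as above;
the node address and hex payload are placeholders):

    extern crate hyper;

    fn main() {
        // POST /v1/pool/push consumes {"tx_hex": "..."} and answers
        // 200 OK with an empty body on success.
        let body = r#"{"tx_hex": "00a1b2c3"}"#; // placeholder hex
        let client = hyper::Client::new();
        let res = client
            .post("http://127.0.0.1:13413/v1/pool/push")
            .body(body)
            .send()
            .unwrap();
        assert_eq!(res.status, hyper::status::StatusCode::Ok);
    }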

@@ -12,31 +12,30 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-extern crate grin_core as core;
 extern crate grin_chain as chain;
+extern crate grin_core as core;
 extern crate grin_pool as pool;
 extern crate grin_store as store;
 extern crate grin_util as util;
 extern crate hyper;
-#[macro_use]
-extern crate slog;
 extern crate iron;
-extern crate urlencoded;
+extern crate mount;
 #[macro_use]
 extern crate router;
-extern crate mount;
 extern crate serde;
 #[macro_use]
 extern crate serde_derive;
 extern crate serde_json;
+#[macro_use]
+extern crate slog;
+extern crate urlencoded;

 pub mod client;
-mod endpoints;
 mod handlers;
 mod rest;
 mod types;

-pub use endpoints::start_rest_apis;
+pub use handlers::start_rest_apis;
 pub use types::*;
 pub use rest::*;

@@ -19,26 +19,18 @@
 //! register them on a ApiServer.

 use std::error;
-use std::fmt::{self, Display, Debug, Formatter};
-use std::io::Read;
+use std::fmt::{self, Display, Formatter};
 use std::net::ToSocketAddrs;
 use std::string::ToString;
-use std::str::FromStr;
 use std::mem;

 use iron::prelude::*;
-use iron::{status, headers, Listening};
-use iron::method::Method;
-use iron::modifiers::Header;
+use iron::{status, Listening};
 use iron::middleware::Handler;
 use router::Router;
 use mount::Mount;
-use serde::Serialize;
-use serde::de::DeserializeOwned;
-use serde_json;

 use store;
-use util::LOGGER;

 /// Errors that can be returned by an ApiEndpoint implementation.
 #[derive(Debug)]

@@ -87,161 +79,6 @@ impl From<store::Error> for Error {
     }
 }

-#[derive(Debug, Clone)]
-#[allow(dead_code)]
-pub enum Operation {
-    Create,
-    Delete,
-    Update,
-    Get,
-    Custom(String),
-}
-
-impl Operation {
-    fn to_method(&self) -> Method {
-        match *self {
-            Operation::Create => Method::Post,
-            Operation::Delete => Method::Delete,
-            Operation::Update => Method::Put,
-            Operation::Get => Method::Get,
-            Operation::Custom(_) => Method::Post,
-        }
-    }
-}
-
-pub type ApiResult<T> = ::std::result::Result<T, Error>;
-
-/// Trait to implement to expose a service as a RESTful HTTP endpoint. Each
-/// method corresponds to a specific relative URL and HTTP method following
-/// basic REST principles:
-///
-/// * create: POST /
-/// * get: GET /:id
-/// * update: PUT /:id
-/// * delete: DELETE /:id
-///
-/// The methods method defines which operation the endpoint implements, they're
-/// all optional by default. It also allows the framework to automatically
-/// define the OPTIONS HTTP method.
-///
-/// The type accepted by create and update, and returned by get, must implement
-/// the serde Serialize and Deserialize traits. The identifier type returned by
-/// create and accepted by all other methods must have a string representation.
-pub trait ApiEndpoint: Clone + Send + Sync + 'static {
-    type ID: ToString + FromStr;
-    type T: Serialize + DeserializeOwned;
-    type OP_IN: Serialize + DeserializeOwned;
-    type OP_OUT: Serialize + DeserializeOwned;
-
-    fn operations(&self) -> Vec<Operation>;
-
-    #[allow(unused_variables)]
-    fn create(&self, o: Self::T) -> ApiResult<Self::ID> {
-        unimplemented!()
-    }
-
-    #[allow(unused_variables)]
-    fn delete(&self, id: Self::ID) -> ApiResult<()> {
-        unimplemented!()
-    }
-
-    #[allow(unused_variables)]
-    fn update(&self, id: Self::ID, o: Self::T) -> ApiResult<()> {
-        unimplemented!()
-    }
-
-    #[allow(unused_variables)]
-    fn get(&self, id: Self::ID) -> ApiResult<Self::T> {
-        unimplemented!()
-    }
-
-    #[allow(unused_variables)]
-    fn operation(&self, op: String, input: Self::OP_IN) -> ApiResult<Self::OP_OUT> {
-        unimplemented!()
-    }
-}
-
-// Wrapper required to define the implementation below, Rust doesn't let us
-// define the parametric implementation for trait from another crate.
-struct ApiWrapper<E>(E);
-
-impl<E> Handler for ApiWrapper<E>
-where E: ApiEndpoint,
-      <<E as ApiEndpoint>::ID as FromStr>::Err: Debug + Send + error::Error
-{
-    fn handle(&self, req: &mut Request) -> IronResult<Response> {
-        match req.method {
-            Method::Get => {
-                let res = self.0.get(extract_param(req, "id")?)?;
-                let res_json = serde_json::to_string(&res)
-                    .map_err(|e| IronError::new(e, status::InternalServerError))?;
-                Ok(Response::with((status::Ok, res_json)))
-            }
-            Method::Put => {
-                let id = extract_param(req, "id")?;
-                let t: E::T = serde_json::from_reader(req.body.by_ref())
-                    .map_err(|e| IronError::new(e, status::BadRequest))?;
-                self.0.update(id, t)?;
-                Ok(Response::with(status::NoContent))
-            }
-            Method::Delete => {
-                let id = extract_param(req, "id")?;
-                self.0.delete(id)?;
-                Ok(Response::with(status::NoContent))
-            }
-            Method::Post => {
-                let t: E::T = serde_json::from_reader(req.body.by_ref())
-                    .map_err(|e| IronError::new(e, status::BadRequest))?;
-                let id = self.0.create(t)?;
-                Ok(Response::with((status::Created, id.to_string())))
-            }
-            _ => Ok(Response::with(status::MethodNotAllowed)),
-        }
-    }
-}
-
-struct OpWrapper<E> {
-    operation: String,
-    endpoint: E,
-}
-
-impl<E> Handler for OpWrapper<E>
-where E: ApiEndpoint
-{
-    fn handle(&self, req: &mut Request) -> IronResult<Response> {
-        let t: E::OP_IN = serde_json::from_reader(req.body.by_ref()).map_err(|e| {
-            IronError::new(e, status::BadRequest)
-        })?;
-        let res = self.endpoint.operation(self.operation.clone(), t);
-        match res {
-            Ok(resp) => {
-                let res_json = serde_json::to_string(&resp).map_err(|e| {
-                    IronError::new(e, status::InternalServerError)
-                })?;
-                Ok(Response::with((status::Ok, res_json)))
-            }
-            Err(e) => {
-                error!(LOGGER, "API operation: {:?}", e);
-                Err(IronError::from(e))
-            }
-        }
-    }
-}
-
-fn extract_param<ID>(req: &mut Request, param: &'static str) -> IronResult<ID>
-where ID: ToString + FromStr,
-      <ID as FromStr>::Err: Debug + Send + error::Error + 'static
-{
-    let id = req.extensions
-        .get::<Router>()
-        .unwrap()
-        .find(param)
-        .unwrap_or("");
-    id.parse::<ID>()
-        .map_err(|e| IronError::new(e, status::BadRequest))
-}
-
 /// HTTP server allowing the registration of ApiEndpoint implementations.
 pub struct ApiServer {
     root: String,

@@ -281,119 +118,7 @@ impl ApiServer {
     }

     /// Registers an iron handler (via mount)
-    pub fn register_handler<H: Handler>(&mut self, route: &str, handler: H) -> &mut Mount {
-        self.mount.mount(route, handler)
-    }
-
-    /// Register a new API endpoint, providing a relative URL for the new
-    /// endpoint.
-    pub fn register_endpoint<E>(&mut self, subpath: String, endpoint: E)
-    where E: ApiEndpoint,
-          <<E as ApiEndpoint>::ID as FromStr>::Err: Debug + Send + error::Error
-    {
-        assert_eq!(subpath.chars().nth(0).unwrap(), '/');
-
-        // declare a route for each method actually implemented by the endpoint
-        let route_postfix = &subpath[1..];
-        let root = self.root.clone() + &subpath;
-        for op in endpoint.operations() {
-            let route_name = format!("{:?}_{}", op, route_postfix);
-
-            // special case of custom operations
-            if let Operation::Custom(op_s) = op.clone() {
-                let wrapper = OpWrapper {
-                    operation: op_s.clone(),
-                    endpoint: endpoint.clone(),
-                };
-                let full_path = format!("{}/{}", root.clone(), op_s.clone());
-                self.router
-                    .route(op.to_method(), full_path.clone(), wrapper, route_name);
-                info!(LOGGER, "route: POST {}", full_path);
-            } else {
-                // regular REST operations
-                let full_path = match op.clone() {
-                    Operation::Get => root.clone() + "/:id",
-                    Operation::Update => root.clone() + "/:id",
-                    Operation::Delete => root.clone() + "/:id",
-                    Operation::Create => root.clone(),
-                    _ => panic!("unreachable"),
-                };
-                let wrapper = ApiWrapper(endpoint.clone());
-                self.router
-                    .route(op.to_method(), full_path.clone(), wrapper, route_name);
-                info!(LOGGER, "route: {} {}", op.to_method(), full_path);
-            }
-        }
-
-        // support for the HTTP Options method by differentiating what's on the
-        // root resource vs the id resource
-        let (root_opts, sub_opts) = endpoint
-            .operations()
-            .iter()
-            .fold((vec![], vec![]), |mut acc, op| {
-                let m = op.to_method();
-                if m == Method::Post {
-                    acc.0.push(m);
-                } else {
-                    acc.1.push(m);
-                }
-                acc
-            });
-        self.router.options(
-            root.clone(),
-            move |_: &mut Request| {
-                Ok(Response::with((status::Ok, Header(headers::Allow(root_opts.clone())))))
-            },
-            "option_".to_string() + route_postfix,
-        );
-        self.router.options(
-            root.clone() + "/:id",
-            move |_: &mut Request| {
-                Ok(Response::with((status::Ok, Header(headers::Allow(sub_opts.clone())))))
-            },
-            "option_id_".to_string() + route_postfix,
-        );
-    }
-}
-
-#[cfg(test)]
-mod test {
-    use super::*;
-
-    #[derive(Serialize, Deserialize)]
-    pub struct Animal {
-        name: String,
-        legs: u32,
-        lethal: bool,
-    }
-
-    #[derive(Clone)]
-    pub struct TestApi;
-
-    impl ApiEndpoint for TestApi {
-        type ID = String;
-        type T = Animal;
-        type OP_IN = ();
-        type OP_OUT = ();
-
-        fn operations(&self) -> Vec<Operation> {
-            vec![Operation::Get]
-        }
-
-        fn get(&self, name: String) -> ApiResult<Animal> {
-            Ok(Animal {
-                name: name,
-                legs: 4,
-                lethal: false,
-            })
-        }
-    }
-
-    #[test]
-    fn req_chain_json() {
-        let mut apis = ApiServer::new("/v1".to_string());
-        apis.register_endpoint("/animal".to_string(), TestApi);
+    pub fn register_handler<H: Handler>(&mut self, handler: H) -> &mut Mount {
+        self.mount.mount(&self.root, handler)
     }
 }

@@ -59,7 +59,7 @@ pub struct SumTrees {

 impl SumTrees {
     pub fn from_head(head: Arc<chain::Chain>) -> SumTrees {
-        let roots=head.get_sumtree_roots();
+        let roots = head.get_sumtree_roots();
         SumTrees {
             utxo_root_hash: util::to_hex(roots.0.hash.to_vec()),
             utxo_root_sum: util::to_hex(roots.0.sum.commit.0.to_vec()),

@@ -80,8 +80,7 @@ pub struct SumTreeNode {
 }

 impl SumTreeNode {
-
-    pub fn get_last_n_utxo(chain: Arc<chain::Chain>, distance:u64) -> Vec<SumTreeNode> {
+    pub fn get_last_n_utxo(chain: Arc<chain::Chain>, distance: u64) -> Vec<SumTreeNode> {
         let mut return_vec = Vec::new();
         let last_n = chain.get_last_n_utxo(distance);
         for elem_output in last_n {

@@ -101,7 +100,7 @@ impl SumTreeNode {
         return_vec
     }

-    pub fn get_last_n_rangeproof(head: Arc<chain::Chain>, distance:u64) -> Vec<SumTreeNode> {
+    pub fn get_last_n_rangeproof(head: Arc<chain::Chain>, distance: u64) -> Vec<SumTreeNode> {
         let mut return_vec = Vec::new();
         let last_n = head.get_last_n_rangeproof(distance);
         for elem in last_n {

@@ -113,7 +112,7 @@ impl SumTreeNode {
         return_vec
     }

-    pub fn get_last_n_kernel(head: Arc<chain::Chain>, distance:u64) -> Vec<SumTreeNode> {
+    pub fn get_last_n_kernel(head: Arc<chain::Chain>, distance: u64) -> Vec<SumTreeNode> {
         let mut return_vec = Vec::new();
         let last_n = head.get_last_n_kernel(distance);
         for elem in last_n {

@@ -149,9 +148,10 @@ pub struct Output {

 impl Output {
     pub fn from_output(output: &core::Output, block_header: &core::BlockHeader) -> Output {
         let (output_type, lock_height) = match output.features {
-            x if x.contains(core::transaction::COINBASE_OUTPUT) => {
-                (OutputType::Coinbase, block_header.height + global::coinbase_maturity())
-            }
+            x if x.contains(core::transaction::COINBASE_OUTPUT) => (
+                OutputType::Coinbase,
+                block_header.height + global::coinbase_maturity(),
+            ),
             _ => (OutputType::Transaction, 0),
         };

@@ -165,12 +165,13 @@ impl Output {
     }
 }

-//As above, except formatted a bit better for human viewing
+// As above, except formatted a bit better for human viewing
 #[derive(Debug, Serialize, Deserialize, Clone)]
 pub struct OutputPrintable {
     /// The type of output Coinbase|Transaction
     pub output_type: OutputType,
-    /// The homomorphic commitment representing the output's amount (as hex string)
+    /// The homomorphic commitment representing the output's amount (as hex
+    /// string)
     pub commit: String,
     /// The height of the block creating this output
     pub height: u64,

@@ -185,9 +186,10 @@ pub struct OutputPrintable {

 impl OutputPrintable {
     pub fn from_output(output: &core::Output, block_header: &core::BlockHeader) -> OutputPrintable {
         let (output_type, lock_height) = match output.features {
-            x if x.contains(core::transaction::COINBASE_OUTPUT) => {
-                (OutputType::Coinbase, block_header.height + global::coinbase_maturity())
-            }
+            x if x.contains(core::transaction::COINBASE_OUTPUT) => (
+                OutputType::Coinbase,
+                block_header.height + global::coinbase_maturity(),
+            ),
             _ => (OutputType::Transaction, 0),
         };

         OutputPrintable {

@@ -20,8 +20,8 @@ use std::sync::{Arc, Mutex, RwLock};

 use util::secp::pedersen::{Commitment, RangeProof};

-use core::core::{SumCommit};
-use core::core::pmmr::{NoSum, HashSum};
+use core::core::SumCommit;
+use core::core::pmmr::{HashSum, NoSum};
 use core::core::{Block, BlockHeader, Output, TxKernel};
 use core::core::target::Difficulty;

@@ -119,7 +119,9 @@ impl Chain {
     /// has been added to the longest chain, None if it's added to an (as of
     /// now) orphan chain.
     pub fn process_block(&self, b: Block, opts: Options) -> Result<Option<Tip>, Error> {
-        let head = self.store.head().map_err(|e| Error::StoreErr(e, "chain load head".to_owned()))?;
+        let head = self.store
+            .head()
+            .map_err(|e| Error::StoreErr(e, "chain load head".to_owned()))?;
         let height = head.height;
         let ctx = self.ctx_from_head(head, opts);

@@ -143,13 +145,11 @@ impl Chain {
                 self.check_orphans();
             }
             Ok(None) => {}
-            Err(Error::Orphan) => {
-                if b.header.height < height + (MAX_ORPHANS as u64) {
-                    let mut orphans = self.orphans.lock().unwrap();
-                    orphans.push_front((opts, b));
-                    orphans.truncate(MAX_ORPHANS);
-                }
-            }
+            Err(Error::Orphan) => if b.header.height < height + (MAX_ORPHANS as u64) {
+                let mut orphans = self.orphans.lock().unwrap();
+                orphans.push_front((opts, b));
+                orphans.truncate(MAX_ORPHANS);
+            },
             Err(ref e) => {
                 info!(
                     LOGGER,

@@ -171,8 +171,9 @@ impl Chain {
         bh: &BlockHeader,
         opts: Options,
     ) -> Result<Option<Tip>, Error> {
-
-        let head = self.store.get_header_head().map_err(|e| Error::StoreErr(e, "chain header head".to_owned()))?;
+        let head = self.store
+            .get_header_head()
+            .map_err(|e| Error::StoreErr(e, "chain header head".to_owned()))?;
         let ctx = self.ctx_from_head(head, opts);

         pipe::process_block_header(bh, ctx)

@@ -227,9 +228,9 @@ impl Chain {
         let sumtrees = self.sumtrees.read().unwrap();
         let is_unspent = sumtrees.is_unspent(output_ref)?;
         if is_unspent {
-            self.store.get_output_by_commit(output_ref).map_err(|e|
-                Error::StoreErr(e, "chain get unspent".to_owned())
-            )
+            self.store
+                .get_output_by_commit(output_ref)
+                .map_err(|e| Error::StoreErr(e, "chain get unspent".to_owned()))
         } else {
             Err(Error::OutputNotFound)
         }

@@ -254,9 +255,13 @@ impl Chain {
     }

     /// returs sumtree roots
-    pub fn get_sumtree_roots(&self) -> (HashSum<SumCommit>,
+    pub fn get_sumtree_roots(
+        &self,
+    ) -> (
+        HashSum<SumCommit>,
         HashSum<NoSum<RangeProof>>,
-        HashSum<NoSum<TxKernel>>) {
+        HashSum<NoSum<TxKernel>>,
+    ) {
         let mut sumtrees = self.sumtrees.write().unwrap();
         sumtrees.roots()
     }

@@ -264,10 +269,10 @@ impl Chain {
     /// returns the last n nodes inserted into the utxo sum tree
     /// returns sum tree hash plus output itself (as the sum is contained
     /// in the output anyhow)
-    pub fn get_last_n_utxo(&self, distance: u64) -> Vec<(Hash, Output)>{
+    pub fn get_last_n_utxo(&self, distance: u64) -> Vec<(Hash, Output)> {
         let mut sumtrees = self.sumtrees.write().unwrap();
         let mut return_vec = Vec::new();
-        let sum_nodes=sumtrees.last_n_utxo(distance);
+        let sum_nodes = sumtrees.last_n_utxo(distance);
         for sum_commit in sum_nodes {
             let output = self.store.get_output_by_commit(&sum_commit.sum.commit);
             return_vec.push((sum_commit.hash, output.unwrap()));

@@ -276,13 +281,13 @@ impl Chain {
     }

     /// as above, for rangeproofs
-    pub fn get_last_n_rangeproof(&self, distance: u64) -> Vec<HashSum<NoSum<RangeProof>>>{
+    pub fn get_last_n_rangeproof(&self, distance: u64) -> Vec<HashSum<NoSum<RangeProof>>> {
         let mut sumtrees = self.sumtrees.write().unwrap();
         sumtrees.last_n_rangeproof(distance)
     }

     /// as above, for kernels
-    pub fn get_last_n_kernel(&self, distance: u64) -> Vec<HashSum<NoSum<TxKernel>>>{
+    pub fn get_last_n_kernel(&self, distance: u64) -> Vec<HashSum<NoSum<TxKernel>>> {
         let mut sumtrees = self.sumtrees.write().unwrap();
         sumtrees.last_n_kernel(distance)
     }

@@ -299,24 +304,30 @@ impl Chain {

     /// Block header for the chain head
     pub fn head_header(&self) -> Result<BlockHeader, Error> {
-        self.store.head_header().map_err(|e| Error::StoreErr(e, "chain head header".to_owned()))
+        self.store
+            .head_header()
+            .map_err(|e| Error::StoreErr(e, "chain head header".to_owned()))
     }

     /// Gets a block header by hash
     pub fn get_block(&self, h: &Hash) -> Result<Block, Error> {
-        self.store.get_block(h).map_err(|e| Error::StoreErr(e, "chain get block".to_owned()))
+        self.store
+            .get_block(h)
+            .map_err(|e| Error::StoreErr(e, "chain get block".to_owned()))
     }

     /// Gets a block header by hash
     pub fn get_block_header(&self, h: &Hash) -> Result<BlockHeader, Error> {
-        self.store.get_block_header(h).map_err(|e| Error::StoreErr(e, "chain get header".to_owned()))
+        self.store
+            .get_block_header(h)
+            .map_err(|e| Error::StoreErr(e, "chain get header".to_owned()))
     }

     /// Gets the block header at the provided height
     pub fn get_header_by_height(&self, height: u64) -> Result<BlockHeader, Error> {
-        self.store.get_header_by_height(height).map_err(|e|
-            Error::StoreErr(e, "chain get header by height".to_owned()),
-        )
+        self.store.get_header_by_height(height).map_err(|e| {
+            Error::StoreErr(e, "chain get header by height".to_owned())
+        })
     }

     /// Gets the block header by the provided output commitment

@@ -331,7 +342,9 @@ impl Chain {

     /// Get the tip of the header chain
     pub fn get_header_head(&self) -> Result<Tip, Error> {
-        self.store.get_header_head().map_err(|e |Error::StoreErr(e, "chain get header head".to_owned()))
+        self.store
+            .get_header_head()
+            .map_err(|e| Error::StoreErr(e, "chain get header head".to_owned()))
     }

     /// Builds an iterator on blocks starting from the current chain head and

@@ -23,16 +23,16 @@
 #[macro_use]
 extern crate bitflags;
 extern crate byteorder;
-#[macro_use]
-extern crate slog;
 extern crate serde;
 #[macro_use]
 extern crate serde_derive;
+#[macro_use]
+extern crate slog;
 extern crate time;

 extern crate grin_core as core;
-extern crate grin_util as util;
 extern crate grin_store;
+extern crate grin_util as util;

 mod chain;
 pub mod pipe;

@@ -43,4 +43,4 @@ pub mod types;

 // Re-export the base interface
 pub use chain::Chain;
-pub use types::{ChainStore, Tip, ChainAdapter, SYNC, NONE, SKIP_POW, EASY_POW, Options, Error};
+pub use types::{ChainAdapter, ChainStore, Error, Options, Tip, EASY_POW, NONE, SKIP_POW, SYNC};

@@ -21,7 +21,7 @@ use time;

 use core::consensus;
 use core::core::hash::{Hash, Hashed};
-use core::core::{BlockHeader, Block};
+use core::core::{Block, BlockHeader};
 use core::core::transaction;
 use types::*;
 use store;

@@ -68,13 +68,13 @@ pub fn process_block(b: &Block, mut ctx: BlockContext) -> Result<Option<Tip>, Er
     let mut sumtrees = local_sumtrees.write().unwrap();

     // update head now that we're in the lock
-    ctx.head = ctx.store.head().
-        map_err(|e| Error::StoreErr(e, "pipe reload head".to_owned()))?;
+    ctx.head = ctx.store
+        .head()
+        .map_err(|e| Error::StoreErr(e, "pipe reload head".to_owned()))?;

     // start a chain extension unit of work dependent on the success of the
     // internal validation and saving operations
     sumtree::extending(&mut sumtrees, |mut extension| {
         validate_block(b, &mut ctx, &mut extension)?;
         debug!(
             LOGGER,

@@ -94,7 +94,6 @@ pub fn process_block(b: &Block, mut ctx: BlockContext) -> Result<Option<Tip>, Er

 /// Process the block header
 pub fn process_block_header(bh: &BlockHeader, mut ctx: BlockContext) -> Result<Option<Tip>, Error> {
-
     info!(
         LOGGER,
         "Starting validation pipeline for block header {} at {}.",

@@ -147,8 +146,8 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), E
         return Err(Error::InvalidBlockVersion(header.version));
     }

-    if header.timestamp >
-        time::now_utc() + time::Duration::seconds(12 * (consensus::BLOCK_TIME_SEC as i64))
+    if header.timestamp
+        > time::now_utc() + time::Duration::seconds(12 * (consensus::BLOCK_TIME_SEC as i64))
     {
         // refuse blocks more than 12 blocks intervals in future (as in bitcoin)
         // TODO add warning in p2p code if local time is too different from peers

@@ -168,9 +167,9 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), E
     }

     // first I/O cost, better as late as possible
-    let prev = try!(ctx.store.get_block_header(&header.previous).map_err(|e|
-        Error::StoreErr(e, format!("previous block header {}", header.previous)),
-    ));
+    let prev = try!(ctx.store.get_block_header(&header.previous,).map_err(|e| {
+        Error::StoreErr(e, format!("previous block header {}", header.previous))
+    },));

     if header.height != prev.height + 1 {
         return Err(Error::InvalidBlockHeight);

@@ -189,9 +188,8 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), E
     }

     let diff_iter = store::DifficultyIter::from(header.previous, ctx.store.clone());
-    let difficulty = consensus::next_difficulty(diff_iter).map_err(|e| {
-        Error::Other(e.to_string())
-    })?;
+    let difficulty =
+        consensus::next_difficulty(diff_iter).map_err(|e| Error::Other(e.to_string()))?;
     if header.difficulty < difficulty {
         return Err(Error::DifficultyTooLow);
     }

@@ -219,7 +217,6 @@ fn validate_block(
         // standard head extension
         ext.apply_block(b)?;
     } else {
-
         // extending a fork, first identify the block where forking occurred
         // keeping the hashes of blocks along the fork
         let mut current = b.header.previous;

@@ -241,11 +238,7 @@ fn validate_block(
         if forked_block.header.height > 0 {
             let last_output = &forked_block.outputs[forked_block.outputs.len() - 1];
             let last_kernel = &forked_block.kernels[forked_block.kernels.len() - 1];
-            ext.rewind(
-                forked_block.header.height,
-                last_output,
-                last_kernel,
-            )?;
+            ext.rewind(forked_block.header.height, last_output, last_kernel)?;
         }

         // apply all forked blocks, including this new one

@@ -257,10 +250,9 @@ fn validate_block(
     }

     let (utxo_root, rproof_root, kernel_root) = ext.roots();
-    if utxo_root.hash != b.header.utxo_root || rproof_root.hash != b.header.range_proof_root ||
-        kernel_root.hash != b.header.kernel_root
+    if utxo_root.hash != b.header.utxo_root || rproof_root.hash != b.header.range_proof_root
+        || kernel_root.hash != b.header.kernel_root
     {
         ext.dump(false);
         return Err(Error::InvalidRoot);
     }

@@ -269,12 +261,9 @@ fn validate_block(
     for input in &b.inputs {
         if let Ok(output) = ctx.store.get_output_by_commit(&input.commitment()) {
             if output.features.contains(transaction::COINBASE_OUTPUT) {
-                if let Ok(output_header) =
-                    ctx.store.get_block_header_by_output_commit(
-                        &input.commitment(),
-                    )
+                if let Ok(output_header) = ctx.store
+                    .get_block_header_by_output_commit(&input.commitment())
                 {
                     // TODO - make sure we are not off-by-1 here vs. the equivalent tansaction
                     // validation rule
                     if b.header.height <= output_header.height + global::coinbase_maturity() {

@@ -290,12 +279,16 @@ fn validate_block(

 /// Officially adds the block to our chain.
 fn add_block(b: &Block, ctx: &mut BlockContext) -> Result<(), Error> {
-    ctx.store.save_block(b).map_err(|e| Error::StoreErr(e, "pipe save block".to_owned()))
+    ctx.store
+        .save_block(b)
+        .map_err(|e| Error::StoreErr(e, "pipe save block".to_owned()))
 }

 /// Officially adds the block header to our header chain.
 fn add_block_header(bh: &BlockHeader, ctx: &mut BlockContext) -> Result<(), Error> {
-    ctx.store.save_block_header(bh).map_err(|e| Error::StoreErr(e, "pipe save header".to_owned()))
+    ctx.store
+        .save_block_header(bh)
+        .map_err(|e| Error::StoreErr(e, "pipe save header".to_owned()))
 }

 /// Directly updates the head if we've just appended a new block to it or handle

@@ -306,20 +299,30 @@ fn update_head(b: &Block, ctx: &mut BlockContext) -> Result<Option<Tip>, Error>
     // when extending the head), update it
     let tip = Tip::from_block(&b.header);
     if tip.total_difficulty > ctx.head.total_difficulty {
-
         // update the block height index
-        ctx.store.setup_height(&b.header).map_err(|e| Error::StoreErr(e, "pipe setup height".to_owned()))?;
+        ctx.store
+            .setup_height(&b.header)
+            .map_err(|e| Error::StoreErr(e, "pipe setup height".to_owned()))?;

         // in sync mode, only update the "body chain", otherwise update both the
         // "header chain" and "body chain", updating the header chain in sync resets
         // all additional "future" headers we've received
         if ctx.opts.intersects(SYNC) {
-            ctx.store.save_body_head(&tip).map_err(|e| Error::StoreErr(e, "pipe save body".to_owned()))?;
+            ctx.store
+                .save_body_head(&tip)
+                .map_err(|e| Error::StoreErr(e, "pipe save body".to_owned()))?;
         } else {
-            ctx.store.save_head(&tip).map_err(|e| Error::StoreErr(e, "pipe save head".to_owned()))?;
+            ctx.store
+                .save_head(&tip)
+                .map_err(|e| Error::StoreErr(e, "pipe save head".to_owned()))?;
         }

         ctx.head = tip.clone();
-        info!(LOGGER, "Updated head to {} at {}.", b.hash(), b.header.height);
+        info!(
+            LOGGER,
+            "Updated head to {} at {}.",
+            b.hash(),
+            b.header.height
+        );
         Ok(Some(tip))
     } else {
         Ok(None)

@@ -334,7 +337,9 @@ fn update_header_head(bh: &BlockHeader, ctx: &mut BlockContext) -> Result<Option
     // when extending the head), update it
     let tip = Tip::from_block(bh);
     if tip.total_difficulty > ctx.head.total_difficulty {
-        ctx.store.save_header_head(&tip).map_err(|e| Error::StoreErr(e, "pipe save header head".to_owned()))?;
+        ctx.store
+            .save_header_head(&tip)
+            .map_err(|e| Error::StoreErr(e, "pipe save header head".to_owned()))?;
         ctx.head = tip.clone();
         info!(

@@ -23,7 +23,7 @@ use core::core::hash::{Hash, Hashed};
 use core::core::{Block, BlockHeader, Output};
 use core::consensus::TargetError;
 use core::core::target::Difficulty;
-use grin_store::{self, Error, to_key, u64_to_key, option_to_not_found};
+use grin_store::{self, option_to_not_found, to_key, Error, u64_to_key};

 const STORE_SUBPATH: &'static str = "chain";

@@ -85,9 +85,10 @@ impl ChainStore for ChainKVStore {
     }

     fn get_block_header(&self, h: &Hash) -> Result<BlockHeader, Error> {
-        option_to_not_found(self.db.get_ser(
-            &to_key(BLOCK_HEADER_PREFIX, &mut h.to_vec()),
-        ))
+        option_to_not_found(
+            self.db
+                .get_ser(&to_key(BLOCK_HEADER_PREFIX, &mut h.to_vec())),
+        )
     }

     fn check_block_exists(&self, h: &Hash) -> Result<bool, Error> {

@@ -111,16 +112,14 @@ impl ChainStore for ChainKVStore {
                 &to_key(
                     OUTPUT_COMMIT_PREFIX,
                     &mut out.commitment().as_ref().to_vec(),
-                )
-                [..],
+                )[..],
                 out,
             )?
             .put_ser(
                 &to_key(
                     HEADER_BY_OUTPUT_PREFIX,
                     &mut out.commitment().as_ref().to_vec(),
-                )
-                [..],
+                )[..],
                 &b.hash(),
             )?;
     }

@@ -172,10 +171,10 @@ impl ChainStore for ChainKVStore {
     }

     fn get_output_by_commit(&self, commit: &Commitment) -> Result<Output, Error> {
-        option_to_not_found(self.db.get_ser(&to_key(
-            OUTPUT_COMMIT_PREFIX,
-            &mut commit.as_ref().to_vec(),
-        )))
+        option_to_not_found(
+            self.db
+                .get_ser(&to_key(OUTPUT_COMMIT_PREFIX, &mut commit.as_ref().to_vec())),
+        )
     }

     fn save_output_pos(&self, commit: &Commitment, pos: u64) -> Result<(), Error> {

@@ -186,10 +185,10 @@ impl ChainStore for ChainKVStore {
     }

     fn get_output_pos(&self, commit: &Commitment) -> Result<u64, Error> {
-        option_to_not_found(self.db.get_ser(&to_key(
-            COMMIT_POS_PREFIX,
-            &mut commit.as_ref().to_vec(),
-        )))
+        option_to_not_found(
+            self.db
+                .get_ser(&to_key(COMMIT_POS_PREFIX, &mut commit.as_ref().to_vec())),
+        )
     }

     fn save_kernel_pos(&self, excess: &Commitment, pos: u64) -> Result<(), Error> {

@@ -200,10 +199,10 @@ impl ChainStore for ChainKVStore {
     }

     fn get_kernel_pos(&self, excess: &Commitment) -> Result<u64, Error> {
-        option_to_not_found(self.db.get_ser(&to_key(
-            KERNEL_POS_PREFIX,
-            &mut excess.as_ref().to_vec(),
-        )))
+        option_to_not_found(
+            self.db
+                .get_ser(&to_key(KERNEL_POS_PREFIX, &mut excess.as_ref().to_vec())),
+        )
     }

     /// Maintain consistency of the "header_by_height" index by traversing back

@@ -213,10 +212,8 @@ impl ChainStore for ChainKVStore {
     /// that is consistent with its height (everything prior to this will be
     /// consistent)
     fn setup_height(&self, bh: &BlockHeader) -> Result<(), Error> {
-        self.db.put_ser(
-            &u64_to_key(HEADER_HEIGHT_PREFIX, bh.height),
-            bh,
-        )?;
+        self.db
+            .put_ser(&u64_to_key(HEADER_HEIGHT_PREFIX, bh.height), bh)?;
         if bh.height == 0 {
             return Ok(());
         }

@@ -24,7 +24,7 @@ use util::secp;
 use util::secp::pedersen::{RangeProof, Commitment};

 use core::core::{Block, Output, SumCommit, TxKernel};
-use core::core::pmmr::{Summable, NoSum, PMMR, HashSum, Backend};
+use core::core::pmmr::{Backend, HashSum, NoSum, Summable, PMMR};
 use grin_store;
 use grin_store::sumtree::PMMRBackend;
 use types::ChainStore;

@@ -121,7 +121,11 @@ impl SumTrees {
     /// Get sum tree roots
     pub fn roots(
         &mut self,
-    ) -> (HashSum<SumCommit>, HashSum<NoSum<RangeProof>>, HashSum<NoSum<TxKernel>>) {
+    ) -> (
+        HashSum<SumCommit>,
+        HashSum<NoSum<RangeProof>>,
+        HashSum<NoSum<TxKernel>>,
+    ) {
         let output_pmmr = PMMR::at(&mut self.output_pmmr_h.backend, self.output_pmmr_h.last_pos);
         let rproof_pmmr = PMMR::at(&mut self.rproof_pmmr_h.backend, self.rproof_pmmr_h.last_pos);
         let kernel_pmmr = PMMR::at(&mut self.kernel_pmmr_h.backend, self.kernel_pmmr_h.last_pos);

@@ -140,7 +144,6 @@ pub fn extending<'a, F, T>(trees: &'a mut SumTrees, inner: F) -> Result<T, Error
 where
     F: FnOnce(&mut Extension) -> Result<T, Error>,
 {
-
     let sizes: (u64, u64, u64);
     let res: Result<T, Error>;
     let rollback: bool;

@@ -319,7 +322,11 @@ impl<'a> Extension<'a> {
     /// and kernel sum trees.
     pub fn roots(
         &self,
-    ) -> (HashSum<SumCommit>, HashSum<NoSum<RangeProof>>, HashSum<NoSum<TxKernel>>) {
+    ) -> (
+        HashSum<SumCommit>,
+        HashSum<NoSum<RangeProof>>,
+        HashSum<NoSum<TxKernel>>,
+    ) {
         (
             self.output_pmmr.root(),
             self.rproof_pmmr.root(),

@@ -19,7 +19,7 @@ use std::io;
 use util::secp::pedersen::Commitment;

 use grin_store as store;
-use core::core::{Block, BlockHeader, block, Output};
+use core::core::{block, Block, BlockHeader, Output};
 use core::core::hash::{Hash, Hashed};
 use core::core::target::Difficulty;
 use core::ser;

@@ -209,9 +209,10 @@ pub trait ChainStore: Send + Sync {
     fn get_output_by_commit(&self, commit: &Commitment) -> Result<Output, store::Error>;

     /// Gets a block_header for the given input commit
-    fn get_block_header_by_output_commit(&self,
-                                         commit: &Commitment)
-                                         -> Result<BlockHeader, store::Error>;
+    fn get_block_header_by_output_commit(
+        &self,
+        commit: &Commitment,
+    ) -> Result<BlockHeader, store::Error>;

     /// Saves the position of an output, represented by its commitment, in the
     /// UTXO MMR. Used as an index for spending and pruning.
@@ -12,13 +12,13 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-extern crate grin_core as core;
-extern crate grin_chain as chain;
-extern crate grin_keychain as keychain;
 extern crate env_logger;
-extern crate time;
-extern crate rand;
+extern crate grin_chain as chain;
+extern crate grin_core as core;
+extern crate grin_keychain as keychain;
 extern crate grin_pow as pow;
+extern crate rand;
+extern crate time;

 use std::fs;
 use std::sync::Arc;
@@ -34,7 +34,7 @@ use core::global::MiningParameterMode;
 use keychain::Keychain;
-use pow::{types, cuckoo, MiningWorker};
+use pow::{cuckoo, types, MiningWorker};

 fn clean_output_dir(dir_name: &str) {
 	let _ = fs::remove_dir_all(dir_name);
@@ -12,13 +12,13 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-extern crate grin_core as core;
+extern crate env_logger;
 extern crate grin_chain as chain;
+extern crate grin_core as core;
 extern crate grin_keychain as keychain;
 extern crate grin_pow as pow;
-extern crate env_logger;
-extern crate time;
 extern crate rand;
+extern crate time;

 use std::fs;
 use std::sync::Arc;
@@ -32,7 +32,7 @@ use core::global::MiningParameterMode;
 use keychain::Keychain;
-use pow::{types, cuckoo, MiningWorker};
+use pow::{cuckoo, types, MiningWorker};

 fn clean_output_dir(dir_name: &str) {
 	let _ = fs::remove_dir_all(dir_name);
@@ -91,9 +91,11 @@ fn test_coinbase_maturity() {
 	).unwrap();

 	assert_eq!(block.outputs.len(), 1);
-	assert!(block.outputs[0].features.contains(
-		transaction::COINBASE_OUTPUT,
-	));
+	assert!(
+		block.outputs[0]
+			.features
+			.contains(transaction::COINBASE_OUTPUT,)
+	);

 	chain.process_block(block, chain::EASY_POW).unwrap();
@@ -109,7 +111,8 @@ fn test_coinbase_maturity() {
 		&keychain,
 	).unwrap();

-	let mut block = core::core::Block::new(&prev, vec![&coinbase_txn], &keychain, &key_id3).unwrap();
+	let mut block =
+		core::core::Block::new(&prev, vec![&coinbase_txn], &keychain, &key_id3).unwrap();
 	block.header.timestamp = prev.timestamp + time::Duration::seconds(60);

 	let difficulty = consensus::next_difficulty(chain.difficulty_iter()).unwrap();
@@ -156,7 +159,8 @@ fn test_coinbase_maturity() {
 	let prev = chain.head_header().unwrap();

-	let mut block = core::core::Block::new(&prev, vec![&coinbase_txn], &keychain, &key_id4).unwrap();
+	let mut block =
+		core::core::Block::new(&prev, vec![&coinbase_txn], &keychain, &key_id4).unwrap();
 	block.header.timestamp = prev.timestamp + time::Duration::seconds(60);
@@ -23,7 +23,7 @@ use toml;
 use grin::ServerConfig;
 use pow::types::MinerConfig;
 use util::LoggingConfig;
-use types::{ConfigMembers, GlobalConfig, ConfigError};
+use types::{ConfigError, ConfigMembers, GlobalConfig};

 /// The default file name to use when trying to derive
 /// the config file location
@@ -86,7 +86,6 @@ impl GlobalConfig {
 		// Give up
 		Err(ConfigError::FileNotFoundError(String::from("")))
 	}
-
 	/// Takes the path to a config file, or if NONE, tries
@@ -98,7 +97,7 @@ impl GlobalConfig {
 		if let Some(fp) = file_path {
 			return_value.config_file_path = Some(PathBuf::from(&fp));
 		} else {
-			let _result=return_value.derive_config_location();
+			let _result = return_value.derive_config_location();
 		}

 		// No attempt at a config file, just return defaults
@@ -28,11 +28,11 @@ extern crate toml;
 extern crate grin_grin as grin;
 extern crate grin_p2p as p2p;
-extern crate grin_wallet as wallet;
 extern crate grin_pow as pow;
 extern crate grin_util as util;
+extern crate grin_wallet as wallet;

 pub mod config;
 pub mod types;

-pub use types::{GlobalConfig, ConfigMembers, ConfigError};
+pub use types::{ConfigError, ConfigMembers, GlobalConfig};
@@ -41,14 +41,12 @@ pub enum ConfigError {
 impl fmt::Display for ConfigError {
 	fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
 		match *self {
-			ConfigError::ParseError(ref file_name, ref message) => {
-				write!(
-					f,
-					"Error parsing configuration file at {} - {}",
-					file_name,
-					message
-				)
-			}
+			ConfigError::ParseError(ref file_name, ref message) => write!(
+				f,
+				"Error parsing configuration file at {} - {}",
+				file_name,
+				message
+			),
 			ConfigError::FileIOError(ref file_name, ref message) => {
 				write!(f, "{} {}", message, file_name)
 			}
@@ -13,15 +13,15 @@
 #![feature(test)]

-extern crate test;
-extern crate rand;
 extern crate grin_core as core;
+extern crate rand;
+extern crate test;

 use rand::Rng;
 use test::Bencher;
 use core::core::sumtree::{self, SumTree, Summable};
-use core::ser::{Writeable, Writer, Error};
+use core::ser::{Error, Writeable, Writer};

 #[derive(Copy, Clone, Debug)]
 struct TestElem([u32; 4]);
@@ -30,8 +30,8 @@ impl Summable for TestElem {
 	fn sum(&self) -> u64 {
 		// sums are not allowed to overflow, so we use this simple
 		// non-injective "sum" function that will still be homomorphic
-		self.0[0] as u64 * 0x1000 + self.0[1] as u64 * 0x100 + self.0[2] as u64 * 0x10 +
-			self.0[3] as u64
+		self.0[0] as u64 * 0x1000 + self.0[1] as u64 * 0x100 + self.0[2] as u64 * 0x10
+			+ self.0[3] as u64
 	}
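A quick worked check of this packing, assuming only the formula above: each u32 limb is scaled by a descending power of 16, so the four components read like hex digits.

    // TestElem([1, 2, 3, 4]): 1 * 0x1000 + 2 * 0x100 + 3 * 0x10 + 4
    //                        = 4096 + 512 + 48 + 4 = 4660 = 0x1234
    assert_eq!(TestElem([1, 2, 3, 4]).sum(), 0x1234);
    assert_eq!(TestElem([0, 0, 0, 7]).sum(), 7);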
@@ -28,7 +28,7 @@ use core::target::Difficulty;
 pub const GRIN_BASE: u64 = 1_000_000_000;

 /// The block subsidy amount
-pub const REWARD: u64 = 50*GRIN_BASE;
+pub const REWARD: u64 = 50 * GRIN_BASE;

 /// Actual block reward for a given total fee amount
 pub fn reward(fee: u64) -> u64 {
@@ -80,8 +80,8 @@ pub const MAX_BLOCK_WEIGHT: usize = 80_000;
 /// Whether a block exceeds the maximum acceptable weight
 pub fn exceeds_weight(input_len: usize, output_len: usize, kernel_len: usize) -> bool {
-	input_len * BLOCK_INPUT_WEIGHT + output_len * BLOCK_OUTPUT_WEIGHT +
-		kernel_len * BLOCK_KERNEL_WEIGHT > MAX_BLOCK_WEIGHT
+	input_len * BLOCK_INPUT_WEIGHT + output_len * BLOCK_OUTPUT_WEIGHT
+		+ kernel_len * BLOCK_KERNEL_WEIGHT > MAX_BLOCK_WEIGHT
 }
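To make the weight rule concrete, a small sketch; the per-item weight constants below are illustrative assumptions, since only MAX_BLOCK_WEIGHT = 80_000 appears in this hunk.

    // Assumed weights for illustration: input = 1, output = 10, kernel = 2.
    // A block with 5_000 inputs, 7_000 outputs and 2_000 kernels weighs
    // 5_000 * 1 + 7_000 * 10 + 2_000 * 2 = 79_000, under the 80_000 cap.
    assert!(!exceeds_weight(5_000, 7_000, 2_000));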
 /// Fork every 250,000 blocks for first 2 years, simple number and just a
@@ -150,7 +150,6 @@ pub fn next_difficulty<T>(cursor: T) -> Result<Difficulty, TargetError>
 where
 	T: IntoIterator<Item = Result<(u64, Difficulty), TargetError>>,
 {
-
 	// Block times at the beginning and end of the adjustment window, used to
 	// calculate medians later.
 	let mut window_begin = vec![];
@@ -204,9 +203,7 @@ where
 		ts_damp
 	};

-	Ok(
-		diff_avg * Difficulty::from_num(BLOCK_TIME_WINDOW) / Difficulty::from_num(adj_ts),
-	)
+	Ok(diff_avg * Difficulty::from_num(BLOCK_TIME_WINDOW) / Difficulty::from_num(adj_ts))
 }
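The returned value is simply the average difficulty rescaled by the ratio of the target window time to the damped observed time. A minimal sketch with made-up numbers, assuming Difficulty compares by its numeric value:

    // If the damped elapsed time is half the target window, blocks arrived
    // twice as fast as intended, so difficulty doubles.
    let diff_avg = Difficulty::from_num(1_000);
    let target = Difficulty::from_num(1_380); // e.g. a 23-block window at 60s blocks
    let adj_ts = Difficulty::from_num(690);
    assert_eq!(diff_avg * target / adj_ts, Difficulty::from_num(2_000));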
 /// Consensus rule that collections of items are sorted lexicographically over the wire.
@@ -20,12 +20,12 @@ use util::secp::{self, Secp256k1};
 use std::collections::HashSet;

 use core::Committed;
-use core::{Input, Output, SwitchCommitHash, Proof, TxKernel, Transaction, COINBASE_KERNEL,
-	COINBASE_OUTPUT};
-use consensus::{MINIMUM_DIFFICULTY, REWARD, reward, exceeds_weight};
+use core::{Input, Output, Proof, SwitchCommitHash, Transaction, TxKernel, COINBASE_KERNEL,
+	COINBASE_OUTPUT};
+use consensus::{exceeds_weight, reward, MINIMUM_DIFFICULTY, REWARD};
 use core::hash::{Hash, Hashed, ZERO_HASH};
 use core::target::Difficulty;
-use ser::{self, Readable, Reader, Writeable, Writer, WriteableSorted, read_and_verify_sorted};
+use ser::{self, read_and_verify_sorted, Readable, Reader, Writeable, WriteableSorted, Writer};
 use util::LOGGER;
 use global;
 use keychain;
@@ -282,7 +282,6 @@ impl Block {
 		reward_out: Output,
 		reward_kern: TxKernel,
 	) -> Result<Block, secp::Error> {
-
 		// note: the following reads easily but may not be the most efficient due to
 		// repeated iterations, revisit if a problem
 		let secp = Secp256k1::with_caps(secp::ContextFlag::Commit);
@@ -317,8 +316,8 @@
 				..time::now_utc()
 			},
 			previous: prev.hash(),
-			total_difficulty: prev.pow.clone().to_difficulty() +
-				prev.total_difficulty.clone(),
+			total_difficulty: prev.pow.clone().to_difficulty()
+				+ prev.total_difficulty.clone(),
 			..Default::default()
 		},
 		inputs: inputs,
@@ -439,7 +438,9 @@
 		}

 		if k.lock_height > self.header.height {
-			return Err(Error::KernelLockHeight { lock_height: k.lock_height });
+			return Err(Error::KernelLockHeight {
+				lock_height: k.lock_height,
+			});
 		}
 	}
@@ -470,13 +471,12 @@
 	// * That the sum of blinding factors for all coinbase-marked outputs match
 	//   the coinbase-marked kernels.
 	fn verify_coinbase(&self, secp: &Secp256k1) -> Result<(), Error> {
-		let cb_outs = filter_map_vec!(self.outputs, |out| if out.features.contains(
-			COINBASE_OUTPUT,
-		)
-		{
-			Some(out.commitment())
-		} else {
-			None
-		});
+		let cb_outs = filter_map_vec!(self.outputs, |out| {
+			if out.features.contains(COINBASE_OUTPUT) {
+				Some(out.commitment())
+			} else {
+				None
+			}
+		});
 		let cb_kerns = filter_map_vec!(self.kernels, |k| if k.features.contains(COINBASE_KERNEL) {
 			Some(k.excess)
@@ -27,11 +27,11 @@
 use util::secp;

-use core::{Transaction, Input, Output, SwitchCommitHash, DEFAULT_OUTPUT};
+use core::{Input, Output, SwitchCommitHash, Transaction, DEFAULT_OUTPUT};
 use core::transaction::kernel_sig_msg;
 use util::LOGGER;
 use keychain;
-use keychain::{Keychain, BlindSum, BlindingFactor, Identifier};
+use keychain::{BlindSum, BlindingFactor, Identifier, Keychain};

 /// Context information available to transaction combinators.
 pub struct Context<'a> {
@@ -40,7 +40,8 @@ pub struct Context<'a> {
 /// Function type returned by the transaction combinators. Transforms a
 /// (Transaction, BlindSum) pair into another, provided some context.
-pub type Append = for<'a> Fn(&'a mut Context, (Transaction, BlindSum)) -> (Transaction, BlindSum);
+pub type Append = for<'a> Fn(&'a mut Context, (Transaction, BlindSum))
+	-> (Transaction, BlindSum);

 /// Adds an input with the provided value and blinding key to the transaction
 /// being built.
@@ -132,10 +133,11 @@ pub fn transaction(
 	keychain: &keychain::Keychain,
 ) -> Result<(Transaction, BlindingFactor), keychain::Error> {
 	let mut ctx = Context { keychain };
-	let (mut tx, sum) = elems.iter().fold(
-		(Transaction::empty(), BlindSum::new()),
-		|acc, elem| elem(&mut ctx, acc),
-	);
+	let (mut tx, sum) = elems
+		.iter()
+		.fold((Transaction::empty(), BlindSum::new()), |acc, elem| {
+			elem(&mut ctx, acc)
+		});
 	let blind_sum = ctx.keychain.blind_sum(&sum)?;
 	let msg = secp::Message::from_slice(&kernel_sig_msg(tx.fee, tx.lock_height))?;
 	let sig = ctx.keychain.sign_with_blinding(&msg, &blind_sum)?;
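For context, this fold is what lets callers assemble a transaction declaratively from combinators; a minimal sketch of such a call (the amounts, fee and key ids are illustrative, and the combinators are the ones imported by the core tests further down):

    // Each element of the vec is an Append closure; transaction() folds them
    // over (Transaction::empty(), BlindSum::new()) as shown above.
    let keychain = Keychain::from_random_seed().unwrap();
    let key_id1 = keychain.derive_key_id(1).unwrap();
    let key_id2 = keychain.derive_key_id(2).unwrap();
    let (tx, blind) = build::transaction(
        vec![input(75, key_id1), output(42, key_id2), with_fee(33)],
        &keychain,
    ).unwrap();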
@@ -24,7 +24,7 @@ use std::convert::AsRef;
 use blake2::blake2b::Blake2b;

 use consensus::VerifySortOrder;
-use ser::{self, Reader, Readable, Writer, Writeable, Error, AsFixedBytes};
+use ser::{self, AsFixedBytes, Error, Readable, Reader, Writeable, Writer};
 use util::LOGGER;

 /// A hash consisting of all zeroes, used as a sentinel. No known preimage.
@@ -153,7 +153,9 @@ impl HashWriter {
 impl Default for HashWriter {
 	fn default() -> HashWriter {
-		HashWriter { state: Blake2b::new(32) }
+		HashWriter {
+			state: Blake2b::new(32),
+		}
 	}
 }
@@ -173,7 +175,7 @@ pub trait Hashed {
 	/// Obtain the hash of the object
 	fn hash(&self) -> Hash;
 	/// Hash the object together with another writeable object
-	fn hash_with<T: Writeable>(&self, other:T) -> Hash;
+	fn hash_with<T: Writeable>(&self, other: T) -> Hash;
 }

 impl<W: ser::Writeable> Hashed for W {
@@ -185,7 +187,7 @@ impl<W: ser::Writeable> Hashed for W {
 		Hash(ret)
 	}

-	fn hash_with<T: Writeable>(&self, other:T) -> Hash{
+	fn hash_with<T: Writeable>(&self, other: T) -> Hash {
 		let mut hasher = HashWriter::default();
 		ser::Writeable::write(self, &mut hasher).unwrap();
 		trace!(LOGGER, "Hashing with additional data");
@@ -202,7 +204,8 @@ impl<T: Writeable> VerifySortOrder<T> for Vec<T> {
 			.map(|item| item.hash())
 			.collect::<Vec<_>>()
 			.windows(2)
-			.any(|pair| pair[0] > pair[1]) {
+			.any(|pair| pair[0] > pair[1])
+		{
 			true => Err(ser::Error::BadlySorted),
 			false => Ok(()),
 		}
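The match above boils down to a pairwise non-decreasing check over the item hashes; the same idiom in miniature, with plain integers standing in for hashes:

    // windows(2) yields adjacent pairs; any strictly decreasing pair means
    // the collection was badly sorted.
    let sorted = [1u8, 5, 9];
    assert!(!sorted.windows(2).any(|pair| pair[0] > pair[1]));
    let unsorted = [1u8, 9, 5];
    assert!(unsorted.windows(2).any(|pair| pair[0] > pair[1])); // -> Err(BadlySorted)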
@@ -31,8 +31,8 @@ use util::secp::pedersen::*;
 pub use self::block::*;
 pub use self::transaction::*;
-use self::hash::{Hashed};
-use ser::{Writeable, Writer, Reader, Readable, Error};
+use self::hash::Hashed;
+use ser::{Error, Readable, Reader, Writeable, Writer};
 use global;

 // use keychain;
@@ -186,11 +186,11 @@ impl Writeable for Proof {
 mod test {
 	use super::*;
 	use core::hash::ZERO_HASH;
-	use core::build::{input, output, with_fee, initial_tx, with_excess, with_lock_height};
+	use core::build::{initial_tx, input, output, with_excess, with_fee, with_lock_height};
 	use core::block::Error::KernelLockHeight;
 	use ser;
 	use keychain;
-	use keychain::{Keychain, BlindingFactor};
+	use keychain::{BlindingFactor, Keychain};

 	#[test]
 	#[should_panic(expected = "InvalidSecretKey")]
@@ -421,7 +421,9 @@ mod test {
 		&key_id3.clone(),
 	).unwrap();

 	match b.validate(keychain.secp()) {
-		Err(KernelLockHeight { lock_height: height }) => {
+		Err(KernelLockHeight {
+			lock_height: height,
+		}) => {
 			assert_eq!(height, 2);
 		}
 		_ => panic!("expecting KernelLockHeight error here"),
@@ -119,7 +119,7 @@ where
 	T: Summable + Hashed,
 {
 	/// Create a hash sum from a summable
-	pub fn from_summable<W: Writeable>(idx: u64, elmt: &T, hash_with:Option<W>) -> HashSum<T> {
+	pub fn from_summable<W: Writeable>(idx: u64, elmt: &T, hash_with: Option<W>) -> HashSum<T> {
 		let hash = match hash_with {
 			Some(h) => elmt.hash_with(h),
 			None => elmt.hash(),
@@ -259,7 +259,7 @@ where
 	/// Push a new Summable element in the MMR. Computes new related peaks at
 	/// the same time if applicable.
-	pub fn push<W: Writeable>(&mut self, elmt: T, hash_with:Option<W>) -> Result<u64, String> {
+	pub fn push<W: Writeable>(&mut self, elmt: T, hash_with: Option<W>) -> Result<u64, String> {
 		let elmt_pos = self.last_pos + 1;
 		let mut current_hashsum = HashSum::from_summable(elmt_pos, &elmt, hash_with);
 		let mut to_append = vec![current_hashsum.clone()];
@@ -272,9 +272,9 @@ where
 		// creation of another parent.
 		while bintree_postorder_height(pos + 1) > height {
 			let left_sibling = bintree_jump_left_sibling(pos);
-			let left_hashsum = self.backend.get(left_sibling).expect(
-				"missing left sibling in tree, should not have been pruned",
-			);
+			let left_hashsum = self.backend
+				.get(left_sibling)
+				.expect("missing left sibling in tree, should not have been pruned");
 			current_hashsum = left_hashsum + current_hashsum;

 			to_append.push(current_hashsum.clone());
@@ -353,22 +353,22 @@ where
 	/// Helper function to get the last N nodes inserted, i.e. the last
 	/// n nodes along the bottom of the tree
 	pub fn get_last_n_insertions(&self, n: u64) -> Vec<HashSum<T>> {
-		let mut return_vec=Vec::new();
+		let mut return_vec = Vec::new();
 		let mut last_leaf = self.last_pos;
-		let size=self.unpruned_size();
-		//Special case that causes issues in bintree functions,
-		//just return
-		if size==1 {
+		let size = self.unpruned_size();
+		// Special case that causes issues in bintree functions,
+		// just return
+		if size == 1 {
 			return_vec.push(self.backend.get(last_leaf).unwrap());
 			return return_vec;
 		}
-		//if size is even, we're already at the bottom, otherwise
-		//we need to traverse down to it (reverse post-order direction)
+		// if size is even, we're already at the bottom, otherwise
+		// we need to traverse down to it (reverse post-order direction)
 		if size % 2 == 1 {
-			last_leaf=bintree_rightmost(self.last_pos);
+			last_leaf = bintree_rightmost(self.last_pos);
 		}
 		for _ in 0..n as u64 {
-			if last_leaf==0 {
+			if last_leaf == 0 {
 				break;
 			}
 			if bintree_postorder_height(last_leaf) > 0 {
@@ -376,7 +376,7 @@ where
 			}
 			return_vec.push(self.backend.get(last_leaf).unwrap());
-			last_leaf=bintree_jump_left_sibling(last_leaf);
+			last_leaf = bintree_jump_left_sibling(last_leaf);
 		}
 		return_vec
 	}
@@ -394,16 +394,16 @@ where
 		if sz > 600 {
 			return;
 		}
-		let start = if short && sz > 7 { sz/8 - 1 } else { 0 };
-		for n in start..(sz/8+1) {
+		let start = if short && sz > 7 { sz / 8 - 1 } else { 0 };
+		for n in start..(sz / 8 + 1) {
 			let mut idx = "".to_owned();
 			let mut hashes = "".to_owned();
-			for m in (n*8)..(n+1)*8 {
+			for m in (n * 8)..(n + 1) * 8 {
 				if m >= sz {
 					break;
 				}
 				idx.push_str(&format!("{:>8} ", m + 1));
-				let ohs = self.get(m+1);
+				let ohs = self.get(m + 1);
 				match ohs {
 					Some(hs) => hashes.push_str(&format!("{} ", hs.hash)),
 					None => hashes.push_str(&format!("{:>8} ", "??")),
@@ -503,7 +503,9 @@ pub struct PruneList {
 impl PruneList {
 	/// Instantiate a new empty prune list
 	pub fn new() -> PruneList {
-		PruneList { pruned_nodes: vec![] }
+		PruneList {
+			pruned_nodes: vec![],
+		}
 	}

 	/// Computes by how many positions a node at pos should be shifted given the
@@ -583,7 +585,6 @@ impl PruneList {
 /// side of the range, and navigates toward lower siblings toward the right
 /// of the range.
 fn peaks(num: u64) -> Vec<u64> {
-
 	// detecting an invalid mountain range, when siblings exist but no parent
 	// exists
 	if bintree_postorder_height(num + 1) > bintree_postorder_height(num) {
@@ -845,8 +846,8 @@ mod test {
 	fn sum(&self) -> u64 {
 		// sums are not allowed to overflow, so we use this simple
 		// non-injective "sum" function that will still be homomorphic
-		self.0[0] as u64 * 0x1000 + self.0[1] as u64 * 0x100 + self.0[2] as u64 * 0x10 +
-			self.0[3] as u64
+		self.0[0] as u64 * 0x1000 + self.0[1] as u64 * 0x100 + self.0[2] as u64 * 0x10
+			+ self.0[3] as u64
 	}
 	fn sum_len() -> usize {
 		8
@@ -896,7 +897,8 @@ mod test {
 	// two elements
 	pmmr.push(elems[1], None::<TestElem>).unwrap();
-	let sum2 = HashSum::from_summable(1, &elems[0], None::<TestElem>) + HashSum::from_summable(2, &elems[1], None::<TestElem>);
+	let sum2 = HashSum::from_summable(1, &elems[0], None::<TestElem>)
+		+ HashSum::from_summable(2, &elems[1], None::<TestElem>);
 	assert_eq!(pmmr.root(), sum2);
 	assert_eq!(pmmr.unpruned_size(), 3);
@@ -908,8 +910,9 @@ mod test {
 	// four elements
 	pmmr.push(elems[3], None::<TestElem>).unwrap();
-	let sum4 = sum2 +
-		(HashSum::from_summable(4, &elems[2], None::<TestElem>) + HashSum::from_summable(5, &elems[3], None::<TestElem>));
+	let sum4 = sum2
+		+ (HashSum::from_summable(4, &elems[2], None::<TestElem>)
+			+ HashSum::from_summable(5, &elems[3], None::<TestElem>));
 	assert_eq!(pmmr.root(), sum4);
 	assert_eq!(pmmr.unpruned_size(), 7);
@@ -921,8 +924,9 @@ mod test {
 	// six elements
 	pmmr.push(elems[5], None::<TestElem>).unwrap();
-	let sum6 = sum4.clone() +
-		(HashSum::from_summable(8, &elems[4], None::<TestElem>) + HashSum::from_summable(9, &elems[5], None::<TestElem>));
+	let sum6 = sum4.clone()
+		+ (HashSum::from_summable(8, &elems[4], None::<TestElem>)
+			+ HashSum::from_summable(9, &elems[5], None::<TestElem>));
 	assert_eq!(pmmr.root(), sum6.clone());
 	assert_eq!(pmmr.unpruned_size(), 10);
@@ -934,9 +938,11 @@ mod test {
 	// eight elements
 	pmmr.push(elems[7], None::<TestElem>).unwrap();
-	let sum8 = sum4 +
-		((HashSum::from_summable(8, &elems[4], None::<TestElem>) + HashSum::from_summable(9, &elems[5], None::<TestElem>)) +
-		(HashSum::from_summable(11, &elems[6], None::<TestElem>) + HashSum::from_summable(12, &elems[7], None::<TestElem>)));
+	let sum8 = sum4
+		+ ((HashSum::from_summable(8, &elems[4], None::<TestElem>)
+			+ HashSum::from_summable(9, &elems[5], None::<TestElem>))
+			+ (HashSum::from_summable(11, &elems[6], None::<TestElem>)
+				+ HashSum::from_summable(12, &elems[7], None::<TestElem>)));
 	assert_eq!(pmmr.root(), sum8);
 	assert_eq!(pmmr.unpruned_size(), 15);
@@ -949,7 +955,6 @@ mod test {
 #[test]
 fn pmmr_get_last_n_insertions() {
-
 	let elems = [
 		TestElem([0, 0, 0, 1]),
 		TestElem([0, 0, 0, 2]),
@@ -964,28 +969,31 @@ mod test {
 	let mut ba = VecBackend::new();
 	let mut pmmr = PMMR::new(&mut ba);

-	//test when empty
-	let res=pmmr.get_last_n_insertions(19);
-	assert!(res.len()==0);
+	// test when empty
+	let res = pmmr.get_last_n_insertions(19);
+	assert!(res.len() == 0);

 	pmmr.push(elems[0], None::<TestElem>).unwrap();
-	let res=pmmr.get_last_n_insertions(19);
-	assert!(res.len()==1 && res[0].sum==1);
+	let res = pmmr.get_last_n_insertions(19);
+	assert!(res.len() == 1 && res[0].sum == 1);

 	pmmr.push(elems[1], None::<TestElem>).unwrap();
 	let res = pmmr.get_last_n_insertions(12);
-	assert!(res[0].sum==2 && res[1].sum==1);
+	assert!(res[0].sum == 2 && res[1].sum == 1);

 	pmmr.push(elems[2], None::<TestElem>).unwrap();
 	let res = pmmr.get_last_n_insertions(2);
-	assert!(res[0].sum==3 && res[1].sum==2);
+	assert!(res[0].sum == 3 && res[1].sum == 2);

 	pmmr.push(elems[3], None::<TestElem>).unwrap();
 	let res = pmmr.get_last_n_insertions(19);
-	assert!(res[0].sum==4 && res[1].sum==3 && res[2].sum==2 && res[3].sum==1 && res.len()==4);
+	assert!(
+		res[0].sum == 4 && res[1].sum == 3 && res[2].sum == 2 && res[3].sum == 1
+			&& res.len() == 4
+	);

 	pmmr.push(elems[5], None::<TestElem>).unwrap();
 	pmmr.push(elems[6], None::<TestElem>).unwrap();
@@ -993,8 +1001,10 @@ mod test {
 	pmmr.push(elems[8], None::<TestElem>).unwrap();

 	let res = pmmr.get_last_n_insertions(7);
-	assert!(res[0].sum==9 && res[1].sum==8 && res[2].sum==7 && res[3].sum==6 && res.len()==7);
+	assert!(
+		res[0].sum == 9 && res[1].sum == 8 && res[2].sum == 7 && res[3].sum == 6
+			&& res.len() == 7
+	);
 }

 #[test]
@@ -20,13 +20,13 @@
 //! wrapper in case the internal representation needs to change again

 use std::fmt;
-use std::ops::{Add, Mul, Div, Sub};
-use serde::{Serialize, Serializer, Deserialize, Deserializer, de};
-use byteorder::{ByteOrder, BigEndian};
+use std::ops::{Add, Div, Mul, Sub};
+use serde::{de, Deserialize, Deserializer, Serialize, Serializer};
+use byteorder::{BigEndian, ByteOrder};

 use core::hash::Hash;
-use ser::{self, Reader, Writer, Writeable, Readable};
+use ser::{self, Readable, Reader, Writeable, Writer};

 /// The target is the 32-bytes hash block hashes must be lower than.
 pub const MAX_TARGET: [u8; 8] = [0xf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff];
@@ -63,7 +63,9 @@ impl Difficulty {
 		let mut in_vec = h.to_vec();
 		in_vec.truncate(8);
 		let num = BigEndian::read_u64(&in_vec);
-		Difficulty { num: max_target / num }
+		Difficulty {
+			num: max_target / num,
+		}
 	}

 	/// Converts the difficulty into a u64
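So from_hash divides the maximum target by the hash's first eight bytes read as a big-endian u64, mapping smaller (harder) hashes to larger difficulties. A worked example with an assumed hash prefix:

    // MAX_TARGET above reads as 0x0fff_ffff_ffff_ffff (2^60 - 1) big-endian.
    let max_target: u64 = 0x0fff_ffff_ffff_ffff;
    // Assume a hash whose first 8 bytes read as 2^48 - 1:
    let num: u64 = 0x0000_ffff_ffff_ffff;
    assert_eq!(max_target / num, 4096); // the hash beats the max target ~2^12 times over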
@@ -81,28 +83,36 @@ impl fmt::Display for Difficulty {
 impl Add<Difficulty> for Difficulty {
 	type Output = Difficulty;
 	fn add(self, other: Difficulty) -> Difficulty {
-		Difficulty { num: self.num + other.num }
+		Difficulty {
+			num: self.num + other.num,
+		}
 	}
 }

 impl Sub<Difficulty> for Difficulty {
 	type Output = Difficulty;
 	fn sub(self, other: Difficulty) -> Difficulty {
-		Difficulty { num: self.num - other.num }
+		Difficulty {
+			num: self.num - other.num,
+		}
 	}
 }

 impl Mul<Difficulty> for Difficulty {
 	type Output = Difficulty;
 	fn mul(self, other: Difficulty) -> Difficulty {
-		Difficulty { num: self.num * other.num }
+		Difficulty {
+			num: self.num * other.num,
+		}
 	}
 }

 impl Div<Difficulty> for Difficulty {
 	type Output = Difficulty;
 	fn div(self, other: Difficulty) -> Difficulty {
-		Difficulty { num: self.num / other.num }
+		Difficulty {
+			num: self.num / other.num,
+		}
 	}
 }
@@ -157,6 +167,8 @@ impl<'de> de::Visitor<'de> for DiffVisitor {
 				&"a value number",
 			));
 		};
-		Ok(Difficulty { num: num_in.unwrap() })
+		Ok(Difficulty {
+			num: num_in.unwrap(),
+		})
 	}
 }
@@ -14,7 +14,7 @@
 //! Transactions

-use byteorder::{ByteOrder, BigEndian};
+use byteorder::{BigEndian, ByteOrder};
 use blake2::blake2b::blake2b;
 use util::secp::{self, Secp256k1, Message, Signature};
 use util::secp::pedersen::{RangeProof, Commitment};
@@ -23,7 +23,7 @@ use std::ops;
 use core::Committed;
 use core::pmmr::Summable;
 use keychain::{Identifier, Keychain};
-use ser::{self, Reader, Writer, Readable, Writeable, WriteableSorted, read_and_verify_sorted};
+use ser::{self, read_and_verify_sorted, Readable, Reader, Writeable, WriteableSorted, Writer};
 use util::LOGGER;

 /// The size to use for the stored blake2 hash of a switch_commitment
@@ -102,9 +102,8 @@ impl Writeable for TxKernel {
 impl Readable for TxKernel {
 	fn read(reader: &mut Reader) -> Result<TxKernel, ser::Error> {
-		let features = KernelFeatures::from_bits(reader.read_u8()?).ok_or(
-			ser::Error::CorruptedData,
-		)?;
+		let features =
+			KernelFeatures::from_bits(reader.read_u8()?).ok_or(ser::Error::CorruptedData)?;

 		Ok(TxKernel {
 			features: features,
@@ -456,9 +455,8 @@
 /// an Output from a binary stream.
 impl Readable for Output {
 	fn read(reader: &mut Reader) -> Result<Output, ser::Error> {
-		let features = OutputFeatures::from_bits(reader.read_u8()?).ok_or(
-			ser::Error::CorruptedData,
-		)?;
+		let features =
+			OutputFeatures::from_bits(reader.read_u8()?).ok_or(ser::Error::CorruptedData)?;

 		Ok(Output {
 			features: features,
@@ -494,13 +492,11 @@ impl Output {
 	/// value from the range proof and the commitment
 	pub fn recover_value(&self, keychain: &Keychain, key_id: &Identifier) -> Option<u64> {
 		match keychain.rewind_range_proof(key_id, self.commit, self.proof) {
-			Ok(proof_info) => {
-				if proof_info.success {
-					Some(proof_info.value)
-				} else {
-					None
-				}
-			}
+			Ok(proof_info) => if proof_info.success {
+				Some(proof_info.value)
+			} else {
+				None
+			},
 			Err(_) => None,
 		}
 	}
@@ -554,10 +550,9 @@ impl ops::Add for SumCommit {
 	type Output = SumCommit;

 	fn add(self, other: SumCommit) -> SumCommit {
-		let sum = match self.secp.commit_sum(
-			vec![self.commit.clone(), other.commit.clone()],
-			vec![],
-		) {
+		let sum = match self.secp
+			.commit_sum(vec![self.commit.clone(), other.commit.clone()], vec![])
+		{
 			Ok(s) => s,
 			Err(_) => Commitment::from_vec(vec![1; 33]),
 		};
@@ -25,8 +25,10 @@
 extern crate bitflags;
 extern crate blake2_rfc as blake2;
 extern crate byteorder;
+extern crate grin_keychain as keychain;
+extern crate grin_util as util;
 #[macro_use]
-extern crate slog;
+extern crate lazy_static;
 extern crate num_bigint as bigint;
 extern crate rand;
-extern crate grin_keychain as keychain;
@@ -34,9 +36,9 @@ extern crate grin_util as util;
 extern crate serde;
 #[macro_use]
 extern crate serde_derive;
-extern crate time;
 #[macro_use]
-extern crate lazy_static;
+extern crate slog;
+extern crate time;

 #[macro_use]
 pub mod macros;
@@ -19,9 +19,9 @@
 //! To use it simply implement `Writeable` or `Readable` and then use the
 //! `serialize` or `deserialize` functions on them as appropriate.

-use std::{error, fmt, cmp};
-use std::io::{self, Write, Read};
-use byteorder::{ByteOrder, ReadBytesExt, BigEndian};
+use std::{cmp, error, fmt};
+use std::io::{self, Read, Write};
+use byteorder::{BigEndian, ByteOrder, ReadBytesExt};
 use keychain::{Identifier, IDENTIFIER_SIZE};
 use core::hash::Hashed;
 use consensus::VerifySortOrder;
@@ -199,7 +199,8 @@ pub trait WriteableSorted {
 /// A consensus rule requires everything is sorted lexicographically to avoid
 /// leaking any information through specific ordering of items.
 pub fn read_and_verify_sorted<T>(reader: &mut Reader, count: u64) -> Result<Vec<T>, Error>
-where T: Readable + Hashed + Writeable
+where
+	T: Readable + Hashed + Writeable,
 {
 	let result: Vec<T> = try!((0..count).map(|_| T::read(reader)).collect());
 	result.verify_sort_order()?;
@@ -276,9 +277,10 @@ impl<'a> Reader for BinReader<'a> {
 			return Err(Error::TooLargeReadErr);
 		}
 		let mut buf = vec![0; length];
-		self.source.read_exact(&mut buf).map(move |_| buf).map_err(
-			Error::IOErr,
-		)
+		self.source
+			.read_exact(&mut buf)
+			.map(move |_| buf)
+			.map_err(Error::IOErr)
 	}

 	fn expect_u8(&mut self, val: u8) -> Result<u8, Error> {
@@ -532,7 +534,8 @@ impl AsFixedBytes for [u8; 20] {
 	fn len(&self) -> usize {
 		return 20;
 	}
-}impl AsFixedBytes for [u8; 32] {
+}
+impl AsFixedBytes for [u8; 32] {
 	fn len(&self) -> usize {
 		return 32;
 	}
@@ -105,7 +105,7 @@ Before running your mining server, a wallet server needs to be set up and listen
 See [wallet](wallet.md) for more info on the various Grin wallet commands and options.

-This will create a wallet server listening on the default port 13416 with the password "password". Next, in another terminal window in the 'node1' directory, run a full mining node with the following command:
+This will create a wallet server listening on the default port 13415 with the password "password". Next, in another terminal window in the 'node1' directory, run a full mining node with the following command:

     node1$ grin server -m run
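To sanity-check that the wallet listener is actually up before starting the miner, one can poke its v1 API from another terminal (an illustrative check; the path mirrors the coinbase route the miner calls, so an error body still proves the listener is reachable):

    node1$ curl http://127.0.0.1:13415/v1/receive/coinbase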
@@ -111,11 +111,11 @@ attempt_time_per_block = 90

 #the wallet receiver to which coinbase rewards will be sent
-wallet_receiver_url = "http://127.0.0.1:13416"
+wallet_receiver_url = "http://127.0.0.1:13415"

 #whether to ignore the reward (mostly for testing)
-burn_reward = false
+burn_reward = true

 #testing value, optional
 #slow_down_in_millis = 30
@@ -27,6 +27,7 @@ serde_json = "~1.0.2"
 tokio-core="~0.1.1"
 tokio-timer="~0.1.0"
 rand = "^0.3"
+router = "~0.5.1"
 itertools = "~0.6.0"

 [dev_dependencies]
@@ -21,7 +21,7 @@ use core::core::{self, Output};
 use core::core::block::BlockHeader;
 use core::core::hash::{Hash, Hashed};
 use core::core::target::Difficulty;
-use p2p::{self, NetAdapter, Server, PeerStore, PeerData, State};
+use p2p::{self, NetAdapter, PeerData, PeerStore, Server, State};
 use pool;
 use util::secp::pedersen::Commitment;
 use util::OneTime;
@@ -180,11 +180,8 @@ impl NetAdapter for NetToChainAdapter {
 	/// Find good peers we know with the provided capability and return their
 	/// addresses.
 	fn find_peer_addrs(&self, capab: p2p::Capabilities) -> Vec<SocketAddr> {
-		let peers = self.peer_store.find_peers(
-			State::Healthy,
-			capab,
-			p2p::MAX_PEER_ADDRS as usize,
-		);
+		let peers = self.peer_store
+			.find_peers(State::Healthy, capab, p2p::MAX_PEER_ADDRS as usize);
 		debug!(LOGGER, "Got {} peer addrs to send.", peers.len());
 		map_vec!(peers, |p| p.addr)
 	}
@@ -244,11 +241,11 @@ impl NetToChainAdapter {
 	pub fn start_sync(&self, sync: sync::Syncer) {
 		let arc_sync = Arc::new(sync);
 		self.syncer.init(arc_sync.clone());
-		let _ = thread::Builder::new().name("syncer".to_string()).spawn(
-			move || {
-				let _ = arc_sync.run();
-			},
-		);
+		let _ = thread::Builder::new()
+			.name("syncer".to_string())
+			.spawn(move || {
+				let _ = arc_sync.run();
+			});
 	}

 	pub fn syncing(&self) -> bool {
@@ -325,7 +322,9 @@ impl pool::PoolAdapter for PoolToNetAdapter {
 impl PoolToNetAdapter {
 	/// Create a new pool to net adapter
 	pub fn new() -> PoolToNetAdapter {
-		PoolToNetAdapter { p2p: OneTime::new() }
+		PoolToNetAdapter {
+			p2p: OneTime::new(),
+		}
 	}

 	/// Setup the p2p server on the adapter
@@ -345,7 +344,9 @@ pub struct PoolToChainAdapter {
 impl PoolToChainAdapter {
 	/// Create a new pool adapter
 	pub fn new() -> PoolToChainAdapter {
-		PoolToChainAdapter { chain: OneTime::new() }
+		PoolToChainAdapter {
+			chain: OneTime::new(),
+		}
 	}

 	pub fn set_chain(&self, chain_ref: Arc<chain::Chain>) {
@@ -355,13 +356,14 @@ impl PoolToChainAdapter {
 impl pool::BlockChain for PoolToChainAdapter {
 	fn get_unspent(&self, output_ref: &Commitment) -> Result<Output, pool::PoolError> {
-		self.chain.borrow().get_unspent(output_ref).map_err(
-			|e| match e {
+		self.chain
+			.borrow()
+			.get_unspent(output_ref)
+			.map_err(|e| match e {
 				chain::types::Error::OutputNotFound => pool::PoolError::OutputNotFound,
 				chain::types::Error::OutputSpent => pool::PoolError::OutputSpent,
 				_ => pool::PoolError::GenericPoolError,
-			},
-		)
+			})
 	}

 	fn get_block_header_by_output_commit(
@@ -375,8 +377,9 @@ impl pool::BlockChain for PoolToChainAdapter {
 	}

 	fn head_header(&self) -> Result<BlockHeader, pool::PoolError> {
-		self.chain.borrow().head_header().map_err(|_| {
-			pool::PoolError::GenericPoolError
-		})
+		self.chain
+			.borrow()
+			.head_header()
+			.map_err(|_| pool::PoolError::GenericPoolError)
 	}
 }
@@ -1,4 +1,4 @@
-// Copyright 2016 The Grin Developers
+// Copyright 2016-2017 The Grin Developers
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -21,32 +21,32 @@
 #![deny(unused_mut)]
 #![warn(missing_docs)]

-#[macro_use]
-extern crate slog;
 extern crate futures;
 extern crate futures_cpupool as cpupool;
 extern crate hyper;
+extern crate itertools;
 extern crate rand;
 extern crate serde;
 #[macro_use]
 extern crate serde_derive;
 extern crate serde_json;
+#[macro_use]
+extern crate slog;
 extern crate time;
 extern crate tokio_core;
 extern crate tokio_timer;
-extern crate itertools;

 extern crate grin_api as api;
 extern crate grin_chain as chain;
 #[macro_use]
 extern crate grin_core as core;
+extern crate grin_keychain as keychain;
 extern crate grin_p2p as p2p;
 extern crate grin_pool as pool;
+extern crate grin_pow as pow;
 extern crate grin_store as store;
 extern crate grin_util as util;
-extern crate grin_keychain as keychain;
 extern crate grin_wallet as wallet;
-extern crate grin_pow as pow;

 mod adapters;
 mod server;
@@ -56,4 +56,4 @@ mod types;
 mod miner;

 pub use server::Server;
-pub use types::{ServerConfig, Seeding, ServerStats};
+pub use types::{Seeding, ServerConfig, ServerStats};
@@ -17,7 +17,7 @@
 use rand::{self, Rng};
 use std::sync::{Arc, RwLock};
-use std::{thread, str};
+use std::{str, thread};
 use std;
 use time;
@@ -97,7 +97,6 @@ impl ser::Writer for HeaderPartWriter {
 			for i in 0..bytes_in.len() {
 				self.pre_nonce.push(bytes_in.as_ref()[i])
 			}
-
 		} else if self.bytes_written != 0 {
 			for i in 0..bytes_in.len() {
 				self.post_nonce.push(bytes_in.as_ref()[i])
@@ -158,7 +157,6 @@ impl Miner {
 		latest_hash: &Hash,
 		attempt_time_per_block: u32,
 	) -> Option<Proof> {
-
 		debug!(
 			LOGGER,
 			"(Server ID: {}) Mining at Cuckoo{} for at most {} secs at height {} and difficulty {}.",
@@ -248,7 +246,6 @@ impl Miner {
 		job_handle.stop_jobs();
 		sol
 	}
-
 	/// The inner part of mining loop for cuckoo miner sync mode
@@ -291,7 +288,6 @@ impl Miner {
 		let mut sol = None;
 		while head.hash() == *latest_hash && time::get_time().sec < deadline {
 			let pow_hash = b.hash();
-
 			if let Ok(proof) = plugin_miner.mine(&pow_hash[..]) {
 				let proof_diff = proof.clone().to_difficulty();
@@ -324,8 +320,8 @@ impl Miner {
 			iter_count += 1;

 			// Artificial slow down
-			if self.config.slow_down_in_millis != None &&
-				self.config.slow_down_in_millis.unwrap() > 0
+			if self.config.slow_down_in_millis != None
+				&& self.config.slow_down_in_millis.unwrap() > 0
 			{
 				thread::sleep(std::time::Duration::from_millis(
 					self.config.slow_down_in_millis.unwrap(),
@@ -381,7 +377,6 @@ impl Miner {
 		let mut sol = None;
 		while head.hash() == *latest_hash && time::get_time().sec < deadline {
 			let pow_hash = b.hash();
-
 			if let Ok(proof) = miner.mine(&pow_hash[..]) {
 				let proof_diff = proof.clone().to_difficulty();
@@ -396,8 +391,8 @@ impl Miner {
 			iter_count += 1;

 			// Artificial slow down
-			if self.config.slow_down_in_millis != None &&
-				self.config.slow_down_in_millis.unwrap() > 0
+			if self.config.slow_down_in_millis != None
+				&& self.config.slow_down_in_millis.unwrap() > 0
 			{
 				thread::sleep(std::time::Duration::from_millis(
 					self.config.slow_down_in_millis.unwrap(),
@@ -419,7 +414,6 @@ impl Miner {
 	/// Starts the mining loop, building a new block on top of the existing
 	/// chain anytime required and looking for PoW solution.
 	pub fn run_loop(&self, miner_config: MinerConfig, cuckoo_size: u32, proof_size: usize) {
-
 		info!(
 			LOGGER,
 			"(Server ID: {}) Starting miner loop.",
@@ -550,9 +544,10 @@ impl Miner {
 		let difficulty = consensus::next_difficulty(diff_iter).unwrap();

 		// extract current transaction from the pool
-		let txs_box = self.tx_pool.read().unwrap().prepare_mineable_transactions(
-			MAX_TX,
-		);
+		let txs_box = self.tx_pool
+			.read()
+			.unwrap()
+			.prepare_mineable_transactions(MAX_TX);
 		let txs: Vec<&Transaction> = txs_box.iter().map(|tx| tx.as_ref()).collect();

 		// build the coinbase and the block itself
@@ -564,7 +559,8 @@ impl Miner {
 			height,
 		};

-		// TODO - error handling, things can go wrong with get_coinbase (wallet api down etc.)
+		// TODO - error handling, things can go wrong with get_coinbase (wallet api
+		// down etc.)
 		let (output, kernel, block_fees) = self.get_coinbase(block_fees).unwrap();
 		let mut b = core::Block::with_reward(head, txs, output, kernel).unwrap();
@@ -585,9 +581,9 @@ impl Miner {
 		b.header.nonce = rng.gen();
 		b.header.difficulty = difficulty;
 		b.header.timestamp = time::at_utc(time::Timespec::new(now_sec, 0));
-		self.chain.set_sumtree_roots(&mut b).expect(
-			"Error setting sum tree roots",
-		);
+		self.chain
+			.set_sumtree_roots(&mut b)
+			.expect("Error setting sum tree roots");
 		(b, block_fees)
 	}
@@ -600,8 +596,8 @@ impl Miner {
 	) -> Result<(core::Output, core::TxKernel, BlockFees), Error> {
 		let keychain = Keychain::from_random_seed().unwrap();
 		let key_id = keychain.derive_key_id(1).unwrap();
-		let (out, kernel) = core::Block::reward_output(&keychain, &key_id, block_fees.fees)
-			.unwrap();
+		let (out, kernel) =
+			core::Block::reward_output(&keychain, &key_id, block_fees.fees).unwrap();
 		Ok((out, kernel, block_fees))
 	}
@@ -613,8 +609,9 @@ impl Miner {
 			self.burn_reward(block_fees)
 		} else {
-			let url = format!(
-				"{}/v2/receive/coinbase",
-				self.config.wallet_receiver_url.as_str());
+			let url = format!(
+				"{}/v1/receive/coinbase",
+				self.config.wallet_receiver_url.as_str()
+			);

 			let res = wallet::client::create_coinbase(&url, &block_fees)?;
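With the sample configuration earlier in this commit, the formatted URL resolves to the wallet receiver's v1 coinbase route; a minimal sketch:

    // Illustrative, using the wallet_receiver_url from the sample grin.toml:
    let wallet_receiver_url = "http://127.0.0.1:13415";
    let url = format!("{}/v1/receive/coinbase", wallet_receiver_url);
    assert_eq!(url, "http://127.0.0.1:13415/v1/receive/coinbase");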
@@ -68,9 +68,8 @@ impl Seeder {
 		h.spawn(self.listen_for_addrs(h.clone(), rx));

 		// check seeds and start monitoring connections
-		let seeder = self.connect_to_seeds(tx.clone(), seed_list).join(
-			self.monitor_peers(tx.clone()),
-		);
+		let seeder = self.connect_to_seeds(tx.clone(), seed_list)
+			.join(self.monitor_peers(tx.clone()));

 		h.spawn(seeder.map(|_| ()).map_err(|e| {
 			error!(LOGGER, "Seeding or peer monitoring error: {}", e);
@@ -90,7 +89,6 @@ impl Seeder {
 		let mon_loop = Timer::default()
 			.interval(time::Duration::from_secs(10))
 			.for_each(move |_| {
-
 				// maintenance step first, clean up p2p server peers and mark bans
 				// if needed
 				let disconnected = p2p_server.clean_peers();
@@ -231,8 +229,10 @@ pub fn web_seeds(h: reactor::Handle) -> Box<Future<Item = Vec<SocketAddr>, Error
 		})
 		.and_then(|res| {
 			// collect all chunks and split around whitespace to get a list of SocketAddr
-			res.body().collect().map_err(|e| e.to_string()).and_then(
-				|chunks| {
+			res.body()
+				.collect()
+				.map_err(|e| e.to_string())
+				.and_then(|chunks| {
 					let res = chunks.iter().fold("".to_string(), |acc, ref chunk| {
 						acc + str::from_utf8(&chunk[..]).unwrap()
 					});
@@ -240,8 +240,7 @@
 						.map(|s| s.parse().unwrap())
 						.collect::<Vec<_>>();
 					Ok(addrs)
-				},
-			)
+				})
 		})
 	});
 	Box::new(seeds)
@@ -79,7 +79,6 @@ impl Server {
 	/// Instantiates a new server associated with the provided future reactor.
 	pub fn future(mut config: ServerConfig, evt_handle: &reactor::Handle) -> Result<Server, Error> {
 		let pool_adapter = Arc::new(PoolToChainAdapter::new());
-
 		let pool_net_adapter = Arc::new(PoolToNetAdapter::new());
 		let tx_pool = Arc::new(RwLock::new(pool::TransactionPool::new(
@@ -23,7 +23,7 @@ const MAX_BODY_DOWNLOADS: usize = 8;
 use std::ops::{Deref, DerefMut};
 use std::sync::{Arc, Mutex};
 use std::thread;
-use std::time::{Instant, Duration};
+use std::time::{Duration, Instant};

 use core::core::hash::{Hash, Hashed};
 use chain;
@@ -209,9 +209,9 @@ impl Syncer {
 	pub fn block_received(&self, bh: Hash) {
 		// just clean up the downloading list
 		let mut bds = self.blocks_downloading.lock().unwrap();
-		bds.iter().position(|ref h| h.hash == bh).map(
-			|n| bds.remove(n),
-		);
+		bds.iter()
+			.position(|ref h| h.hash == bh)
+			.map(|n| bds.remove(n));
 	}

 	/// Request some block headers from a peer to advance us
@@ -119,7 +119,7 @@ impl Default for ServerConfig {
 	fn default() -> ServerConfig {
 		ServerConfig {
 			db_root: ".grin".to_string(),
-			api_http_addr: "0.0.0.0:13415".to_string(),
+			api_http_addr: "0.0.0.0:13413".to_string(),
 			capabilities: p2p::FULL_NODE,
 			seeding_type: Seeding::None,
 			seeds: None,
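The 13415 to 13413 change is the port-gap fix from the commit message: the three services now claim contiguous defaults, matching the test configuration later in this diff.

    // Default port layout after this commit (see LocalServerContainerConfig):
    let api_server_port = 13413; // node HTTP API (api_http_addr above)
    let p2p_server_port = 13414; // peer-to-peer
    let wallet_port = 13415;     // wallet receiver, previously 13416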
@ -12,21 +12,21 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
extern crate grin_grin as grin;
extern crate grin_core as core;
extern crate grin_p2p as p2p;
extern crate grin_chain as chain;
extern crate grin_api as api; extern crate grin_api as api;
extern crate grin_wallet as wallet; extern crate grin_chain as chain;
extern crate grin_core as core;
extern crate grin_grin as grin;
extern crate grin_keychain as keychain; extern crate grin_keychain as keychain;
extern crate grin_p2p as p2p;
extern crate grin_pow as pow; extern crate grin_pow as pow;
extern crate grin_util as util; extern crate grin_util as util;
extern crate grin_wallet as wallet;
extern crate blake2_rfc as blake2; extern crate blake2_rfc as blake2;
extern crate futures; extern crate futures;
extern crate futures_cpupool;
extern crate tokio_core; extern crate tokio_core;
extern crate tokio_timer; extern crate tokio_timer;
extern crate futures_cpupool;
use std::thread; use std::thread;
use std::time; use std::time;
@ -42,9 +42,7 @@ use util::secp::Secp256k1;
use self::keychain::Keychain; use self::keychain::Keychain;
use wallet::WalletConfig; use wallet::WalletConfig;
/// Just removes all results from previous runs /// Just removes all results from previous runs
pub fn clean_all_output(test_name_dir: &str) { pub fn clean_all_output(test_name_dir: &str) {
let target_dir = format!("target/test_servers/{}", test_name_dir); let target_dir = format!("target/test_servers/{}", test_name_dir);
let result = fs::remove_dir_all(target_dir); let result = fs::remove_dir_all(target_dir);
@ -116,9 +114,9 @@ impl Default for LocalServerContainerConfig {
LocalServerContainerConfig { LocalServerContainerConfig {
name: String::from("test_host"), name: String::from("test_host"),
base_addr: String::from("127.0.0.1"), base_addr: String::from("127.0.0.1"),
api_server_port: 13413,
p2p_server_port: 13414, p2p_server_port: 13414,
api_server_port: 13415, wallet_port: 13415,
wallet_port: 13416,
seed_addr: String::from(""), seed_addr: String::from(""),
is_seeding: false, is_seeding: false,
start_miner: false, start_miner: false,
@ -256,14 +254,12 @@ impl LocalServerContainer {
} }
s.get_server_stats().unwrap() s.get_server_stats().unwrap()
} }
/// Starts a wallet daemon to receive transactions and returns the /// Starts a wallet daemon to receive transactions and returns the
/// listening server URL /// listening server URL
pub fn run_wallet(&mut self, _duration_in_seconds: u64) { pub fn run_wallet(&mut self, _duration_in_seconds: u64) {
// URL on which to start the wallet listener (i.e. api server) // URL on which to start the wallet listener (i.e. api server)
let url = format!("{}:{}", self.config.base_addr, self.config.wallet_port); let url = format!("{}:{}", self.config.base_addr, self.config.wallet_port);
@ -287,23 +283,22 @@ impl LocalServerContainer {
wallet_config.check_node_api_http_addr = self.config.wallet_validating_node_url.clone(); wallet_config.check_node_api_http_addr = self.config.wallet_validating_node_url.clone();
wallet_config.data_file_dir = self.working_dir.clone(); wallet_config.data_file_dir = self.working_dir.clone();
let mut api_server = api::ApiServer::new("/v1".to_string()); let receive_tx_handler = wallet::WalletReceiver {
config: wallet_config.clone(),
api_server.register_endpoint( keychain: keychain.clone(),
"/receive".to_string(), };
wallet::WalletReceiver { let router = router!(
keychain: keychain, receive_tx: get "/receive/transaction" => receive_tx_handler,
config: wallet_config,
},
); );
let mut api_server = api::ApiServer::new("/v1".to_string());
api_server.register_handler(router);
api_server.start(url).unwrap_or_else(|e| { api_server.start(url).unwrap_or_else(|e| {
println!("Failed to start Grin wallet receiver: {}.", e); println!("Failed to start Grin wallet receiver: {}.", e);
}); });
self.api_server = Some(api_server); self.api_server = Some(api_server);
self.wallet_is_running = true; self.wallet_is_running = true;
} }
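
For readability, here is the added side of the interleaved hunk above, reassembled in reading order (taken verbatim from the right-hand column):

let receive_tx_handler = wallet::WalletReceiver {
    config: wallet_config.clone(),
    keychain: keychain.clone(),
};
let router = router!(
    receive_tx: get "/receive/transaction" => receive_tx_handler,
);
let mut api_server = api::ApiServer::new("/v1".to_string());
api_server.register_handler(router);
api_server.start(url).unwrap_or_else(|e| {
    println!("Failed to start Grin wallet receiver: {}.", e);
});
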
/// Stops the running wallet server /// Stops the running wallet server
@ -400,7 +395,6 @@ impl LocalServerContainerPool {
/// ///
pub fn create_server(&mut self, server_config: &mut LocalServerContainerConfig) { pub fn create_server(&mut self, server_config: &mut LocalServerContainerConfig) {
// If we're calling it this way, need to override these // If we're calling it this way, need to override these
server_config.p2p_server_port = self.next_p2p_port; server_config.p2p_server_port = self.next_p2p_port;
server_config.api_server_port = self.next_api_port; server_config.api_server_port = self.next_api_port;
@ -444,8 +438,6 @@ impl LocalServerContainerPool {
let _run_time = self.config.run_length_in_seconds; let _run_time = self.config.run_length_in_seconds;
self.server_containers.push(server_container); self.server_containers.push(server_container);
} }
/// adds n servers, ready to run /// adds n servers, ready to run
@ -463,7 +455,6 @@ impl LocalServerContainerPool {
/// ///
pub fn run_all_servers(self) -> Vec<grin::ServerStats> { pub fn run_all_servers(self) -> Vec<grin::ServerStats> {
let run_length = self.config.run_length_in_seconds; let run_length = self.config.run_length_in_seconds;
let mut handles = vec![]; let mut handles = vec![];
@ -488,7 +479,6 @@ impl LocalServerContainerPool {
// failure if we don't pause a bit before starting the next server // failure if we don't pause a bit before starting the next server
thread::sleep(time::Duration::from_millis(500)); thread::sleep(time::Duration::from_millis(500));
handles.push(handle); handles.push(handle);
} }
for handle in handles { for handle in handles {
@ -508,7 +498,6 @@ impl LocalServerContainerPool {
pub fn connect_all_peers(&mut self) { pub fn connect_all_peers(&mut self) {
/// just pull out all currently active servers, build a list, /// just pull out all currently active servers, build a list,
/// and feed into all servers /// and feed into all servers
let mut server_addresses: Vec<String> = Vec::new(); let mut server_addresses: Vec<String> = Vec::new();
for s in &self.server_containers { for s in &self.server_containers {
let server_address = format!("{}:{}", s.config.base_addr, s.config.p2p_server_port); let server_address = format!("{}:{}", s.config.base_addr, s.config.p2p_server_port);


@ -12,14 +12,17 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
extern crate grin_grin as grin; #[macro_use]
extern crate grin_core as core; extern crate router;
extern crate grin_p2p as p2p;
extern crate grin_chain as chain;
extern crate grin_api as api; extern crate grin_api as api;
extern crate grin_wallet as wallet; extern crate grin_chain as chain;
extern crate grin_core as core;
extern crate grin_grin as grin;
extern crate grin_p2p as p2p;
extern crate grin_pow as pow; extern crate grin_pow as pow;
extern crate grin_util as util; extern crate grin_util as util;
extern crate grin_wallet as wallet;
extern crate futures; extern crate futures;
extern crate tokio_core; extern crate tokio_core;
@ -31,7 +34,7 @@ use std::thread;
use std::time; use std::time;
use std::default::Default; use std::default::Default;
use futures::{Future, Poll, Async}; use futures::{Async, Future, Poll};
use futures::task::current; use futures::task::current;
use tokio_core::reactor; use tokio_core::reactor;
use tokio_timer::Timer; use tokio_timer::Timer;
@ -41,8 +44,8 @@ use core::global;
use core::global::{MiningParameterMode, MINING_PARAMETER_MODE}; use core::global::{MiningParameterMode, MINING_PARAMETER_MODE};
use wallet::WalletConfig; use wallet::WalletConfig;
use framework::{LocalServerContainer, LocalServerContainerConfig, LocalServerContainerPoolConfig, use framework::{LocalServerContainer, LocalServerContainerConfig, LocalServerContainerPool,
LocalServerContainerPool}; LocalServerContainerPoolConfig};
/// Testing the frameworks by starting a fresh server, creating a genesis /// Testing the frameworks by starting a fresh server, creating a genesis
/// Block and mining into a wallet for a bit /// Block and mining into a wallet for a bit
@ -71,7 +74,6 @@ fn basic_genesis_mine() {
pool.create_server(&mut server_config); pool.create_server(&mut server_config);
pool.run_all_servers(); pool.run_all_servers();
} }
/// Creates 5 servers, first being a seed and check that through peer address /// Creates 5 servers, first being a seed and check that through peer address
@ -177,8 +179,6 @@ fn simulate_parallel_mining() {
// Check mining difficulty here? Though I'd think it's more valuable // Check mining difficulty here? Though I'd think it's more valuable
// to simply output it. Can at least see the evolution of the difficulty target // to simply output it. Can at least see the evolution of the difficulty target
// in the debug log output for now // in the debug log output for now
} }
// TODO: Convert these tests to newer framework format // TODO: Convert these tests to newer framework format
@ -186,6 +186,7 @@ fn simulate_parallel_mining() {
/// gets propagated to all. /// gets propagated to all.
#[test] #[test]
fn a_simulate_block_propagation() { fn a_simulate_block_propagation() {
util::init_test_logger();
global::set_mining_mode(MiningParameterMode::AutomatedTesting); global::set_mining_mode(MiningParameterMode::AutomatedTesting);
let test_name_dir = "grin-prop"; let test_name_dir = "grin-prop";


@ -17,7 +17,7 @@ use std::cmp::min;
use serde::{de, ser}; use serde::{de, ser};
use byteorder::{ByteOrder, BigEndian}; use byteorder::{BigEndian, ByteOrder};
use blake2::blake2b::blake2b; use blake2::blake2b::blake2b;
use util::secp; use util::secp;
use util::secp::Secp256k1; use util::secp::Secp256k1;
@ -253,11 +253,11 @@ impl ExtendedKey {
let derived = blake2b(64, &self.chaincode[..], &seed[..]); let derived = blake2b(64, &self.chaincode[..], &seed[..]);
let mut secret_key = SecretKey::from_slice(&secp, &derived.as_bytes()[0..32]) let mut secret_key =
SecretKey::from_slice(&secp, &derived.as_bytes()[0..32]).expect("Error deriving key");
secret_key
.add_assign(secp, &self.key)
.expect("Error deriving key"); .expect("Error deriving key");
secret_key.add_assign(secp, &self.key).expect(
"Error deriving key",
);
// TODO check if key != 0 ? // TODO check if key != 0 ?
let mut chain_code: [u8; 32] = [0; 32]; let mut chain_code: [u8; 32] = [0; 32];
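
Read together, the derivation above is BIP32-style: blake2b keyed with the parent chain code hashes the seed into 64 bytes, the first 32 of which become a tweak added to the parent secret key. A sketch of the step; the [32..64] slice filling the child chain code is an assumption, since that part of the function falls outside the hunk:

// Same blake2_rfc and util::secp calls as the surrounding code.
let derived = blake2b(64, &self.chaincode[..], &seed[..]);
let mut secret_key = SecretKey::from_slice(&secp, &derived.as_bytes()[0..32])
    .expect("Error deriving key");
secret_key
    .add_assign(secp, &self.key)
    .expect("Error deriving key");
// Assumed: the remaining 32 bytes become the child chain code.
let mut chain_code: [u8; 32] = [0; 32];
chain_code.clone_from_slice(&derived.as_bytes()[32..64]);
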
@ -312,13 +312,10 @@ mod test {
let s = Secp256k1::new(); let s = Secp256k1::new();
let seed = from_hex("000102030405060708090a0b0c0d0e0f"); let seed = from_hex("000102030405060708090a0b0c0d0e0f");
let extk = ExtendedKey::from_seed(&s, &seed.as_slice()).unwrap(); let extk = ExtendedKey::from_seed(&s, &seed.as_slice()).unwrap();
let sec = from_hex( let sec = from_hex("c3f5ae520f474b390a637de4669c84d0ed9bbc21742577fac930834d3c3083dd");
"c3f5ae520f474b390a637de4669c84d0ed9bbc21742577fac930834d3c3083dd",
);
let secret_key = SecretKey::from_slice(&s, sec.as_slice()).unwrap(); let secret_key = SecretKey::from_slice(&s, sec.as_slice()).unwrap();
let chaincode = from_hex( let chaincode =
"e7298e68452b0c6d54837670896e1aee76b118075150d90d4ee416ece106ae72", from_hex("e7298e68452b0c6d54837670896e1aee76b118075150d90d4ee416ece106ae72");
);
let identifier = from_hex("83e59c48297b78b34b73"); let identifier = from_hex("83e59c48297b78b34b73");
let depth = 0; let depth = 0;
let n_child = 0; let n_child = 0;
@ -343,13 +340,10 @@ mod test {
let seed = from_hex("000102030405060708090a0b0c0d0e0f"); let seed = from_hex("000102030405060708090a0b0c0d0e0f");
let extk = ExtendedKey::from_seed(&s, &seed.as_slice()).unwrap(); let extk = ExtendedKey::from_seed(&s, &seed.as_slice()).unwrap();
let derived = extk.derive(&s, 0).unwrap(); let derived = extk.derive(&s, 0).unwrap();
let sec = from_hex( let sec = from_hex("d75f70beb2bd3b56f9b064087934bdedee98e4b5aae6280c58b4eff38847888f");
"d75f70beb2bd3b56f9b064087934bdedee98e4b5aae6280c58b4eff38847888f",
);
let secret_key = SecretKey::from_slice(&s, sec.as_slice()).unwrap(); let secret_key = SecretKey::from_slice(&s, sec.as_slice()).unwrap();
let chaincode = from_hex( let chaincode =
"243cb881e1549e714db31d23af45540b13ad07941f64a786bbf3313b4de1df52", from_hex("243cb881e1549e714db31d23af45540b13ad07941f64a786bbf3313b4de1df52");
);
let root_key_id = from_hex("83e59c48297b78b34b73"); let root_key_id = from_hex("83e59c48297b78b34b73");
let identifier = from_hex("0185adb4d8b730099c93"); let identifier = from_hex("0185adb4d8b730099c93");
let depth = 1; let depth = 1;


@ -20,7 +20,7 @@ use util::secp::{Message, Secp256k1, Signature};
use util::secp::key::SecretKey; use util::secp::key::SecretKey;
use util::secp::pedersen::{Commitment, ProofMessage, ProofInfo, RangeProof}; use util::secp::pedersen::{Commitment, ProofMessage, ProofInfo, RangeProof};
use blake2; use blake2;
use blind::{BlindingFactor, BlindSum}; use blind::{BlindSum, BlindingFactor};
use extkey::{self, Identifier}; use extkey::{self, Identifier};


@ -16,10 +16,10 @@
extern crate blake2_rfc as blake2; extern crate blake2_rfc as blake2;
extern crate byteorder; extern crate byteorder;
extern crate grin_util as util;
extern crate rand; extern crate rand;
extern crate grin_util as util; extern crate grin_util as util;
extern crate serde; extern crate serde;
#[macro_use]
extern crate serde_derive; extern crate serde_derive;
extern crate serde_json; extern crate serde_json;
@ -27,6 +27,6 @@ mod blind;
mod extkey; mod extkey;
pub use blind::{BlindSum, BlindingFactor}; pub use blind::{BlindSum, BlindingFactor};
pub use extkey::{Identifier, ExtendedKey, IDENTIFIER_SIZE}; pub use extkey::{ExtendedKey, Identifier, IDENTIFIER_SIZE};
pub mod keychain; pub mod keychain;
pub use keychain::{Error, Keychain}; pub use keychain::{Error, Keychain};


@ -17,13 +17,13 @@
use std::iter; use std::iter;
use std::ops::Deref; use std::ops::Deref;
use std::sync::{Mutex, Arc}; use std::sync::{Arc, Mutex};
use std::time::{Instant, Duration}; use std::time::{Duration, Instant};
use futures; use futures;
use futures::{Stream, Future}; use futures::{Future, Stream};
use futures::stream; use futures::stream;
use futures::sync::mpsc::{Sender, UnboundedSender, UnboundedReceiver}; use futures::sync::mpsc::{Sender, UnboundedReceiver, UnboundedSender};
use tokio_core::net::TcpStream; use tokio_core::net::TcpStream;
use tokio_io::{AsyncRead, AsyncWrite}; use tokio_io::{AsyncRead, AsyncWrite};
use tokio_io::io::{read_exact, write_all}; use tokio_io::io::{read_exact, write_all};
@ -99,7 +99,6 @@ impl Connection {
where where
F: Handler + 'static, F: Handler + 'static,
{ {
let (reader, writer) = conn.split(); let (reader, writer) = conn.split();
// Set Max Read to 12 Mb/s // Set Max Read to 12 Mb/s
@ -112,9 +111,9 @@ impl Connection {
// same for closing the connection // same for closing the connection
let (close_tx, close_rx) = futures::sync::mpsc::channel(1); let (close_tx, close_rx) = futures::sync::mpsc::channel(1);
let close_conn = close_rx.for_each(|_| Ok(())).map_err( let close_conn = close_rx
|_| Error::ConnectionClose, .for_each(|_| Ok(()))
); .map_err(|_| Error::ConnectionClose);
let me = Connection { let me = Connection {
outbound_chan: tx.clone(), outbound_chan: tx.clone(),
@ -152,7 +151,6 @@ impl Connection {
where where
W: AsyncWrite + 'static, W: AsyncWrite + 'static,
{ {
let sent_bytes = self.sent_bytes.clone(); let sent_bytes = self.sent_bytes.clone();
let send_data = rx let send_data = rx
.map_err(|_| Error::ConnectionClose) .map_err(|_| Error::ConnectionClose)
@ -181,7 +179,6 @@ impl Connection {
F: Handler + 'static, F: Handler + 'static,
R: AsyncRead + 'static, R: AsyncRead + 'static,
{ {
// infinite iterator stream so we repeat the message reading logic until the // infinite iterator stream so we repeat the message reading logic until the
// peer is stopped // peer is stopped
let iter = stream::iter_ok(iter::repeat(()).map(Ok::<(), Error>)); let iter = stream::iter_ok(iter::repeat(()).map(Ok::<(), Error>));
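
The comment above names a stock futures-0.1 idiom: an endless unit iterator turned into a stream, so for_each repeats the body until an error (here, the peer stopping) breaks the loop. A self-contained toy version, assuming futures 0.1:

extern crate futures;

use std::iter;
use futures::{stream, Future, Stream};

fn main() {
    let mut passes = 0;
    // Endless stream of `()`: each item drives one iteration of the body;
    // the first Err terminates the whole future, ending the "loop".
    let forever = stream::iter_ok::<_, &str>(iter::repeat(()));
    let looped = forever.for_each(|_| {
        passes += 1;
        if passes < 3 { Ok(()) } else { Err("peer stopped") }
    });
    assert_eq!(looped.wait(), Err("peer stopped"));
}
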
@ -229,7 +226,6 @@ impl Connection {
/// Utility function to send any Writeable. Handles adding the header and /// Utility function to send any Writeable. Handles adding the header and
/// serialization. /// serialization.
pub fn send_msg<W: ser::Writeable>(&self, t: Type, body: &W) -> Result<(), Error> { pub fn send_msg<W: ser::Writeable>(&self, t: Type, body: &W) -> Result<(), Error> {
let mut body_data = vec![]; let mut body_data = vec![];
try!(ser::serialize(&mut body_data, body)); try!(ser::serialize(&mut body_data, body));
let mut data = vec![]; let mut data = vec![];
@ -239,9 +235,9 @@ impl Connection {
)); ));
data.append(&mut body_data); data.append(&mut body_data);
self.outbound_chan.unbounded_send(data).map_err(|_| { self.outbound_chan
Error::ConnectionClose .unbounded_send(data)
}) .map_err(|_| Error::ConnectionClose)
} }
/// Bytes sent and received by this peer to the remote peer. /// Bytes sent and received by this peer to the remote peer.
@ -269,7 +265,6 @@ impl TimeoutConnection {
where where
F: Handler + 'static, F: Handler + 'static,
{ {
let expects = Arc::new(Mutex::new(vec![])); let expects = Arc::new(Mutex::new(vec![]));
// Decorates the handler to remove the "subscription" from the expected // Decorates the handler to remove the "subscription" from the expected


@ -44,7 +44,9 @@ unsafe impl Send for Handshake {}
impl Handshake { impl Handshake {
/// Creates a new handshake handler /// Creates a new handshake handler
pub fn new() -> Handshake { pub fn new() -> Handshake {
Handshake { nonces: Arc::new(RwLock::new(VecDeque::with_capacity(NONCES_CAP))) } Handshake {
nonces: Arc::new(RwLock::new(VecDeque::with_capacity(NONCES_CAP))),
}
} }
/// Handles connecting to a new remote peer, starting the version handshake. /// Handles connecting to a new remote peer, starting the version handshake.


@ -22,25 +22,25 @@
#[macro_use] #[macro_use]
extern crate bitflags; extern crate bitflags;
extern crate bytes;
#[macro_use] #[macro_use]
extern crate enum_primitive; extern crate enum_primitive;
extern crate futures;
#[macro_use] #[macro_use]
extern crate grin_core as core; extern crate grin_core as core;
extern crate grin_store; extern crate grin_store;
extern crate grin_util as util; extern crate grin_util as util;
#[macro_use] extern crate num;
extern crate slog;
extern crate futures;
extern crate tokio_core;
extern crate tokio_io;
extern crate bytes;
extern crate tokio_timer;
extern crate rand; extern crate rand;
extern crate serde; extern crate serde;
#[macro_use] #[macro_use]
extern crate serde_derive; extern crate serde_derive;
#[macro_use]
extern crate slog;
extern crate time; extern crate time;
extern crate num; extern crate tokio_core;
extern crate tokio_io;
extern crate tokio_timer;
mod conn; mod conn;
pub mod handshake; pub mod handshake;
@ -52,8 +52,8 @@ mod server;
mod store; mod store;
mod types; mod types;
pub use server::{Server, DummyAdapter}; pub use server::{DummyAdapter, Server};
pub use peer::Peer; pub use peer::Peer;
pub use types::{P2PConfig, NetAdapter, MAX_LOCATORS, MAX_BLOCK_HEADERS, MAX_PEER_ADDRS, pub use types::{Capabilities, Error, NetAdapter, P2PConfig, PeerInfo, FULL_HIST, FULL_NODE,
Capabilities, UNKNOWN, FULL_NODE, FULL_HIST, PeerInfo, Error}; MAX_BLOCK_HEADERS, MAX_LOCATORS, MAX_PEER_ADDRS, UNKNOWN};
pub use store::{PeerStore, PeerData, State}; pub use store::{PeerData, PeerStore, State};


@ -14,10 +14,10 @@
//! Message types that transit over the network and related serialization code. //! Message types that transit over the network and related serialization code.
use std::net::{SocketAddr, SocketAddrV4, SocketAddrV6, Ipv4Addr, Ipv6Addr}; use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6};
use num::FromPrimitive; use num::FromPrimitive;
use futures::future::{Future, ok}; use futures::future::{ok, Future};
use tokio_core::net::TcpStream; use tokio_core::net::TcpStream;
use tokio_io::io::{read_exact, write_all}; use tokio_io::io::{read_exact, write_all};
@ -25,7 +25,7 @@ use core::consensus::MAX_MSG_LEN;
use core::core::BlockHeader; use core::core::BlockHeader;
use core::core::hash::Hash; use core::core::hash::Hash;
use core::core::target::Difficulty; use core::core::target::Difficulty;
use core::ser::{self, Writeable, Readable, Writer, Reader}; use core::ser::{self, Readable, Reader, Writeable, Writer};
use types::*; use types::*;
@ -170,13 +170,11 @@ impl Readable for MsgHeader {
try!(reader.expect_u8(MAGIC[1])); try!(reader.expect_u8(MAGIC[1]));
let (t, len) = ser_multiread!(reader, read_u8, read_u64); let (t, len) = ser_multiread!(reader, read_u8, read_u64);
match Type::from_u8(t) { match Type::from_u8(t) {
Some(ty) => { Some(ty) => Ok(MsgHeader {
Ok(MsgHeader {
magic: MAGIC, magic: MAGIC,
msg_type: ty, msg_type: ty,
msg_len: len, msg_len: len,
}) }),
}
None => Err(ser::Error::CorruptedData), None => Err(ser::Error::CorruptedData),
} }
} }
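
From the reads above (two magic bytes, a u8 type, a u64 length), the v1 message header occupies a fixed 11 bytes on the wire (constant name illustrative):

// Header layout implied by the reader calls above:
//   bytes [0..2)  magic
//   byte  [2..3)  message type (u8)
//   bytes [3..11) body length (u64)
const HEADER_LEN: u64 = 2 + 1 + 8; // = 11
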
@ -226,9 +224,7 @@ impl Readable for Hand {
let receiver_addr = try!(SockAddr::read(reader)); let receiver_addr = try!(SockAddr::read(reader));
let ua = try!(reader.read_vec()); let ua = try!(reader.read_vec());
let user_agent = try!(String::from_utf8(ua).map_err(|_| ser::Error::CorruptedData)); let user_agent = try!(String::from_utf8(ua).map_err(|_| ser::Error::CorruptedData));
let capabilities = try!(Capabilities::from_bits(capab).ok_or( let capabilities = try!(Capabilities::from_bits(capab).ok_or(ser::Error::CorruptedData,));
ser::Error::CorruptedData,
));
Ok(Hand { Ok(Hand {
version: version, version: version,
capabilities: capabilities, capabilities: capabilities,
@ -275,9 +271,7 @@ impl Readable for Shake {
let total_diff = try!(Difficulty::read(reader)); let total_diff = try!(Difficulty::read(reader));
let ua = try!(reader.read_vec()); let ua = try!(reader.read_vec());
let user_agent = try!(String::from_utf8(ua).map_err(|_| ser::Error::CorruptedData)); let user_agent = try!(String::from_utf8(ua).map_err(|_| ser::Error::CorruptedData));
let capabilities = try!(Capabilities::from_bits(capab).ok_or( let capabilities = try!(Capabilities::from_bits(capab).ok_or(ser::Error::CorruptedData,));
ser::Error::CorruptedData,
));
Ok(Shake { Ok(Shake {
version: version, version: version,
capabilities: capabilities, capabilities: capabilities,
@ -302,10 +296,10 @@ impl Writeable for GetPeerAddrs {
impl Readable for GetPeerAddrs { impl Readable for GetPeerAddrs {
fn read(reader: &mut Reader) -> Result<GetPeerAddrs, ser::Error> { fn read(reader: &mut Reader) -> Result<GetPeerAddrs, ser::Error> {
let capab = try!(reader.read_u32()); let capab = try!(reader.read_u32());
let capabilities = try!(Capabilities::from_bits(capab).ok_or( let capabilities = try!(Capabilities::from_bits(capab).ok_or(ser::Error::CorruptedData,));
ser::Error::CorruptedData, Ok(GetPeerAddrs {
)); capabilities: capabilities,
Ok(GetPeerAddrs { capabilities: capabilities }) })
} }
} }
@ -361,9 +355,7 @@ impl Writeable for PeerError {
impl Readable for PeerError { impl Readable for PeerError {
fn read(reader: &mut Reader) -> Result<PeerError, ser::Error> { fn read(reader: &mut Reader) -> Result<PeerError, ser::Error> {
let (code, msg) = ser_multiread!(reader, read_u32, read_vec); let (code, msg) = ser_multiread!(reader, read_u32, read_vec);
let message = try!(String::from_utf8(msg).map_err( let message = try!(String::from_utf8(msg).map_err(|_| ser::Error::CorruptedData,));
|_| ser::Error::CorruptedData,
));
Ok(PeerError { Ok(PeerError {
code: code, code: code,
message: message, message: message,
@ -413,16 +405,7 @@ impl Readable for SockAddr {
let ip = try_map_vec!([0..8], |_| reader.read_u16()); let ip = try_map_vec!([0..8], |_| reader.read_u16());
let port = try!(reader.read_u16()); let port = try!(reader.read_u16());
Ok(SockAddr(SocketAddr::V6(SocketAddrV6::new( Ok(SockAddr(SocketAddr::V6(SocketAddrV6::new(
Ipv6Addr::new( Ipv6Addr::new(ip[0], ip[1], ip[2], ip[3], ip[4], ip[5], ip[6], ip[7]),
ip[0],
ip[1],
ip[2],
ip[3],
ip[4],
ip[5],
ip[6],
ip[7],
),
port, port,
0, 0,
0, 0,


@ -13,7 +13,7 @@
// limitations under the License. // limitations under the License.
use std::net::SocketAddr; use std::net::SocketAddr;
use std::sync::{RwLock, Arc}; use std::sync::{Arc, RwLock};
use futures::Future; use futures::Future;
use tokio_core::net::TcpStream; use tokio_core::net::TcpStream;
@ -80,20 +80,16 @@ impl Peer {
hs: &Handshake, hs: &Handshake,
na: Arc<NetAdapter>, na: Arc<NetAdapter>,
) -> Box<Future<Item = (TcpStream, Peer), Error = Error>> { ) -> Box<Future<Item = (TcpStream, Peer), Error = Error>> {
let hs_peer = hs.handshake(capab, total_difficulty, conn).and_then( let hs_peer = hs.handshake(capab, total_difficulty, conn)
|(conn, .and_then(|(conn, proto, info)| {
proto,
info)| {
Ok((conn, Peer::new(info, Box::new(proto), na))) Ok((conn, Peer::new(info, Box::new(proto), na)))
}, });
);
Box::new(hs_peer) Box::new(hs_peer)
} }
/// Main peer loop listening for messages and forwarding to the rest of the /// Main peer loop listening for messages and forwarding to the rest of the
/// system. /// system.
pub fn run(&self, conn: TcpStream) -> Box<Future<Item = (), Error = Error>> { pub fn run(&self, conn: TcpStream) -> Box<Future<Item = (), Error = Error>> {
let addr = self.info.addr; let addr = self.info.addr;
let state = self.state.clone(); let state = self.state.clone();
let adapter = Arc::new(self.tracking_adapter.clone()); let adapter = Arc::new(self.tracking_adapter.clone());


@ -34,7 +34,9 @@ pub struct ProtocolV1 {
impl ProtocolV1 { impl ProtocolV1 {
pub fn new() -> ProtocolV1 { pub fn new() -> ProtocolV1 {
ProtocolV1 { conn: OneTime::new() } ProtocolV1 {
conn: OneTime::new(),
}
} }
} }
@ -45,7 +47,6 @@ impl Protocol for ProtocolV1 {
conn: TcpStream, conn: TcpStream,
adapter: Arc<NetAdapter>, adapter: Arc<NetAdapter>,
) -> Box<Future<Item = (), Error = Error>> { ) -> Box<Future<Item = (), Error = Error>> {
let (conn, listener) = TimeoutConnection::listen(conn, move |sender, header, data| { let (conn, listener) = TimeoutConnection::listen(conn, move |sender, header, data| {
let adapt = adapter.as_ref(); let adapt = adapter.as_ref();
handle_payload(adapt, sender, header, data) handle_payload(adapt, sender, header, data)
@ -94,7 +95,9 @@ impl Protocol for ProtocolV1 {
self.send_request( self.send_request(
Type::GetPeerAddrs, Type::GetPeerAddrs,
Type::PeerAddrs, Type::PeerAddrs,
&GetPeerAddrs { capabilities: capab }, &GetPeerAddrs {
capabilities: capab,
},
None, None,
) )
} }


@ -330,12 +330,11 @@ fn with_timeout<T: 'static>(
h: &reactor::Handle, h: &reactor::Handle,
) -> Box<Future<Item = T, Error = Error>> { ) -> Box<Future<Item = T, Error = Error>> {
let timeout = reactor::Timeout::new(Duration::new(5, 0), h).unwrap(); let timeout = reactor::Timeout::new(Duration::new(5, 0), h).unwrap();
let timed = fut.select(timeout.map(Err).from_err()).then( let timed = fut.select(timeout.map(Err).from_err())
|res| match res { .then(|res| match res {
Ok((Ok(inner), _timeout)) => Ok(inner), Ok((Ok(inner), _timeout)) => Ok(inner),
Ok((_, _accept)) => Err(Error::Timeout), Ok((_, _accept)) => Err(Error::Timeout),
Err((e, _other)) => Err(e), Err((e, _other)) => Err(e),
}, });
);
Box::new(timed) Box::new(timed)
} }


@ -17,8 +17,8 @@
use std::net::SocketAddr; use std::net::SocketAddr;
use num::FromPrimitive; use num::FromPrimitive;
use core::ser::{self, Readable, Writeable, Reader, Writer}; use core::ser::{self, Readable, Reader, Writeable, Writer};
use grin_store::{self, Error, to_key, option_to_not_found}; use grin_store::{self, option_to_not_found, to_key, Error};
use msg::SockAddr; use msg::SockAddr;
use types::Capabilities; use types::Capabilities;
@ -68,18 +68,14 @@ impl Readable for PeerData {
let addr = SockAddr::read(reader)?; let addr = SockAddr::read(reader)?;
let (capab, ua, fl) = ser_multiread!(reader, read_u32, read_vec, read_u8); let (capab, ua, fl) = ser_multiread!(reader, read_u32, read_vec, read_u8);
let user_agent = String::from_utf8(ua).map_err(|_| ser::Error::CorruptedData)?; let user_agent = String::from_utf8(ua).map_err(|_| ser::Error::CorruptedData)?;
let capabilities = Capabilities::from_bits(capab).ok_or( let capabilities = Capabilities::from_bits(capab).ok_or(ser::Error::CorruptedData)?;
ser::Error::CorruptedData,
)?;
match State::from_u8(fl) { match State::from_u8(fl) {
Some(flags) => { Some(flags) => Ok(PeerData {
Ok(PeerData {
addr: addr.0, addr: addr.0,
capabilities: capabilities, capabilities: capabilities,
user_agent: user_agent, user_agent: user_agent,
flags: flags, flags: flags,
}) }),
}
None => Err(ser::Error::CorruptedData), None => Err(ser::Error::CorruptedData),
} }
} }
@ -109,22 +105,18 @@ impl PeerStore {
} }
pub fn exists_peer(&self, peer_addr: SocketAddr) -> Result<bool, Error> { pub fn exists_peer(&self, peer_addr: SocketAddr) -> Result<bool, Error> {
self.db.exists( self.db
&to_key(PEER_PREFIX, &mut format!("{}", peer_addr).into_bytes())[..], .exists(&to_key(PEER_PREFIX, &mut format!("{}", peer_addr).into_bytes())[..])
)
} }
pub fn delete_peer(&self, peer_addr: SocketAddr) -> Result<(), Error> { pub fn delete_peer(&self, peer_addr: SocketAddr) -> Result<(), Error> {
self.db.delete( self.db
&to_key(PEER_PREFIX, &mut format!("{}", peer_addr).into_bytes())[..], .delete(&to_key(PEER_PREFIX, &mut format!("{}", peer_addr).into_bytes())[..])
)
} }
pub fn find_peers(&self, state: State, cap: Capabilities, count: usize) -> Vec<PeerData> { pub fn find_peers(&self, state: State, cap: Capabilities, count: usize) -> Vec<PeerData> {
let peers_iter = self.db.iter::<PeerData>(&to_key( let peers_iter = self.db
PEER_PREFIX, .iter::<PeerData>(&to_key(PEER_PREFIX, &mut "".to_string().into_bytes()));
&mut "".to_string().into_bytes(),
));
let mut peers = Vec::with_capacity(count); let mut peers = Vec::with_capacity(count);
for p in peers_iter { for p in peers_iter {
if p.flags == state && p.capabilities.contains(cap) { if p.flags == state && p.capabilities.contains(cap) {

View file

@ -14,7 +14,7 @@
use std::convert::From; use std::convert::From;
use std::io; use std::io;
use std::net::{SocketAddr, IpAddr}; use std::net::{IpAddr, SocketAddr};
use std::sync::Arc; use std::sync::Arc;
use futures::Future; use futures::Future;


@ -12,9 +12,9 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
extern crate futures;
extern crate grin_core as core; extern crate grin_core as core;
extern crate grin_p2p as p2p; extern crate grin_p2p as p2p;
extern crate futures;
extern crate tokio_core; extern crate tokio_core;
use std::net::SocketAddr; use std::net::SocketAddr;
@ -32,7 +32,6 @@ use p2p::Peer;
// followed by a ping/pong exchange to make sure the connection is live. // followed by a ping/pong exchange to make sure the connection is live.
#[test] #[test]
fn peer_handshake() { fn peer_handshake() {
let mut evtlp = Core::new().unwrap(); let mut evtlp = Core::new().unwrap();
let handle = evtlp.handle(); let handle = evtlp.handle();
let p2p_conf = p2p::P2PConfig::default(); let p2p_conf = p2p::P2PConfig::default();
@ -89,5 +88,4 @@ fn peer_handshake() {
); );
evtlp.run(run_server).unwrap(); evtlp.run(run_server).unwrap();
} }


@ -49,7 +49,9 @@ pub struct DummyUtxoSet {
#[allow(dead_code)] #[allow(dead_code)]
impl DummyUtxoSet { impl DummyUtxoSet {
pub fn empty() -> DummyUtxoSet { pub fn empty() -> DummyUtxoSet {
DummyUtxoSet { outputs: HashMap::new() } DummyUtxoSet {
outputs: HashMap::new(),
}
} }
pub fn root(&self) -> hash::Hash { pub fn root(&self) -> hash::Hash {
hash::ZERO_HASH hash::ZERO_HASH
@ -62,7 +64,9 @@ impl DummyUtxoSet {
for output in &b.outputs { for output in &b.outputs {
new_hashmap.insert(output.commitment(), output.clone()); new_hashmap.insert(output.commitment(), output.clone());
} }
DummyUtxoSet { outputs: new_hashmap } DummyUtxoSet {
outputs: new_hashmap,
}
} }
pub fn with_block(&mut self, b: &block::Block) { pub fn with_block(&mut self, b: &block::Block) {
for input in &b.inputs { for input in &b.inputs {
@ -73,14 +77,18 @@ impl DummyUtxoSet {
} }
} }
pub fn rewind(&self, _: &block::Block) -> DummyUtxoSet { pub fn rewind(&self, _: &block::Block) -> DummyUtxoSet {
DummyUtxoSet { outputs: HashMap::new() } DummyUtxoSet {
outputs: HashMap::new(),
}
} }
pub fn get_output(&self, output_ref: &Commitment) -> Option<&transaction::Output> { pub fn get_output(&self, output_ref: &Commitment) -> Option<&transaction::Output> {
self.outputs.get(output_ref) self.outputs.get(output_ref)
} }
fn clone(&self) -> DummyUtxoSet { fn clone(&self) -> DummyUtxoSet {
DummyUtxoSet { outputs: self.outputs.clone() } DummyUtxoSet {
outputs: self.outputs.clone(),
}
} }
// only for testing: add an output to the map // only for testing: add an output to the map
@ -108,8 +116,12 @@ pub struct DummyChainImpl {
impl DummyChainImpl { impl DummyChainImpl {
pub fn new() -> DummyChainImpl { pub fn new() -> DummyChainImpl {
DummyChainImpl { DummyChainImpl {
utxo: RwLock::new(DummyUtxoSet { outputs: HashMap::new() }), utxo: RwLock::new(DummyUtxoSet {
block_headers: RwLock::new(DummyBlockHeaderIndex { block_headers: HashMap::new() }), outputs: HashMap::new(),
}),
block_headers: RwLock::new(DummyBlockHeaderIndex {
block_headers: HashMap::new(),
}),
head_header: RwLock::new(vec![]), head_header: RwLock::new(vec![]),
} }
} }
@ -131,7 +143,8 @@ impl BlockChain for DummyChainImpl {
match self.block_headers match self.block_headers
.read() .read()
.unwrap() .unwrap()
.get_block_header_by_output_commit(*commit) { .get_block_header_by_output_commit(*commit)
{
Ok(h) => Ok(h.clone()), Ok(h) => Ok(h.clone()),
Err(e) => Err(e), Err(e) => Err(e),
} }
@ -159,10 +172,10 @@ impl DummyChain for DummyChainImpl {
commitment: Commitment, commitment: Commitment,
block_header: &block::BlockHeader, block_header: &block::BlockHeader,
) { ) {
self.block_headers.write().unwrap().insert( self.block_headers
commitment, .write()
block_header.clone(), .unwrap()
); .insert(commitment, block_header.clone());
} }
fn store_head_header(&self, block_header: &block::BlockHeader) { fn store_head_header(&self, block_header: &block::BlockHeader) {
let mut h = self.head_header.write().unwrap(); let mut h = self.head_header.write().unwrap();


@ -165,23 +165,24 @@ impl DirectedGraph {
/// Remove a vertex by its hash /// Remove a vertex by its hash
pub fn remove_vertex(&mut self, tx_hash: core::hash::Hash) -> Option<PoolEntry> { pub fn remove_vertex(&mut self, tx_hash: core::hash::Hash) -> Option<PoolEntry> {
match self.roots.iter().position( match self.roots
|x| x.transaction_hash == tx_hash, .iter()
) { .position(|x| x.transaction_hash == tx_hash)
{
Some(i) => Some(self.roots.swap_remove(i)), Some(i) => Some(self.roots.swap_remove(i)),
None => { None => match self.vertices
match self.vertices.iter().position( .iter()
|x| x.transaction_hash == tx_hash, .position(|x| x.transaction_hash == tx_hash)
) { {
Some(i) => Some(self.vertices.swap_remove(i)), Some(i) => Some(self.vertices.swap_remove(i)),
None => None, None => None,
} },
}
} }
} }
/// Promote any non-root vertices to roots based on current edges. /// Promote any non-root vertices to roots based on current edges.
/// For a given tx, if there are no edges with that tx as destination then it is a root. /// For a given tx, if there are no edges with that tx as destination then
/// it is a root.
pub fn update_roots(&mut self) { pub fn update_roots(&mut self) {
let mut new_vertices: Vec<PoolEntry> = vec![]; let mut new_vertices: Vec<PoolEntry> = vec![];
@ -272,7 +273,7 @@ impl DirectedGraph {
.map(|x| x.transaction_hash) .map(|x| x.transaction_hash)
.collect::<Vec<_>>(); .collect::<Vec<_>>();
hashes.extend(&non_root_hashes); hashes.extend(&non_root_hashes);
return hashes return hashes;
} }
} }
@ -313,7 +314,9 @@ mod tests {
features: core::transaction::DEFAULT_OUTPUT, features: core::transaction::DEFAULT_OUTPUT,
commit: output_commit, commit: output_commit,
switch_commit_hash: switch_commit_hash, switch_commit_hash: switch_commit_hash,
proof: keychain.range_proof(100, &key_id1, output_commit, msg).unwrap(), proof: keychain
.range_proof(100, &key_id1, output_commit, msg)
.unwrap(),
}, },
]; ];
let test_transaction = core::transaction::Transaction::new(inputs, outputs, 5, 0); let test_transaction = core::transaction::Transaction::new(inputs, outputs, 5, 0);


@ -26,15 +26,15 @@ mod types;
mod blockchain; mod blockchain;
mod pool; mod pool;
extern crate time;
extern crate rand;
extern crate serde;
#[macro_use]
extern crate serde_derive;
extern crate blake2_rfc as blake2; extern crate blake2_rfc as blake2;
extern crate grin_core as core; extern crate grin_core as core;
extern crate grin_keychain as keychain; extern crate grin_keychain as keychain;
extern crate grin_util as util; extern crate grin_util as util;
extern crate rand;
extern crate serde;
#[macro_use]
extern crate serde_derive;
extern crate time;
pub use pool::TransactionPool; pub use pool::TransactionPool;
pub use types::{BlockChain, PoolAdapter, TxSource, PoolError, PoolConfig}; pub use types::{BlockChain, PoolAdapter, PoolConfig, PoolError, TxSource};


@ -76,7 +76,9 @@ where
self.pool self.pool
.get_available_output(output_commitment) .get_available_output(output_commitment)
.map(|x| { .map(|x| {
Parent::PoolTransaction { tx_ref: x.source_hash().unwrap() } Parent::PoolTransaction {
tx_ref: x.source_hash().unwrap(),
}
}) })
.or(self.search_blockchain_unspents(output_commitment)) .or(self.search_blockchain_unspents(output_commitment))
.or(self.search_pool_spents(output_commitment)) .or(self.search_pool_spents(output_commitment))
@ -87,12 +89,17 @@ where
// unspent set, represented by blockchain unspents - pool spents, for an // unspent set, represented by blockchain unspents - pool spents, for an
// output designated by output_commitment. // output designated by output_commitment.
fn search_blockchain_unspents(&self, output_commitment: &Commitment) -> Option<Parent> { fn search_blockchain_unspents(&self, output_commitment: &Commitment) -> Option<Parent> {
self.blockchain.get_unspent(output_commitment).ok().map( self.blockchain
|output| match self.pool.get_blockchain_spent(output_commitment) { .get_unspent(output_commitment)
Some(x) => Parent::AlreadySpent { other_tx: x.destination_hash().unwrap() }, .ok()
None => Parent::BlockTransaction { output }, .map(|output| {
match self.pool.get_blockchain_spent(output_commitment) {
Some(x) => Parent::AlreadySpent {
other_tx: x.destination_hash().unwrap(),
}, },
) None => Parent::BlockTransaction { output },
}
})
} }
// search_pool_spents is the second half of pool input detection, after the // search_pool_spents is the second half of pool input detection, after the
@ -100,9 +107,10 @@ where
// Parent::AlreadySpent or None. // Parent::AlreadySpent or None.
fn search_pool_spents(&self, output_commitment: &Commitment) -> Option<Parent> { fn search_pool_spents(&self, output_commitment: &Commitment) -> Option<Parent> {
self.pool.get_internal_spent(output_commitment).map(|x| { self.pool.get_internal_spent(output_commitment).map(|x| {
Parent::AlreadySpent { other_tx: x.destination_hash().unwrap() } Parent::AlreadySpent {
other_tx: x.destination_hash().unwrap(),
}
}) })
} }
/// Get the number of transactions in the pool /// Get the number of transactions in the pool
@ -131,7 +139,6 @@ where
_: TxSource, _: TxSource,
tx: transaction::Transaction, tx: transaction::Transaction,
) -> Result<(), PoolError> { ) -> Result<(), PoolError> {
// Do we have the capacity to accept this transaction? // Do we have the capacity to accept this transaction?
if let Err(e) = self.is_acceptable(&tx) { if let Err(e) = self.is_acceptable(&tx) {
return Err(e); return Err(e);
@ -235,18 +242,13 @@ where
// In the non-orphan (pool) case, we've ensured that every input // In the non-orphan (pool) case, we've ensured that every input
// maps one-to-one with an unspent (available) output, and each // maps one-to-one with an unspent (available) output, and each
// output is unique. No further checks are necessary. // output is unique. No further checks are necessary.
self.pool.add_pool_transaction( self.pool
pool_entry, .add_pool_transaction(pool_entry, blockchain_refs, pool_refs, new_unspents);
blockchain_refs,
pool_refs,
new_unspents,
);
self.reconcile_orphans().unwrap(); self.reconcile_orphans().unwrap();
self.adapter.tx_accepted(&tx); self.adapter.tx_accepted(&tx);
self.transactions.insert(tx_hash, Box::new(tx)); self.transactions.insert(tx_hash, Box::new(tx));
Ok(()) Ok(())
} else { } else {
// At this point, we're pretty sure the transaction is an orphan, // At this point, we're pretty sure the transaction is an orphan,
// but we have to explicitly check for double spends against the // but we have to explicitly check for double spends against the
@ -257,9 +259,9 @@ where
// Note that pool_connections here also does double duty to // Note that pool_connections here also does double duty to
// account for blockchain connections. // account for blockchain connections.
for pool_ref in pool_refs.iter().chain(blockchain_refs.iter()) { for pool_ref in pool_refs.iter().chain(blockchain_refs.iter()) {
match self.orphans.get_external_spent_output( match self.orphans
&pool_ref.output_commitment(), .get_external_spent_output(&pool_ref.output_commitment())
) { {
// Should the below err be subtyped to orphans somehow? // Should the below err be subtyped to orphans somehow?
Some(x) => { Some(x) => {
return Err(PoolError::DoubleSpend { return Err(PoolError::DoubleSpend {
@ -289,7 +291,6 @@ where
Err(PoolError::OrphanTransaction) Err(PoolError::OrphanTransaction)
} }
} }
/// Check the output for a conflict with an existing output. /// Check the output for a conflict with an existing output.
@ -469,14 +470,15 @@ where
.filter_map(|x| x.destination_hash()) .filter_map(|x| x.destination_hash())
.collect(); .collect();
// find all outputs that conflict - potential for duplicates so use a HashSet here // find all outputs that conflict - potential for duplicates so use a HashSet
// here
let conflicting_outputs: HashSet<hash::Hash> = block let conflicting_outputs: HashSet<hash::Hash> = block
.outputs .outputs
.iter() .iter()
.filter_map(|x: &transaction::Output| { .filter_map(|x: &transaction::Output| {
self.pool.get_internal_spent_output(&x.commitment()).or( self.pool
self.pool.get_available_output(&x.commitment()), .get_internal_spent_output(&x.commitment())
) .or(self.pool.get_available_output(&x.commitment()))
}) })
.filter_map(|x| x.source_hash()) .filter_map(|x| x.source_hash())
.collect(); .collect();
@ -504,11 +506,7 @@ where
/// ///
/// Marked transactions are added to the mutable marked_txs HashSet which /// Marked transactions are added to the mutable marked_txs HashSet which
/// is supplied by the calling function. /// is supplied by the calling function.
fn mark_transaction( fn mark_transaction(&self, conflicting_tx: hash::Hash, marked_txs: &mut HashSet<hash::Hash>) {
&self,
conflicting_tx: hash::Hash,
marked_txs: &mut HashSet<hash::Hash>,
) {
// we can stop recursively visiting txs if we have already seen this one // we can stop recursively visiting txs if we have already seen this one
if marked_txs.contains(&conflicting_tx) { if marked_txs.contains(&conflicting_tx) {
return; return;
@ -520,11 +518,9 @@ where
for output in &tx_ref.unwrap().outputs { for output in &tx_ref.unwrap().outputs {
match self.pool.get_internal_spent_output(&output.commitment()) { match self.pool.get_internal_spent_output(&output.commitment()) {
Some(x) => { Some(x) => if self.blockchain.get_unspent(&x.output_commitment()).is_err() {
if self.blockchain.get_unspent(&x.output_commitment()).is_err() {
self.mark_transaction(x.destination_hash().unwrap(), marked_txs); self.mark_transaction(x.destination_hash().unwrap(), marked_txs);
} },
}
None => {} None => {}
}; };
} }
@ -544,16 +540,13 @@ where
&mut self, &mut self,
marked_transactions: HashSet<hash::Hash>, marked_transactions: HashSet<hash::Hash>,
) -> Vec<Box<transaction::Transaction>> { ) -> Vec<Box<transaction::Transaction>> {
let mut removed_txs = Vec::new(); let mut removed_txs = Vec::new();
for tx_hash in &marked_transactions { for tx_hash in &marked_transactions {
let removed_tx = self.transactions.remove(&tx_hash).unwrap(); let removed_tx = self.transactions.remove(&tx_hash).unwrap();
self.pool.remove_pool_transaction( self.pool
&removed_tx, .remove_pool_transaction(&removed_tx, &marked_transactions);
&marked_transactions,
);
removed_txs.push(removed_tx); removed_txs.push(removed_tx);
} }
@ -683,7 +676,6 @@ mod tests {
child_result.err().unwrap() child_result.err().unwrap()
); );
} }
} }
// Now take the read lock and use a few exposed methods to check // Now take the read lock and use a few exposed methods to check
@ -721,7 +713,7 @@ mod tests {
assert_eq!(write_pool.total_size(), 0); assert_eq!(write_pool.total_size(), 0);
// First expected failure: duplicate output // First expected failure: duplicate output
let duplicate_tx = test_transaction(vec![5,6], vec![7]); let duplicate_tx = test_transaction(vec![5, 6], vec![7]);
match write_pool.add_to_memory_pool(test_source(), duplicate_tx) { match write_pool.add_to_memory_pool(test_source(), duplicate_tx) {
Ok(_) => panic!("Got OK from add_to_memory_pool when dup was expected"), Ok(_) => panic!("Got OK from add_to_memory_pool when dup was expected"),
@ -731,23 +723,22 @@ mod tests {
other_tx, other_tx,
in_chain, in_chain,
output, output,
} => { } => if other_tx.is_some() || !in_chain
if other_tx.is_some() || !in_chain || || output != test_output(7).commitment()
output != test_output(7).commitment()
{ {
panic!("Unexpected parameter in DuplicateOutput: {:?}", x); panic!("Unexpected parameter in DuplicateOutput: {:?}", x);
} },
} _ => panic!(
_ => { "Unexpected error when adding duplicate output transaction: {:?}",
panic!("Unexpected error when adding duplicate output transaction: {:?}", x) x
} ),
}; };
} }
}; };
// To test DoubleSpend and AlreadyInPool conditions, we need to add // To test DoubleSpend and AlreadyInPool conditions, we need to add
// a valid transaction. // a valid transaction.
let valid_transaction = test_transaction(vec![5,6], vec![9]); let valid_transaction = test_transaction(vec![5, 6], vec![9]);
match write_pool.add_to_memory_pool(test_source(), valid_transaction) { match write_pool.add_to_memory_pool(test_source(), valid_transaction) {
Ok(_) => {} Ok(_) => {}
@ -765,19 +756,18 @@ mod tests {
PoolError::DoubleSpend { PoolError::DoubleSpend {
other_tx: _, other_tx: _,
spent_output, spent_output,
} => { } => if spent_output != test_output(6).commitment() {
if spent_output != test_output(6).commitment() {
panic!("Unexpected parameter in DoubleSpend: {:?}", x); panic!("Unexpected parameter in DoubleSpend: {:?}", x);
} },
} _ => panic!(
_ => { "Unexpected error when adding double spend transaction: {:?}",
panic!("Unexpected error when adding double spend transaction: {:?}", x) x
} ),
}; };
} }
}; };
let already_in_pool = test_transaction(vec![5,6], vec![9]); let already_in_pool = test_transaction(vec![5, 6], vec![9]);
match write_pool.add_to_memory_pool(test_source(), already_in_pool) { match write_pool.add_to_memory_pool(test_source(), already_in_pool) {
Ok(_) => panic!("Expected error when adding already in pool, got Ok"), Ok(_) => panic!("Expected error when adding already in pool, got Ok"),
@ -795,7 +785,9 @@ mod tests {
// should fail as invalid based on current height // should fail as invalid based on current height
let timelocked_tx_1 = timelocked_transaction(vec![9], vec![5], 10); let timelocked_tx_1 = timelocked_transaction(vec![9], vec![5], 10);
match write_pool.add_to_memory_pool(test_source(), timelocked_tx_1) { match write_pool.add_to_memory_pool(test_source(), timelocked_tx_1) {
Err(PoolError::ImmatureTransaction { lock_height: height }) => { Err(PoolError::ImmatureTransaction {
lock_height: height,
}) => {
assert_eq!(height, 10); assert_eq!(height, 10);
} }
Err(e) => panic!("expected ImmatureTransaction error here - {:?}", e), Err(e) => panic!("expected ImmatureTransaction error here - {:?}", e),
@ -821,10 +813,8 @@ mod tests {
height: 1, height: 1,
..block::BlockHeader::default() ..block::BlockHeader::default()
}; };
chain_ref.store_header_by_output_commitment( chain_ref
coinbase_output.commitment(), .store_header_by_output_commitment(coinbase_output.commitment(), &coinbase_header);
&coinbase_header,
);
let head_header = block::BlockHeader { let head_header = block::BlockHeader {
height: 2, height: 2,
@ -893,8 +883,7 @@ mod tests {
dummy_chain.store_head_header(&head_header); dummy_chain.store_head_header(&head_header);
// single UTXO // single UTXO
let new_utxo = DummyUtxoSet::empty() let new_utxo = DummyUtxoSet::empty().with_output(test_output(100));
.with_output(test_output(100));
dummy_chain.update_utxo_set(new_utxo); dummy_chain.update_utxo_set(new_utxo);
let chain_ref = Arc::new(dummy_chain); let chain_ref = Arc::new(dummy_chain);
@ -989,13 +978,13 @@ mod tests {
// consumed in the block, although it is not exactly consumed. // consumed in the block, although it is not exactly consumed.
// 3. A transaction that should remain after block reconciliation. // 3. A transaction that should remain after block reconciliation.
let block_transaction = test_transaction(vec![10], vec![8]); let block_transaction = test_transaction(vec![10], vec![8]);
let conflict_transaction = test_transaction(vec![20], vec![12,6]); let conflict_transaction = test_transaction(vec![20], vec![12, 6]);
let valid_transaction = test_transaction(vec![30], vec![13,15]); let valid_transaction = test_transaction(vec![30], vec![13, 15]);
// We will also introduce a few children: // We will also introduce a few children:
// 4. A transaction that descends from transaction 1, that is in // 4. A transaction that descends from transaction 1, that is in
// turn exactly contained in the block. // turn exactly contained in the block.
let block_child = test_transaction(vec![8], vec![5,1]); let block_child = test_transaction(vec![8], vec![5, 1]);
// 5. A transaction that descends from transaction 4, that is not // 5. A transaction that descends from transaction 4, that is not
// contained in the block at all and should be valid after // contained in the block at all and should be valid after
// reconciliation. // reconciliation.
@ -1015,7 +1004,7 @@ mod tests {
let valid_child_valid = test_transaction(vec![15], vec![11]); let valid_child_valid = test_transaction(vec![15], vec![11]);
// 10. A transaction that descends from both transaction 6 and // 10. A transaction that descends from both transaction 6 and
// transaction 9 // transaction 9
let mixed_child = test_transaction(vec![2,11], vec![7]); let mixed_child = test_transaction(vec![2, 11], vec![7]);
// Add transactions. // Add transactions.
// Note: There are some ordering constraints that must be followed here // Note: There are some ordering constraints that must be followed here
@ -1055,9 +1044,9 @@ mod tests {
// - Conflict w/ 2, satisfies 7 // - Conflict w/ 2, satisfies 7
let block_tx_2 = test_transaction(vec![20], vec![6]); let block_tx_2 = test_transaction(vec![20], vec![6]);
// - Copy of 4 // - Copy of 4
let block_tx_3 = test_transaction(vec![8], vec![5,1]); let block_tx_3 = test_transaction(vec![8], vec![5, 1]);
// - Output conflict w/ 8 // - Output conflict w/ 8
let block_tx_4 = test_transaction(vec![40], vec![9,1]); let block_tx_4 = test_transaction(vec![40], vec![9, 1]);
let block_transactions = vec![&block_tx_1, &block_tx_2, &block_tx_3, &block_tx_4]; let block_transactions = vec![&block_tx_1, &block_tx_2, &block_tx_3, &block_tx_4];
let keychain = Keychain::from_random_seed().unwrap(); let keychain = Keychain::from_random_seed().unwrap();
@ -1147,11 +1136,31 @@ mod tests {
let mut write_pool = pool.write().unwrap(); let mut write_pool = pool.write().unwrap();
assert_eq!(write_pool.total_size(), 0); assert_eq!(write_pool.total_size(), 0);
assert!(write_pool.add_to_memory_pool(test_source(), root_tx_1).is_ok()); assert!(
assert!(write_pool.add_to_memory_pool(test_source(), root_tx_2).is_ok()); write_pool
assert!(write_pool.add_to_memory_pool(test_source(), root_tx_3).is_ok()); .add_to_memory_pool(test_source(), root_tx_1)
assert!(write_pool.add_to_memory_pool(test_source(), child_tx_1).is_ok()); .is_ok()
assert!(write_pool.add_to_memory_pool(test_source(), child_tx_2).is_ok()); );
assert!(
write_pool
.add_to_memory_pool(test_source(), root_tx_2)
.is_ok()
);
assert!(
write_pool
.add_to_memory_pool(test_source(), root_tx_3)
.is_ok()
);
assert!(
write_pool
.add_to_memory_pool(test_source(), child_tx_1)
.is_ok()
);
assert!(
write_pool
.add_to_memory_pool(test_source(), child_tx_2)
.is_ok()
);
assert_eq!(write_pool.total_size(), 5); assert_eq!(write_pool.total_size(), 5);
} }
@ -1199,7 +1208,7 @@ mod tests {
pool: Pool::empty(), pool: Pool::empty(),
orphans: Orphans::empty(), orphans: Orphans::empty(),
blockchain: dummy_chain.clone(), blockchain: dummy_chain.clone(),
adapter: Arc::new(NoopAdapter{}), adapter: Arc::new(NoopAdapter {}),
} }
} }
@ -1217,8 +1226,8 @@ mod tests {
) -> transaction::Transaction { ) -> transaction::Transaction {
let keychain = keychain_for_tests(); let keychain = keychain_for_tests();
let fees: i64 = input_values.iter().sum::<u64>() as i64 - let fees: i64 =
output_values.iter().sum::<u64>() as i64; input_values.iter().sum::<u64>() as i64 - output_values.iter().sum::<u64>() as i64;
assert!(fees >= 0); assert!(fees >= 0);
let mut tx_elements = Vec::new(); let mut tx_elements = Vec::new();
@ -1245,8 +1254,8 @@ mod tests {
) -> transaction::Transaction { ) -> transaction::Transaction {
let keychain = keychain_for_tests(); let keychain = keychain_for_tests();
let fees: i64 = input_values.iter().sum::<u64>() as i64 - let fees: i64 =
output_values.iter().sum::<u64>() as i64; input_values.iter().sum::<u64>() as i64 - output_values.iter().sum::<u64>() as i64;
assert!(fees >= 0); assert!(fees >= 0);
let mut tx_elements = Vec::new(); let mut tx_elements = Vec::new();


@ -34,11 +34,11 @@ pub struct PoolConfig {
/// Base fee for a transaction to be accepted by the pool. The transaction /// Base fee for a transaction to be accepted by the pool. The transaction
/// weight is computed from its number of inputs, outputs and kernels and /// weight is computed from its number of inputs, outputs and kernels and
/// multiplied by the base fee to compare to the actual transaction fee. /// multiplied by the base fee to compare to the actual transaction fee.
#[serde="default_accept_fee_base"] #[serde = "default_accept_fee_base"]
pub accept_fee_base: u64, pub accept_fee_base: u64,
/// Maximum capacity of the pool in number of transactions /// Maximum capacity of the pool in number of transactions
#[serde="default_max_pool_size"] #[serde = "default_max_pool_size"]
pub max_pool_size: usize, pub max_pool_size: usize,
} }
@ -51,8 +51,12 @@ impl Default for PoolConfig {
} }
} }
fn default_accept_fee_base() -> u64 { 10 } fn default_accept_fee_base() -> u64 {
fn default_max_pool_size() -> usize { 50_000 } 10
}
fn default_max_pool_size() -> usize {
50_000
}
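
The doc comment above implies a minimum-fee rule of the form fee >= tx_weight * accept_fee_base. A sketch with a stand-in weight function (the real formula lives elsewhere in the pool crate and is not part of this diff):

// Stand-in weight function, for illustration only.
fn tx_weight(inputs: usize, outputs: usize, kernels: usize) -> u64 {
    (inputs + 4 * outputs + kernels) as u64
}

// Acceptable if the fee covers the weight at the configured base fee;
// default_accept_fee_base() above returns 10.
fn fee_acceptable(fee: u64, inputs: usize, outputs: usize, kernels: usize) -> bool {
    fee >= tx_weight(inputs, outputs, kernels) * 10
}
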
/// Placeholder: the data representing where we heard about a tx from. /// Placeholder: the data representing where we heard about a tx from.
/// ///
@ -240,7 +244,6 @@ impl Pool {
pool_refs: Vec<graph::Edge>, pool_refs: Vec<graph::Edge>,
mut new_unspents: Vec<graph::Edge>, mut new_unspents: Vec<graph::Edge>,
) { ) {
// Removing consumed available_outputs // Removing consumed available_outputs
for new_edge in &pool_refs { for new_edge in &pool_refs {
// All of these should correspond to an existing unspent // All of these should correspond to an existing unspent
@ -253,11 +256,8 @@ impl Pool {
// Accounting for consumed blockchain outputs // Accounting for consumed blockchain outputs
for new_blockchain_edge in blockchain_refs.drain(..) { for new_blockchain_edge in blockchain_refs.drain(..) {
self.consumed_blockchain_outputs.insert( self.consumed_blockchain_outputs
new_blockchain_edge .insert(new_blockchain_edge.output_commitment(), new_blockchain_edge);
.output_commitment(),
new_blockchain_edge,
);
} }
// Adding the transaction to the vertices list along with internal // Adding the transaction to the vertices list along with internal
@ -266,10 +266,8 @@ impl Pool {
// Adding the new unspents to the unspent map // Adding the new unspents to the unspent map
for unspent_output in new_unspents.drain(..) { for unspent_output in new_unspents.drain(..) {
self.available_outputs.insert( self.available_outputs
unspent_output.output_commitment(), .insert(unspent_output.output_commitment(), unspent_output);
unspent_output,
);
} }
} }
@ -282,19 +280,14 @@ impl Pool {
		tx: &transaction::Transaction,
		marked_txs: &HashSet<hash::Hash>,
	) {
		self.graph.remove_vertex(graph::transaction_identifier(tx));

		for input in tx.inputs.iter().map(|x| x.commitment()) {
			match self.graph.remove_edge_by_commitment(&input) {
				Some(x) => {
					if !marked_txs.contains(&x.source_hash().unwrap()) {
						self.available_outputs.insert(
							x.output_commitment(),
							x.with_destination(None),
						);
					}
				}
				Some(x) => if !marked_txs.contains(&x.source_hash().unwrap()) {
					self.available_outputs
						.insert(x.output_commitment(), x.with_destination(None));
				},
				None => {
					self.consumed_blockchain_outputs.remove(&input);
				}

@ -303,15 +296,10 @@ impl Pool {
		for output in tx.outputs.iter().map(|x| x.commitment()) {
			match self.graph.remove_edge_by_commitment(&output) {
				Some(x) => {
					if !marked_txs.contains(&x.destination_hash().unwrap()) {
						self.consumed_blockchain_outputs.insert(
							x.output_commitment(),
							x.with_source(None),
						);
					}
				}
				Some(x) => if !marked_txs.contains(&x.destination_hash().unwrap()) {
					self.consumed_blockchain_outputs
						.insert(x.output_commitment(), x.with_source(None));
				},
				None => {
					self.available_outputs.remove(&output);
				}
@ -413,14 +401,11 @@ impl Orphans {
		is_missing: HashMap<usize, ()>,
		mut new_unspents: Vec<graph::Edge>,
	) {
		// Removing consumed available_outputs
		for (i, new_edge) in orphan_refs.drain(..).enumerate() {
			if is_missing.contains_key(&i) {
				self.missing_outputs.insert(
					new_edge.output_commitment(),
					new_edge,
				);
				self.missing_outputs
					.insert(new_edge.output_commitment(), new_edge);
			} else {
				assert!(
					self.available_outputs

@ -433,27 +418,21 @@ impl Orphans {
		// Accounting for consumed blockchain and pool outputs
		for external_edge in pool_refs.drain(..) {
			self.pool_connections.insert(
				external_edge.output_commitment(),
				external_edge,
			);
			self.pool_connections
				.insert(external_edge.output_commitment(), external_edge);
		}

		// if missing_refs is the same length as orphan_refs, we have
		// no orphan-orphan links for this transaction and it is a
		// root transaction of the orphans set
		self.graph.add_vertex_only(
			orphan_entry,
			is_missing.len() == orphan_refs.len(),
		);
		self.graph
			.add_vertex_only(orphan_entry, is_missing.len() == orphan_refs.len());

		// Adding the new unspents to the unspent map
		for unspent_output in new_unspents.drain(..) {
			self.available_outputs.insert(
				unspent_output.output_commitment(),
				unspent_output,
			);
			self.available_outputs
				.insert(unspent_output.output_commitment(), unspent_output);
		}
	}
}
@ -313,9 +313,9 @@ impl Miner {
/// Utility to transform a 8 bytes of a byte array into a u64.
fn u8_to_u64(p: &[u8], i: usize) -> u64 {
	(p[i] as u64) | (p[i + 1] as u64) << 8 | (p[i + 2] as u64) << 16 |
		(p[i + 3] as u64) << 24 | (p[i + 4] as u64) << 32 | (p[i + 5] as u64) << 40 |
		(p[i + 6] as u64) << 48 | (p[i + 7] as u64) << 56
	(p[i] as u64) | (p[i + 1] as u64) << 8 | (p[i + 2] as u64) << 16 | (p[i + 3] as u64) << 24
		| (p[i + 4] as u64) << 32 | (p[i + 5] as u64) << 40 | (p[i + 6] as u64) << 48
		| (p[i + 7] as u64) << 56
}
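The reflowed expression still builds a little-endian u64 from eight bytes by shifting and or-ing. For comparison, a sketch of the same conversion via the standard library (assumes a Rust version with `u64::from_le_bytes`, which postdates this code):

fn u8_to_u64(p: &[u8], i: usize) -> u64 {
	// copy the 8 relevant bytes, then reinterpret them as a little-endian u64
	let mut buf = [0u8; 8];
	buf.copy_from_slice(&p[i..i + 8]);
	u64::from_le_bytes(buf)
}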
#[cfg(test)]

@ -324,32 +324,183 @@ mod test {
	use core::core::Proof;

	static V1: [u32; 42] = [0x1fe9, 0x2050, 0x4581, 0x6322, 0x65ab, 0xb3c1, 0xc1a4, 0xe257,
	                        0x106ae, 0x17b11, 0x202d4, 0x2705d, 0x2deb2, 0x2f80e, 0x32298,
	                        0x34782, 0x35c5a, 0x37458, 0x38f28, 0x406b2, 0x40e34, 0x40fc6,
	                        0x42220, 0x42d13, 0x46c0f, 0x4fd47, 0x55ad2, 0x598f7, 0x5aa8f,
	                        0x62aa3, 0x65725, 0x65dcb, 0x671c7, 0x6eb20, 0x752fe, 0x7594f,
	                        0x79b9c, 0x7f775, 0x81635, 0x8401c, 0x844e5, 0x89fa8];
	static V2: [u32; 42] = [0x2a37, 0x7557, 0xa3c3, 0xfce6, 0x1248e, 0x15837, 0x1827f, 0x18a93,
	                        0x1a7dd, 0x1b56b, 0x1ceb4, 0x1f962, 0x1fe2a, 0x29cb9, 0x2f30e,
	                        0x2f771, 0x336bf, 0x34355, 0x391d7, 0x39495, 0x3be0c, 0x463be,
	                        0x4d0c2, 0x4eead, 0x50214, 0x520de, 0x52a86, 0x53818, 0x53b3b,
	                        0x54c0b, 0x572fa, 0x5d79c, 0x5e3c2, 0x6769e, 0x6a0fe, 0x6d835,
	                        0x6fc7c, 0x70f03, 0x79d4a, 0x7b03e, 0x81e09, 0x9bd44];
	static V3: [u32; 42] = [0x8158, 0x9f18, 0xc4ba, 0x108c7, 0x11caa, 0x13b82, 0x1618f, 0x1c83b,
	                        0x1ec89, 0x24354, 0x28864, 0x2a0fb, 0x2ce50, 0x2e8fa, 0x32b36,
	                        0x343e6, 0x34dc9, 0x36881, 0x3ffca, 0x40f79, 0x42721, 0x43b8c,
	                        0x44b9d, 0x47ed3, 0x4cd34, 0x5278a, 0x5ab64, 0x5b4d4, 0x5d842,
	                        0x5fa33, 0x6464e, 0x676ee, 0x685d6, 0x69df0, 0x6a5fd, 0x6bda3,
	                        0x72544, 0x77974, 0x7908c, 0x80e67, 0x81ef4, 0x8d882];

	static V1: [u32; 42] = [
		0x1fe9,
		0x2050,
		0x4581,
		0x6322,
		0x65ab,
		0xb3c1,
		0xc1a4,
		0xe257,
		0x106ae,
		0x17b11,
		0x202d4,
		0x2705d,
		0x2deb2,
		0x2f80e,
		0x32298,
		0x34782,
		0x35c5a,
0x37458,
0x38f28,
0x406b2,
0x40e34,
0x40fc6,
0x42220,
0x42d13,
0x46c0f,
0x4fd47,
0x55ad2,
0x598f7,
0x5aa8f,
0x62aa3,
0x65725,
0x65dcb,
0x671c7,
0x6eb20,
0x752fe,
0x7594f,
0x79b9c,
0x7f775,
0x81635,
0x8401c,
0x844e5,
0x89fa8,
];
static V2: [u32; 42] = [
0x2a37,
0x7557,
0xa3c3,
0xfce6,
0x1248e,
0x15837,
0x1827f,
0x18a93,
0x1a7dd,
0x1b56b,
0x1ceb4,
0x1f962,
0x1fe2a,
0x29cb9,
0x2f30e,
0x2f771,
0x336bf,
0x34355,
0x391d7,
0x39495,
0x3be0c,
0x463be,
0x4d0c2,
0x4eead,
0x50214,
0x520de,
0x52a86,
0x53818,
0x53b3b,
0x54c0b,
0x572fa,
0x5d79c,
0x5e3c2,
0x6769e,
0x6a0fe,
0x6d835,
0x6fc7c,
0x70f03,
0x79d4a,
0x7b03e,
0x81e09,
0x9bd44,
];
static V3: [u32; 42] = [
0x8158,
0x9f18,
0xc4ba,
0x108c7,
0x11caa,
0x13b82,
0x1618f,
0x1c83b,
0x1ec89,
0x24354,
0x28864,
0x2a0fb,
0x2ce50,
0x2e8fa,
0x32b36,
0x343e6,
0x34dc9,
0x36881,
0x3ffca,
0x40f79,
0x42721,
0x43b8c,
0x44b9d,
0x47ed3,
0x4cd34,
0x5278a,
0x5ab64,
0x5b4d4,
0x5d842,
0x5fa33,
0x6464e,
0x676ee,
0x685d6,
0x69df0,
0x6a5fd,
0x6bda3,
0x72544,
0x77974,
0x7908c,
0x80e67,
0x81ef4,
0x8d882,
];
	// cuckoo28 at 50% edges of letter 'u'
	static V4: [u32; 42] = [0x1CBBFD, 0x2C5452, 0x520338, 0x6740C5, 0x8C6997, 0xC77150, 0xFD4972,
	                        0x1060FA7, 0x11BFEA0, 0x1343E8D, 0x14CE02A, 0x1533515, 0x1715E61,
	                        0x1996D9B, 0x1CB296B, 0x1FCA180, 0x209A367, 0x20AD02E, 0x23CD2E4,
	                        0x2A3B360, 0x2DD1C0C, 0x333A200, 0x33D77BC, 0x3620C78, 0x3DD7FB8,
	                        0x3FBFA49, 0x41BDED2, 0x4A86FD9, 0x570DE24, 0x57CAB86, 0x594B886,
	                        0x5C74C94, 0x5DE7572, 0x60ADD6F, 0x635918B, 0x6C9E120, 0x6EFA583,
	                        0x7394ACA, 0x7556A23, 0x77F70AA, 0x7CF750A, 0x7F60790];
	static V4: [u32; 42] = [
		0x1CBBFD,
		0x2C5452,
		0x520338,
		0x6740C5,
		0x8C6997,
		0xC77150,
0xFD4972,
0x1060FA7,
0x11BFEA0,
0x1343E8D,
0x14CE02A,
0x1533515,
0x1715E61,
0x1996D9B,
0x1CB296B,
0x1FCA180,
0x209A367,
0x20AD02E,
0x23CD2E4,
0x2A3B360,
0x2DD1C0C,
0x333A200,
0x33D77BC,
0x3620C78,
0x3DD7FB8,
0x3FBFA49,
0x41BDED2,
0x4A86FD9,
0x570DE24,
0x57CAB86,
0x594B886,
0x5C74C94,
0x5DE7572,
0x60ADD6F,
0x635918B,
0x6C9E120,
0x6EFA583,
0x7394ACA,
0x7556A23,
0x77F70AA,
0x7CF750A,
0x7F60790,
];
	/// Find a 42-cycle on Cuckoo20 at 75% easiness and verifiy against a few
	/// known cycle proofs
@ -384,13 +535,15 @@ mod test {
	fn validate_fail() {
		// edge checks
		assert!(!Cuckoo::new(&[49], 20).verify(Proof::new(vec![0; 42]), 75));
		assert!(!Cuckoo::new(&[49], 20).verify(Proof::new(vec![0xffff; 42]), 75));
		assert!(!Cuckoo::new(&[49], 20)
			.verify(Proof::new(vec![0xffff; 42]), 75));
		// wrong data for proof
		assert!(!Cuckoo::new(&[50], 20).verify(Proof::new(V1.to_vec().clone()), 75));
		assert!(!Cuckoo::new(&[50], 20)
			.verify(Proof::new(V1.to_vec().clone()), 75));
		let mut test_header = [0; 32];
		test_header[0] = 24;
		assert!(!Cuckoo::new(&test_header, 20).verify(Proof::new(V4.to_vec().clone()), 50));
		assert!(!Cuckoo::new(&test_header, 20)
			.verify(Proof::new(V4.to_vec().clone()), 50));
	}

#[test]
@ -29,15 +29,15 @@
#![warn(missing_docs)]

extern crate blake2_rfc as blake2;
extern crate rand;
extern crate time;
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate slog;
extern crate serde;
#[macro_use]
extern crate serde_derive;

extern crate blake2_rfc as blake2;
#[macro_use]
extern crate lazy_static;
extern crate rand;
extern crate serde;
#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate slog;
extern crate time;

extern crate grin_core as core;
extern crate grin_util as util;
@ -63,7 +63,9 @@ use cuckoo::{Cuckoo, Error};
pub trait MiningWorker {
	/// This only sets parameters and does initialisation work now
	fn new(ease: u32, sizeshift: u32, proof_size: usize) -> Self where Self: Sized;
	fn new(ease: u32, sizeshift: u32, proof_size: usize) -> Self
	where
		Self: Sized;

	/// Actually perform a mining attempt on the given input and
	/// return a proof if found

@ -83,10 +85,11 @@ pub fn verify_size(bh: &BlockHeader, cuckoo_sz: u32) -> bool {
/// Uses the much easier Cuckoo20 (mostly for
/// tests).
pub fn pow20<T: MiningWorker>(miner: &mut T,
                              bh: &mut BlockHeader,
                              diff: Difficulty)
                              -> Result<(), Error> {
pub fn pow20<T: MiningWorker>(
	miner: &mut T,
	bh: &mut BlockHeader,
	diff: Difficulty,
) -> Result<(), Error> {
	pow_size(miner, bh, diff, 20)
}
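The signature changes above are pure rustfmt style: visually aligned parameters and inline where-clauses become block-indented lists with trailing commas. A hedged before/after sketch of the convention on a made-up function:

// before: continuation lines visually aligned under the opening paren
// fn example<T: Clone>(first: &mut T,
//                      second: u32)
//                      -> Result<(), String> { ... }

// after: one parameter per line, the closing paren carries the return type
fn example<T: Clone>(
	first: &mut T,
	second: u32,
) -> Result<(), String> {
	let _ = (first, second);
	Ok(())
}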
@ -104,16 +107,13 @@ pub fn mine_genesis_block(miner_config: Option<types::MinerConfig>) -> Option<co
	let proof_size = global::proofsize();

	let mut miner: Box<MiningWorker> = match miner_config {
		Some(c) => {
			if c.use_cuckoo_miner {
				let mut p = plugin::PluginMiner::new(consensus::EASINESS, sz, proof_size);
				p.init(c.clone());
				Box::new(p)
			} else {
				Box::new(cuckoo::Miner::new(consensus::EASINESS, sz, proof_size))
			}
		}
		Some(c) => if c.use_cuckoo_miner {
			let mut p = plugin::PluginMiner::new(consensus::EASINESS, sz, proof_size);
			p.init(c.clone());
			Box::new(p)
		} else {
			Box::new(cuckoo::Miner::new(consensus::EASINESS, sz, proof_size))
		},
		None => Box::new(cuckoo::Miner::new(consensus::EASINESS, sz, proof_size)),
	};
	pow_size(&mut *miner, &mut gen.header, diff, sz as u32).unwrap();
@ -124,11 +124,12 @@ pub fn mine_genesis_block(miner_config: Option<types::MinerConfig>) -> Option<co
/// Mining Worker,
/// until the required difficulty target is reached. May take a while for a low
/// target...
pub fn pow_size<T: MiningWorker + ?Sized>(miner: &mut T,
                                          bh: &mut BlockHeader,
                                          diff: Difficulty,
                                          _: u32)
                                          -> Result<(), Error> {
pub fn pow_size<T: MiningWorker + ?Sized>(
	miner: &mut T,
	bh: &mut BlockHeader,
	diff: Difficulty,
	_: u32,
) -> Result<(), Error> {
	let start_nonce = bh.nonce;

	// if we're in production mode, try the pre-mined solution first
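For orientation, a sketch of how callers drive this interface: build a worker, then hand it to pow_size, which bumps the nonce and re-tests until the difficulty target is met. `global::sizeshift()` is assumed here as the helper for the configured Cuckoo size; error handling is elided:

// illustrative only; MiningWorker, Difficulty etc. as defined in this crate
fn mine_block(bh: &mut BlockHeader, diff: Difficulty) {
	let sz = global::sizeshift(); // assumed helper for the Cuckoo size parameter
	let mut miner = cuckoo::Miner::new(consensus::EASINESS, sz, global::proofsize());
	// increments bh.nonce and re-hashes until a proof satisfying `diff` is found
	pow_size(&mut miner, bh, diff, sz as u32).unwrap();
}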
@ -30,8 +30,8 @@ use util::LOGGER;
use std::sync::Mutex;

use cuckoo_miner::{CuckooMiner, CuckooPluginManager, CuckooMinerConfig, CuckooMinerSolution,
                   CuckooMinerDeviceStats, CuckooMinerError};
use cuckoo_miner::{CuckooMiner, CuckooMinerConfig, CuckooMinerDeviceStats, CuckooMinerError,
                   CuckooMinerSolution, CuckooPluginManager};

// For now, we're just going to keep a static reference around to the loaded
// config

@ -158,7 +158,6 @@ impl PluginMiner {
	/// Get the miner
	pub fn get_consumable(&mut self) -> CuckooMiner {
		// this will load the associated plugin
		let result = CuckooMiner::new(self.config.clone());
		if let Err(e) = result {
@ -77,7 +77,7 @@ impl Default for MinerConfig {
			cuckoo_miner_async_mode: None,
			cuckoo_miner_plugin_dir: None,
			cuckoo_miner_plugin_config: None,
			wallet_receiver_url: "http://localhost:13416".to_string(),
			wallet_receiver_url: "http://localhost:13415".to_string(),
			burn_reward: false,
			slow_down_in_millis: Some(0),
			attempt_time_per_block: 2,
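This is the renumbering the commit message calls out: 13413 is the node API and 13414 is presumably the peer-to-peer listener, so moving the wallet receiver from 13416 to 13415 closes the gap in the port block. As constants:

// resulting default port layout, per this diff
const NODE_API_ADDR: &str = "http://127.0.0.1:13413";
const WALLET_RECEIVER_ADDR: &str = "0.0.0.0:13415"; // previously 13416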
@ -14,34 +14,34 @@
//! Main for building the binary of a Grin peer-to-peer node.

#[macro_use]
extern crate slog;
extern crate clap;
extern crate daemonize;
extern crate serde;
extern crate serde_json;
extern crate blake2_rfc as blake2;

extern crate grin_api as api;
extern crate grin_grin as grin;
extern crate grin_wallet as wallet;
extern crate grin_keychain as keychain;
extern crate grin_config as config;
extern crate grin_core as core;
extern crate grin_util as util;

extern crate blake2_rfc as blake2;
extern crate clap;
extern crate daemonize;
extern crate serde;
extern crate serde_json;
#[macro_use]
extern crate slog;

extern crate grin_api as api;
extern crate grin_config as config;
extern crate grin_core as core;
extern crate grin_grin as grin;
extern crate grin_keychain as keychain;
extern crate grin_util as util;
extern crate grin_wallet as wallet;

use std::thread;
use std::io::Read;
use std::fs::File;
use std::time::Duration;

use clap::{Arg, App, SubCommand, ArgMatches};
use clap::{App, Arg, ArgMatches, SubCommand};
use daemonize::Daemonize;

use config::GlobalConfig;
use wallet::WalletConfig;
use core::global;
use util::{LoggingConfig, LOGGER, init_logger};
use util::{init_logger, LoggingConfig, LOGGER};

fn start_from_config_file(mut global_config: GlobalConfig) {
	info!(
@ -68,7 +68,6 @@ fn start_from_config_file(mut global_config: GlobalConfig) {
}

fn main() {
	// First, load a global config object,
	// then modify that object with any switches
	// found so that the switches override the

@ -241,14 +240,12 @@ fn main() {
		}

		// client commands and options
		("client", Some(client_args)) => {
			match client_args.subcommand() {
				("status", _) => {
					println!("status info...");
				}
				_ => panic!("Unknown client command, use 'grin help client' for details"),
			}
		}
		("client", Some(client_args)) => match client_args.subcommand() {
			("status", _) => {
				println!("status info...");
			}
			_ => panic!("Unknown client command, use 'grin help client' for details"),
		},

		// client commands and options
		("wallet", Some(wallet_args)) => {
@ -354,42 +351,38 @@ fn wallet_command(wallet_args: &ArgMatches) {
	// Derive the keychain based on seed from seed file and specified passphrase.
	// Generate the initial wallet seed if we are running "wallet init".
	if let ("init", Some(_)) = wallet_args.subcommand() {
		wallet::WalletSeed::init_file(&wallet_config)
			.expect("Failed to init wallet seed file.");
		wallet::WalletSeed::init_file(&wallet_config).expect("Failed to init wallet seed file.");

		// we are done here with creating the wallet, so just return
		return;
	}

	let wallet_seed = wallet::WalletSeed::from_file(&wallet_config)
		.expect("Failed to read wallet seed file.");
	let wallet_seed =
		wallet::WalletSeed::from_file(&wallet_config).expect("Failed to read wallet seed file.");
	let passphrase = wallet_args
		.value_of("pass")
		.expect("Failed to read passphrase.");
	let keychain = wallet_seed.derive_keychain(&passphrase)
		.expect("Failed to derive keychain from seed file and passphrase.");
	let keychain = wallet_seed
		.derive_keychain(&passphrase)
		.expect("Failed to derive keychain from seed file and passphrase.");

	match wallet_args.subcommand() {
		("receive", Some(receive_args)) => {
			if let Some(f) = receive_args.value_of("input") {
		("receive", Some(receive_args)) => if let Some(f) = receive_args.value_of("input") {
			let mut file = File::open(f).expect("Unable to open transaction file.");
			let mut contents = String::new();
			file.read_to_string(&mut contents).expect(
				"Unable to read transaction file.",
			);
			file.read_to_string(&mut contents)
				.expect("Unable to read transaction file.");
			wallet::receive_json_tx(&wallet_config, &keychain, contents.as_str()).unwrap();
		} else {
			wallet::server::start_rest_apis(wallet_config, keychain);
		},
		("send", Some(send_args)) => {
			let amount = send_args
				.value_of("amount")
				.expect("Amount to send required")
				.parse()
				.expect("Could not parse amount as a whole number.");
			let minimum_confirmations: u64 =
				send_args
			let minimum_confirmations: u64 = send_args
				.value_of("minimum_confirmations")
				.unwrap_or("1")
				.parse()

@ -412,8 +405,7 @@ fn wallet_command(wallet_args: &ArgMatches) {
				.expect("Amount to burn required")
				.parse()
				.expect("Could not parse amount as a whole number.");
			let minimum_confirmations: u64 =
				send_args
			let minimum_confirmations: u64 = send_args
				.value_of("minimum_confirmations")
				.unwrap_or("1")
				.parse()
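The wallet subcommand handling follows clap 2.x's nested-subcommand pattern throughout. A condensed, self-contained sketch of the same shape (argument names mirror the diff; the app itself is illustrative):

extern crate clap;
use clap::{App, Arg, SubCommand};

fn main() {
	let matches = App::new("grin")
		.subcommand(SubCommand::with_name("wallet").subcommand(
			SubCommand::with_name("send")
				.arg(Arg::with_name("amount").takes_value(true))
				.arg(Arg::with_name("minimum_confirmations").takes_value(true)),
		))
		.get_matches();

	if let ("wallet", Some(wallet_args)) = matches.subcommand() {
		if let ("send", Some(send_args)) = wallet_args.subcommand() {
			// same value_of / unwrap_or / parse chain as in wallet_command above
			let amount: u64 = send_args
				.value_of("amount")
				.expect("Amount to send required")
				.parse()
				.expect("Could not parse amount as a whole number.");
			let min_conf: u64 = send_args
				.value_of("minimum_confirmations")
				.unwrap_or("1")
				.parse()
				.expect("Could not parse minimum_confirmations.");
			let _ = (amount, min_conf);
		}
	}
}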
@ -21,14 +21,14 @@
#![warn(missing_docs)]

extern crate byteorder;
extern crate grin_core as core;
extern crate grin_util as util;
extern crate libc;
#[macro_use]
extern crate slog;
extern crate env_logger;
extern crate memmap;
extern crate rocksdb;

extern crate byteorder;
extern crate env_logger;
extern crate grin_core as core;
extern crate grin_util as util;
extern crate libc;
extern crate memmap;
extern crate rocksdb;
#[macro_use]
extern crate slog;

pub mod sumtree;

@ -39,8 +39,8 @@ use std::iter::Iterator;
use std::marker::PhantomData;
use std::sync::RwLock;

use byteorder::{WriteBytesExt, BigEndian};
use byteorder::{BigEndian, WriteBytesExt};
use rocksdb::{DB, WriteBatch, DBCompactionStyle, DBIterator, IteratorMode, Direction};
use rocksdb::{DBCompactionStyle, DBIterator, Direction, IteratorMode, WriteBatch, DB};

use core::ser;
@ -89,7 +89,9 @@ impl Store {
		opts.set_max_open_files(256);
		opts.set_use_fsync(false);
		let db = try!(DB::open(&opts, &path));
		Ok(Store { rdb: RwLock::new(db) })
		Ok(Store {
			rdb: RwLock::new(db),
		})
	}

	/// Writes a single key/value pair to the db
@ -125,10 +127,11 @@ impl Store {
	/// Gets a `Readable` value from the db, provided its key, allowing to
	/// extract only partial data. The underlying Readable size must align
	/// accordingly. Encapsulates serialization.
	pub fn get_ser_limited<T: ser::Readable>(&self,
	                                         key: &[u8],
	                                         len: usize)
	                                         -> Result<Option<T>, Error> {
	pub fn get_ser_limited<T: ser::Readable>(
		&self,
		key: &[u8],
		len: usize,
	) -> Result<Option<T>, Error> {
		let data = try!(self.get(key));
		match data {
			Some(val) => {
@ -213,14 +216,16 @@ impl<'a> Batch<'a> {
/// An iterator thad produces Readable instances back. Wraps the lower level
/// DBIterator and deserializes the returned values.
pub struct SerIterator<T>
where T: ser::Readable
where
	T: ser::Readable,
{
	iter: DBIterator,
	_marker: PhantomData<T>,
}

impl<T> Iterator for SerIterator<T>
where T: ser::Readable
where
	T: ser::Readable,
{
	type Item = T;
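SerIterator is the usual typed-iterator wrapper: hold the raw iterator plus a PhantomData for the decoded type, and deserialize inside next(). A self-contained sketch of the shape, with a plain byte iterator standing in for rocksdb's DBIterator:

use std::marker::PhantomData;

trait Readable: Sized {
	fn read(data: &[u8]) -> Option<Self>;
}

// same shape as SerIterator: raw iterator + marker for the decoded type
struct SerIter<I, T> {
	inner: I,
	_marker: PhantomData<T>,
}

impl<I, T> Iterator for SerIter<I, T>
where
	I: Iterator<Item = Vec<u8>>,
	T: Readable,
{
	type Item = T;

	fn next(&mut self) -> Option<T> {
		// pull raw bytes, then decode them into the target type
		self.inner.next().and_then(|bytes| T::read(&bytes))
	}
}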
@ -17,17 +17,17 @@ use memmap;

use std::cmp;
use std::fs::{self, File, OpenOptions};
use std::io::{self, Write, BufReader, BufRead, ErrorKind};
use std::io::{self, BufRead, BufReader, ErrorKind, Write};
use std::os::unix::io::AsRawFd;
use std::path::Path;
use std::io::Read;

#[cfg(any(target_os = "linux"))]
use libc::{off64_t, ftruncate64};
use libc::{ftruncate64, off64_t};
#[cfg(not(any(target_os = "linux", target_os = "android")))]
use libc::{off_t as off64_t, ftruncate as ftruncate64};
use libc::{ftruncate as ftruncate64, off_t as off64_t};

use core::core::pmmr::{self, Summable, Backend, HashSum, VecBackend};
use core::core::pmmr::{self, Backend, HashSum, Summable, VecBackend};
use core::ser;
use util::LOGGER;
@ -282,10 +282,8 @@ where
	/// Append the provided HashSums to the backend storage.
	#[allow(unused_variables)]
	fn append(&mut self, position: u64, data: Vec<HashSum<T>>) -> Result<(), String> {
		self.buffer.append(
			position - (self.buffer_index as u64),
			data.clone(),
		)?;
		self.buffer
			.append(position - (self.buffer_index as u64), data.clone())?;
		Ok(())
	}
@ -330,9 +328,9 @@ where
	fn rewind(&mut self, position: u64, index: u32) -> Result<(), String> {
		assert!(self.buffer.len() == 0, "Rewind on non empty buffer.");
		self.remove_log.truncate(index).map_err(|e| {
			format!("Could not truncate remove log: {}", e)
		})?;
		self.remove_log
			.truncate(index)
			.map_err(|e| format!("Could not truncate remove log: {}", e))?;
		self.rewind = Some((position, index, self.buffer_index));
		self.buffer_index = position as usize;
		Ok(())

@ -343,8 +341,7 @@ where
		if self.buffer.used_size() > 0 {
			for position in &positions {
				let pos_sz = *position as usize;
				if pos_sz > self.buffer_index &&
					pos_sz - 1 < self.buffer_index + self.buffer.len()
				if pos_sz > self.buffer_index && pos_sz - 1 < self.buffer_index + self.buffer.len()
				{
					self.buffer.remove(vec![*position], index).unwrap();
				}
@ -375,7 +372,9 @@ where
			remove_log: rm_log,
			buffer: VecBackend::new(),
			buffer_index: (sz as usize) / record_len,
			pruned_nodes: pmmr::PruneList { pruned_nodes: prune_list },
			pruned_nodes: pmmr::PruneList {
				pruned_nodes: prune_list,
			},
			rewind: None,
		})
	}

@ -403,10 +402,7 @@ where
		if let Err(e) = self.hashsum_file.append(&ser::ser_vec(&hs).unwrap()[..]) {
			return Err(io::Error::new(
				io::ErrorKind::Interrupted,
				format!(
					"Could not write to log storage, disk full? {:?}",
					e
				),
				format!("Could not write to log storage, disk full? {:?}", e),
			));
		}
	}
@ -442,8 +438,8 @@ where
	/// TODO whatever is calling this should also clean up the commit to
	/// position index in db
	pub fn check_compact(&mut self, max_len: usize) -> io::Result<()> {
		if !(max_len > 0 && self.remove_log.len() > max_len ||
			max_len == 0 && self.remove_log.len() > RM_LOG_MAX_NODES)
		if !(max_len > 0 && self.remove_log.len() > max_len
			|| max_len == 0 && self.remove_log.len() > RM_LOG_MAX_NODES)
		{
			return Ok(());
		}

@ -474,11 +470,8 @@ where
				(pos - 1 - shift.unwrap()) * record_len
			})
			.collect();
		self.hashsum_file.save_prune(
			tmp_prune_file.clone(),
			to_rm,
			record_len,
		)?;
		self.hashsum_file
			.save_prune(tmp_prune_file.clone(), to_rm, record_len)?;

		// 2. update the prune list and save it in place
		for &(rm_pos, _) in &self.remove_log.removed[..] {
@ -510,7 +503,6 @@ fn read_ordered_vec<T>(path: String) -> io::Result<Vec<T>>
where
	T: ser::Readable + cmp::Ord,
{
	let file_path = Path::new(&path);
	let mut ovec = Vec::with_capacity(1000);
	if file_path.exists() {

@ -524,20 +516,15 @@ where
		}
		let elmts_res: Result<Vec<T>, ser::Error> = ser::deserialize(&mut &buf[..]);
		match elmts_res {
			Ok(elmts) => {
				for elmt in elmts {
					if let Err(idx) = ovec.binary_search(&elmt) {
						ovec.insert(idx, elmt);
					}
				}
			}
			Ok(elmts) => for elmt in elmts {
				if let Err(idx) = ovec.binary_search(&elmt) {
					ovec.insert(idx, elmt);
				}
			},
			Err(_) => {
				return Err(io::Error::new(
					io::ErrorKind::InvalidData,
					format!(
						"Corrupted storage, could not read file at {}",
						path
					),
					format!("Corrupted storage, could not read file at {}", path),
				));
			}
		}
@ -553,7 +540,6 @@ fn write_vec<T>(path: String, v: &Vec<T>) -> io::Result<()>
where
	T: ser::Writeable,
{
	let mut file_path = File::create(&path)?;
	ser::serialize(&mut file_path, v).map_err(|_| {
		io::Error::new(
@ -20,7 +20,7 @@ extern crate time;
use std::fs;

use core::ser::*;
use core::core::pmmr::{PMMR, Summable, HashSum, Backend};
use core::core::pmmr::{Backend, HashSum, Summable, PMMR};
use core::core::hash::Hashed;

#[test]
@ -48,11 +48,16 @@ fn sumtree_append() {
		})
	);

	let sum2 = HashSum::from_summable(1, &elems[0], None::<TestElem>) + HashSum::from_summable(2, &elems[1], None::<TestElem>);
	let sum4 = sum2 + (HashSum::from_summable(4, &elems[2], None::<TestElem>) + HashSum::from_summable(5, &elems[3], None::<TestElem>));
	let sum8 = sum4 +
		((HashSum::from_summable(8, &elems[4], None::<TestElem>) + HashSum::from_summable(9, &elems[5], None::<TestElem>)) +
		(HashSum::from_summable(11, &elems[6], None::<TestElem>) + HashSum::from_summable(12, &elems[7], None::<TestElem>)));

	let sum2 = HashSum::from_summable(1, &elems[0], None::<TestElem>)
		+ HashSum::from_summable(2, &elems[1], None::<TestElem>);
	let sum4 = sum2
		+ (HashSum::from_summable(4, &elems[2], None::<TestElem>)
			+ HashSum::from_summable(5, &elems[3], None::<TestElem>));
	let sum8 = sum4
		+ ((HashSum::from_summable(8, &elems[4], None::<TestElem>)
			+ HashSum::from_summable(9, &elems[5], None::<TestElem>))
			+ (HashSum::from_summable(11, &elems[6], None::<TestElem>)
				+ HashSum::from_summable(12, &elems[7], None::<TestElem>)));
	let sum9 = sum8 + HashSum::from_summable(16, &elems[8], None::<TestElem>);

	{
@ -223,7 +228,6 @@ fn setup() -> (String, Vec<TestElem>) {
}

fn load(pos: u64, elems: &[TestElem], backend: &mut store::sumtree::PMMRBackend<TestElem>) -> u64 {
	let mut pmmr = PMMR::at(backend, pos);
	for elem in elems {
		pmmr.push(elem.clone(), None::<TestElem>).unwrap();
@ -238,8 +242,8 @@ impl Summable for TestElem {
	fn sum(&self) -> u64 {
		// sums are not allowed to overflow, so we use this simple
		// non-injective "sum" function that will still be homomorphic
		self.0[0] as u64 * 0x1000 + self.0[1] as u64 * 0x100 + self.0[2] as u64 * 0x10 +
			self.0[3] as u64
		self.0[0] as u64 * 0x1000 + self.0[1] as u64 * 0x100 + self.0[2] as u64 * 0x10
			+ self.0[3] as u64
	}
	fn sum_len() -> usize {
		8
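The expected sums in this test just mirror MMR positions (leaves at positions 1 and 2 roll up into node 3, and so on), using the deliberately lossy byte-packing sum. A worked check of that arithmetic:

// the test's non-injective "sum": pack four small values nibble-wise
fn test_sum(b: [u32; 4]) -> u64 {
	b[0] as u64 * 0x1000 + b[1] as u64 * 0x100 + b[2] as u64 * 0x10 + b[3] as u64
}

fn main() {
	// summing two leaves at their parent equals adding the packed values
	assert_eq!(test_sum([0, 0, 0, 1]) + test_sum([0, 0, 0, 2]), 3);
	assert_eq!(test_sum([0, 0, 1, 0]), 0x10);
}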
@ -23,8 +23,8 @@

#[macro_use]
extern crate slog;
extern crate slog_term;
extern crate slog_async;
extern crate slog_async;
extern crate slog_term;

#[macro_use]
extern crate lazy_static;
@ -39,13 +39,13 @@ pub use secp_ as secp;

// Logging related
pub mod logger;
pub use logger::{LOGGER, init_logger, init_test_logger};
pub use logger::{init_logger, init_test_logger, LOGGER};

pub mod types;
pub use types::LoggingConfig;

// other utils
use std::cell::{RefCell, Ref};
use std::cell::{Ref, RefCell};
#[allow(unused_imports)]
use std::ops::Deref;
@ -68,7 +68,9 @@ unsafe impl<T> Send for OneTime<T> {}
impl<T> OneTime<T> {
	/// Builds a new uninitialized OneTime.
	pub fn new() -> OneTime<T> {
		OneTime { inner: RefCell::new(None) }
		OneTime {
			inner: RefCell::new(None),
		}
	}

	/// Initializes the OneTime, should only be called once after construction.
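OneTime is a write-once cell: a RefCell<Option<T>> that is set exactly once after construction and read thereafter. A hedged sketch of the init side implied by this diff (the real type also carries unsafe Send/Sync impls not reproduced here):

use std::cell::RefCell;

struct OneTime<T> {
	inner: RefCell<Option<T>>,
}

impl<T> OneTime<T> {
	fn new() -> OneTime<T> {
		OneTime {
			inner: RefCell::new(None),
		}
	}

	// should only be called once after construction
	fn init(&self, value: T) {
		let mut slot = self.inner.borrow_mut();
		assert!(slot.is_none(), "OneTime initialized twice");
		*slot = Some(value);
	}
}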
@ -15,7 +15,7 @@
use std::fs::OpenOptions;
use std::sync::Mutex;
use std::ops::Deref;

use slog::{Logger, Drain, Level, LevelFilter, Duplicate, Discard};
use slog::{Discard, Drain, Duplicate, Level, LevelFilter, Logger};
use slog_term;
use slog_async;

@ -101,4 +101,3 @@ pub fn init_test_logger() {
	*config_ref = LoggingConfig::default();
	*was_init_ref = true;
}
@ -34,29 +34,25 @@ fn refresh_output(out: &mut OutputData, api_out: &api::Output) {
	match out.status {
		OutputStatus::Unconfirmed => {
			out.status = OutputStatus::Unspent;
		},
		}
		_ => (),
	}
}

// Transitions a local wallet output (based on it not being in the node utxo set) -
// Transitions a local wallet output (based on it not being in the node utxo
// set) -
// Unspent -> Spent
// Locked -> Spent
fn mark_spent_output(out: &mut OutputData) {
	match out.status {
		OutputStatus::Unspent | OutputStatus::Locked => {
			out.status = OutputStatus::Spent
		},
		OutputStatus::Unspent | OutputStatus::Locked => out.status = OutputStatus::Spent,
		_ => (),
	}
}
/// Builds a single api query to retrieve the latest output data from the node.
/// So we can refresh the local wallet outputs.
pub fn refresh_outputs(
	config: &WalletConfig,
	keychain: &Keychain,
) -> Result<(), Error> {
pub fn refresh_outputs(config: &WalletConfig, keychain: &Keychain) -> Result<(), Error> {
	debug!(LOGGER, "Refreshing wallet outputs");
	let mut wallet_outputs: HashMap<pedersen::Commitment, Identifier> = HashMap::new();
	let mut commits: Vec<pedersen::Commitment> = vec![];

@ -64,7 +60,8 @@ pub fn refresh_outputs(
	// build a local map of wallet outputs by commits
	// and a list of outputs we wantot query the node for
	let _ = WalletData::read_wallet(&config.data_file_dir, |wallet_data| {
		for out in wallet_data.outputs
		for out in wallet_data
			.outputs
			.values()
			.filter(|out| out.root_key_id == keychain.root_key_id())
			.filter(|out| out.status != OutputStatus::Spent)
@ -88,7 +85,7 @@ pub fn refresh_outputs(
	let query_string = query_params.join("&");

	let url = format!(
		"{}/v2/chain/utxos?{}",
		"{}/v1/chain/utxos?{}",
		config.check_node_api_http_addr,
		query_string,
	);

@ -96,20 +93,17 @@ pub fn refresh_outputs(
	// build a map of api outputs by commit so we can look them up efficiently
	let mut api_outputs: HashMap<pedersen::Commitment, api::Output> = HashMap::new();
	match api::client::get::<Vec<api::Output>>(url.as_str()) {
		Ok(outputs) => {
			for out in outputs {
				api_outputs.insert(out.commit, out);
			}
		},
		Ok(outputs) => for out in outputs {
			api_outputs.insert(out.commit, out);
		},
		Err(_) => {}
	};
	// now for each commit, find the output in the wallet and
	// the corresponding api output (if it exists)
	// and refresh it in-place in the wallet.
	// Note: minimizing the time we spend holding the wallet lock.
	WalletData::with_wallet(&config.data_file_dir, |wallet_data| {
		for commit in commits {
	WalletData::with_wallet(&config.data_file_dir, |wallet_data| for commit in commits {
		let id = wallet_outputs.get(&commit).unwrap();
		if let Entry::Occupied(mut output) = wallet_data.outputs.entry(id.to_hex()) {
			match api_outputs.get(&commit) {

@ -117,11 +111,10 @@ pub fn refresh_outputs(
				None => mark_spent_output(&mut output.get_mut()),
			};
		}
	})
}

pub fn get_tip_from_node(config: &WalletConfig) -> Result<api::Tip, Error> {
	let url = format!("{}/v2/chain", config.check_node_api_http_addr);
	let url = format!("{}/v1/chain", config.check_node_api_http_addr);
	api::client::get::<api::Tip>(url.as_str()).map_err(|e| Error::Node(e))
}
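Pulling the pieces together: refresh_outputs collects the wallet's commitments, queries the node once over the re-rooted /v1/chain/utxos endpoint, and marks anything the node no longer knows as spent. A small runnable sketch of just the URL construction used above:

// illustrative; the commits would come from the wallet's output map
fn utxo_query_url(node_addr: &str, commits: &[String]) -> String {
	let query_params: Vec<String> = commits
		.iter()
		.map(|commit| format!("id={}", commit))
		.collect();
	// matches the URL built in refresh_outputs above
	format!("{}/v1/chain/utxos?{}", node_addr, query_params.join("&"))
}

fn main() {
	let url = utxo_query_url("http://127.0.0.1:13413", &["089abc".to_string()]);
	assert_eq!(url, "http://127.0.0.1:13413/v1/chain/utxos?id=089abc");
}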
@ -33,7 +33,10 @@ pub fn create_coinbase(url: &str, block_fees: &BlockFees) -> Result<CbData, Erro
	retry_backoff_forever(|| {
		let res = single_create_coinbase(&url, &block_fees);
		if let Err(_) = res {
			error!(LOGGER, "Failed to get coinbase via wallet API (will retry)...");
			error!(
				LOGGER,
				"Failed to get coinbase via wallet API (will retry)..."
			);
		}
		res
	})
@ -41,11 +44,12 @@ pub fn create_coinbase(url: &str, block_fees: &BlockFees) -> Result<CbData, Erro
/// Runs the specified function wrapped in some basic retry logic.
fn retry_backoff_forever<F, R>(f: F) -> Result<R, Error>
where F: (FnMut() -> Result<R, Error>)
where
	F: FnMut() -> Result<R, Error>,
{
	let mut core = reactor::Core::new()?;
	let retry_strategy = FibonacciBackoff::from_millis(100)
		.max_delay(time::Duration::from_secs(10));
	let retry_strategy =
		FibonacciBackoff::from_millis(100).max_delay(time::Duration::from_secs(10));
	let retry_future = Retry::spawn(core.handle(), retry_strategy, f);
	let res = core.run(retry_future).unwrap();
	Ok(res)
@ -63,8 +67,8 @@ fn single_create_coinbase(url: &str, block_fees: &BlockFees) -> Result<CbData, E
	let work = client.request(req).and_then(|res| {
		res.body().concat2().and_then(move |body| {
			let coinbase: CbData = serde_json::from_slice(&body)
				.map_err(|e| {io::Error::new(io::ErrorKind::Other, e)})?;
			let coinbase: CbData =
				serde_json::from_slice(&body).map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
			Ok(coinbase)
		})
	});
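The retry wrapper pairs a Fibonacci backoff (100ms base) with a 10s cap and retries until success. A dependency-free sketch of the same policy, in place of the tokio_retry machinery used above:

use std::time::Duration;

// Fibonacci backoff as configured above: 100ms, 100ms, 200ms, 300ms, 500ms...
// capped at 10s, retrying the closure until it succeeds
fn retry_forever<F, R, E>(mut f: F) -> R
where
	F: FnMut() -> Result<R, E>,
{
	let (mut prev, mut delay) = (Duration::from_millis(0), Duration::from_millis(100));
	let cap = Duration::from_secs(10);
	loop {
		match f() {
			Ok(v) => return v,
			Err(_) => {
				std::thread::sleep(delay);
				let next = (prev + delay).min(cap);
				prev = delay;
				delay = next;
			}
		}
	}
}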
@ -33,11 +33,8 @@ pub struct CoinbaseHandler {
impl CoinbaseHandler {
	fn build_coinbase(&self, block_fees: &BlockFees) -> Result<CbData, Error> {
		let (out, kern, block_fees) = receive_coinbase(
			&self.config,
			&self.keychain,
			block_fees,
		).map_err(|e| {
		let (out, kern, block_fees) = receive_coinbase(&self.config, &self.keychain, block_fees)
			.map_err(|e| {
			api::Error::Internal(format!("Error building coinbase: {:?}", e))
		})?;

@ -50,13 +47,9 @@ impl CoinbaseHandler {
		})?;

		let key_id_bin = match block_fees.key_id {
			Some(key_id) => {
				ser::ser_vec(&key_id).map_err(|e| {
					api::Error::Internal(
						format!("Error serializing kernel: {:?}", e),
					)
				})?
			}
			Some(key_id) => ser::ser_vec(&key_id).map_err(|e| {
				api::Error::Internal(format!("Error serializing kernel: {:?}", e))
			})?,
			None => vec![],
		};

@ -68,7 +61,8 @@ impl CoinbaseHandler {
	}
}

// TODO - error handling - what to return if we fail to get the wallet lock for some reason...
// TODO - error handling - what to return if we fail to get the wallet lock for
// some reason...
impl Handler for CoinbaseHandler {
	fn handle(&self, req: &mut Request) -> IronResult<Response> {
		let struct_body = req.get::<bodyparser::Struct<BlockFees>>();
@ -22,17 +22,14 @@ pub fn show_info(config: &WalletConfig, keychain: &Keychain) {
	// just read the wallet here, no need for a write lock
	let _ = WalletData::read_wallet(&config.data_file_dir, |wallet_data| {
		// get the current height via the api
		// if we cannot get the current height use the max height known to the wallet
		let current_height = match checker::get_tip_from_node(config) {
			Ok(tip) => tip.height,
			Err(_) => {
				match wallet_data.outputs.values().map(|out| out.height).max() {
					Some(height) => height,
					None => 0,
				}
			}
			Err(_) => match wallet_data.outputs.values().map(|out| out.height).max() {
				Some(height) => height,
				None => 0,
			},
		};

		println!("Outputs - ");
@ -14,24 +14,24 @@
//! Library module for the main wallet functionalities provided by Grin.

extern crate byteorder;
extern crate blake2_rfc as blake2;
#[macro_use]
extern crate slog;
extern crate rand;
extern crate serde;
#[macro_use]
extern crate serde_derive;
extern crate serde_json;

extern crate blake2_rfc as blake2;
extern crate byteorder;
extern crate rand;
extern crate serde;
#[macro_use]
extern crate serde_derive;
extern crate serde_json;
#[macro_use]
extern crate slog;

extern crate bodyparser;
extern crate futures;
extern crate tokio_core;
extern crate tokio_retry;
extern crate hyper;
extern crate iron;
#[macro_use]
extern crate router;

extern crate bodyparser;
extern crate futures;
extern crate hyper;
extern crate iron;
#[macro_use]
extern crate router;
extern crate tokio_core;
extern crate tokio_retry;

extern crate grin_api as api;
extern crate grin_core as core;
@ -48,6 +48,6 @@ pub mod client;
pub mod server;

pub use info::show_info;
pub use receiver::{WalletReceiver, receive_json_tx};
pub use receiver::{receive_json_tx, WalletReceiver};
pub use sender::{issue_send_tx, issue_burn_tx};
pub use sender::{issue_burn_tx, issue_send_tx};
pub use types::{BlockFees, CbData, Error, WalletConfig, WalletReceiveRequest, WalletSeed};
@ -15,45 +15,18 @@
//! Provides the JSON/HTTP API for wallets to receive payments. Because
//! receiving money in MimbleWimble requires an interactive exchange, a
//! wallet server that's running at all time is required in many cases.
//!
//! The API looks like this:
//!
//! POST /v1/wallet/receive
//! > {
//! >   "amount": 10,
//! >   "blind_sum": "a12b7f...",
//! >   "tx": "f083de...",
//! > }
//!
//! < {
//! <   "tx": "f083de...",
//! <   "status": "ok"
//! < }
//!
//! POST /v1/wallet/finalize
//! > {
//! >   "tx": "f083de...",
//! > }
//!
//! POST /v1/wallet/receive_coinbase
//! > {
//! >   "amount": 1,
//! > }
//!
//! < {
//! <   "output": "8a90bc...",
//! <   "kernel": "f083de...",
//! < }
//!
//! Note that while at this point the finalize call is completely unecessary, a
//! double-exchange will be required as soon as we support Schnorr signatures.
//! So we may as well have it in place already.

use core::consensus::reward;
use core::core::{Block, Transaction, TxKernel, Output, build};
use core::ser;
use api::{self, ApiEndpoint, Operation, ApiResult};
use keychain::{BlindingFactor, Identifier, Keychain};
use types::*;
use util;
use util::LOGGER;

use std::io::Read;

use core::consensus::reward;
use core::core::{build, Block, Output, Transaction, TxKernel};
use core::ser;
use api;
use iron::prelude::*;
use iron::Handler;
use iron::status;
use keychain::{BlindingFactor, Identifier, Keychain};
use serde_json;
use types::*;
use util;
use util::LOGGER;
@ -77,8 +50,8 @@ pub fn receive_json_tx(
	let tx_hex = util::to_hex(ser::ser_vec(&final_tx).unwrap());
	let url = format!("{}/v1/pool/push", config.check_node_api_http_addr.as_str());
	let _: () = api::client::post(url.as_str(), &TxWrapper { tx_hex: tx_hex })
		.map_err(|e| Error::Node(e))?;
	let _: () =
		api::client::post(url.as_str(), &TxWrapper { tx_hex: tx_hex }).map_err(|e| Error::Node(e))?;
	Ok(())
}
@ -90,27 +63,14 @@ pub struct WalletReceiver {
	pub config: WalletConfig,
}

impl ApiEndpoint for WalletReceiver {
	type ID = String;
	type T = String;
	type OP_IN = WalletReceiveRequest;
	type OP_OUT = CbData;

	fn operations(&self) -> Vec<Operation> {
		vec![Operation::Custom("receive_json_tx".to_string())]
	}

	fn operation(&self, op: String, input: WalletReceiveRequest) -> ApiResult<CbData> {
		match op.as_str() {
			"receive_json_tx" => {
				match input {
					WalletReceiveRequest::PartialTransaction(partial_tx_str) => {
						debug!(
							LOGGER,
							"Operation {} with transaction {}",
							op,
							&partial_tx_str,
						);

impl Handler for WalletReceiver {
	fn handle(&self, req: &mut Request) -> IronResult<Response> {
		let receive: WalletReceiveRequest = serde_json::from_reader(req.body.by_ref())
			.map_err(|e| IronError::new(e, status::BadRequest))?;

		match receive {
			WalletReceiveRequest::PartialTransaction(partial_tx_str) => {
				debug!(LOGGER, "Receive with transaction {}", &partial_tx_str,);

				receive_json_tx(&self.config, &self.keychain, &partial_tx_str)
					.map_err(|e| {
						api::Error::Internal(
@ -119,17 +79,11 @@ impl ApiEndpoint for WalletReceiver {
					})
					.unwrap();

				// TODO: Return emptiness for now, should be a proper enum return type
				Ok(CbData {
					output: String::from(""),
					kernel: String::from(""),
					key_id: String::from(""),
				})
			}
			_ => Err(api::Error::Argument(format!("Incorrect request data: {}", op))),
		}
	}
	_ => Err(api::Error::Argument(format!("Unknown operation: {}", op))),

				Ok(Response::with(status::Ok))
			}
			_ => Ok(Response::with(
				(status::BadRequest, format!("Incorrect request data.")),
			)),
		}
	}
}
@ -168,7 +122,7 @@ fn next_available_key(
pub fn receive_coinbase(
	config: &WalletConfig,
	keychain: &Keychain,
	block_fees: &BlockFees
	block_fees: &BlockFees,
) -> Result<(Output, TxKernel, BlockFees), Error> {
	let root_key_id = keychain.root_key_id();
	let key_id = block_fees.key_id();
@ -208,11 +162,7 @@ pub fn receive_coinbase(
	debug!(LOGGER, "block_fees updated - {:?}", block_fees);

	let (out, kern) = Block::reward_output(
		&keychain,
		&key_id,
		block_fees.fees,
	)?;
	let (out, kern) = Block::reward_output(&keychain, &key_id, block_fees.fees)?;
	Ok((out, kern, block_fees))
}
@ -241,14 +191,18 @@ fn receive_transaction(
	let out_amount = amount - fee;

	let (tx_final, _) = build::transaction(vec![
		build::initial_tx(partial),
		build::with_excess(blinding),
		build::output(out_amount, key_id.clone()),
		// build::with_fee(fee_amount),
	], keychain)?;
	let (tx_final, _) = build::transaction(
		vec![
			build::initial_tx(partial),
			build::with_excess(blinding),
			build::output(out_amount, key_id.clone()),
			// build::with_fee(fee_amount),
		],
		keychain,
	)?;

	// make sure the resulting transaction is valid (could have been lied to on excess).
	// make sure the resulting transaction is valid (could have been lied to on
	// excess).
	tx_final.validate(&keychain.secp())?;

	// operate within a lock on wallet data
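The receiver now follows the same plain-Handler pattern as the coinbase endpoint: deserialize the body yourself and answer with a status code, instead of going through the operation/ApiResult plumbing. A minimal standalone Handler of that shape (iron 0.x-era API, as used in this tree):

// sketch of the Handler shape this file now uses; not wired to a real server
extern crate iron;
use iron::prelude::*;
use iron::status;
use iron::Handler;

struct EchoHandler;

impl Handler for EchoHandler {
	fn handle(&self, _req: &mut Request) -> IronResult<Response> {
		// a real handler would read _req.body and deserialize JSON here
		Ok(Response::with((status::Ok, "ok")))
	}
}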
@ -14,9 +14,9 @@

use api;
use checker;
use core::core::{Transaction, build};
use core::core::{build, Transaction};
use core::ser;
use keychain::{BlindingFactor, Keychain, Identifier};
use keychain::{BlindingFactor, Identifier, Keychain};
use receiver::TxWrapper;
use types::*;
use util::LOGGER;
@ -55,7 +55,7 @@ pub fn issue_send_tx(
	if dest == "stdout" {
		println!("{}", json_tx);
	} else if &dest[..4] == "http" {
		let url = format!("{}/v1/receive/receive_json_tx", &dest);
		let url = format!("{}/v1/receive/transaction", &dest);
		debug!(LOGGER, "Posting partial transaction to {}", url);
		let request = WalletReceiveRequest::PartialTransaction(json_tx);
		let _: CbData = api::client::post(url.as_str(), &request).expect(&format!(
@ -130,8 +130,8 @@ pub fn issue_burn_tx(
	let tx_hex = util::to_hex(ser::ser_vec(&tx_burn).unwrap());
	let url = format!("{}/v1/pool/push", config.check_node_api_http_addr.as_str());
	let _: () = api::client::post(url.as_str(), &TxWrapper { tx_hex: tx_hex })
		.map_err(|e| Error::Node(e))?;
	let _: () =
		api::client::post(url.as_str(), &TxWrapper { tx_hex: tx_hex }).map_err(|e| Error::Node(e))?;
	Ok(())
}

@ -200,7 +200,8 @@ fn inputs_and_change(
		is_coinbase: false,
	});

	// now lock the ouputs we're spending so we avoid accidental double spend attempt
	// now lock the ouputs we're spending so we avoid accidental double spend
	// attempt
	for coin in coins {
		wallet_data.lock_output(coin);
	}
@ -27,28 +27,22 @@ pub fn start_rest_apis(wallet_config: WalletConfig, keychain: Keychain) {
		wallet_config.api_http_addr
	);

	let mut apis = ApiServer::new("/v1".to_string());
	apis.register_endpoint(
		"/receive".to_string(),
		WalletReceiver {
			config: wallet_config.clone(),
			keychain: keychain.clone(),
		},
	);

	let coinbase_handler = CoinbaseHandler {
		config: wallet_config.clone(),
		keychain: keychain.clone(),
	};
	// let tx_handler = TxHandler{};

	let router = router!(
		receive_coinbase: post "/receive/coinbase" => coinbase_handler,
		// receive_tx: post "/receive/tx" => tx_handler,
	);

	apis.register_handler("/v2", router);

	let receive_tx_handler = WalletReceiver {
		config: wallet_config.clone(),
		keychain: keychain.clone(),
	};
	let coinbase_handler = CoinbaseHandler {
		config: wallet_config.clone(),
		keychain: keychain.clone(),
	};

	let router = router!(
		receive_tx: get "/receive/transaction" => receive_tx_handler,
		receive_coinbase: post "/receive/coinbase" => coinbase_handler,
	);

	let mut apis = ApiServer::new("/v1".to_string());
	apis.register_handler(router);

	apis.start(wallet_config.api_http_addr).unwrap_or_else(|e| {
		error!(LOGGER, "Failed to start Grin wallet receiver: {}.", e);
	});
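After this change the wallet's whole surface hangs off one /v1 root: /v1/receive/transaction for the interactive exchange (declared as GET here, though sender.rs posts to it) and POST /v1/receive/coinbase for mining rewards. A trivial check of the client-side URL construction:

fn receive_url(dest: &str) -> String {
	// matches the URL built in sender.rs above
	format!("{}/v1/receive/transaction", dest)
}

fn main() {
	assert_eq!(
		receive_url("http://localhost:13415"),
		"http://localhost:13415/v1/receive/transaction"
	);
}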
@ -14,7 +14,7 @@

use blake2;
use rand::{thread_rng, Rng};
use std::{fmt, num, error};
use std::{error, fmt, num};
use std::convert::From;
use std::fs::{self, File, OpenOptions};
use std::io::{self, Read, Write};
@ -32,7 +32,7 @@ use tokio_retry::strategy::FibonacciBackoff;

use api;
use core::core::{Transaction, transaction};
use core::core::{transaction, Transaction};
use core::ser;
use keychain;
use util;
@ -62,7 +62,7 @@ pub fn tx_fee(input_len: usize, output_len: usize, base_fee: Option<u64>) -> u64
#[derive(Debug)]
pub enum Error {
	NotEnoughFunds(u64),
	FeeDispute{sender_fee: u64, recipient_fee: u64},
	FeeDispute { sender_fee: u64, recipient_fee: u64 },
	Keychain(keychain::Error),
	Transaction(transaction::Error),
	Secp(secp::Error),
@ -166,7 +166,7 @@ impl Default for WalletConfig {
	fn default() -> WalletConfig {
		WalletConfig {
			enable_wallet: false,
			api_http_addr: "0.0.0.0:13416".to_string(),
			api_http_addr: "0.0.0.0:13415".to_string(),
			check_node_api_http_addr: "http://127.0.0.1:13413".to_string(),
			data_file_dir: ".".to_string(),
		}
@ -226,8 +226,10 @@ impl OutputData {
	}

	/// How many confirmations has this output received?
	/// If height == 0 then we are either Unconfirmed or the output was cut-through
	/// so we do not actually know how many confirmations this output had (and never will).
	/// If height == 0 then we are either Unconfirmed or the output was
	/// cut-through
	/// so we do not actually know how many confirmations this output had (and
	/// never will).
	pub fn num_confirmations(&self, current_height: u64) -> u64 {
		if self.status == OutputStatus::Unconfirmed {
			0
@ -239,21 +241,16 @@ impl OutputData {
	}

	/// Check if output is eligible for spending based on state and height.
	pub fn eligible_to_spend(
		&self,
		current_height: u64,
		minimum_confirmations: u64
	) -> bool {
		if [
			OutputStatus::Spent,
			OutputStatus::Locked,
		].contains(&self.status) {
	pub fn eligible_to_spend(&self, current_height: u64, minimum_confirmations: u64) -> bool {
		if [OutputStatus::Spent, OutputStatus::Locked].contains(&self.status) {
			return false;
		} else if self.status == OutputStatus::Unconfirmed && self.is_coinbase {
			return false;
		} else if self.lock_height > current_height {
			return false;
		} else if self.status == OutputStatus::Unspent && self.height + minimum_confirmations <= current_height {
		} else if self.status == OutputStatus::Unspent
			&& self.height + minimum_confirmations <= current_height
		{
			return true;
		} else if self.status == OutputStatus::Unconfirmed && minimum_confirmations == 0 {
			return true;
@ -306,11 +303,7 @@ impl WalletSeed {
			SEED_FILE,
		);

		debug!(
			LOGGER,
			"Generating wallet seed file at: {}",
			seed_file_path,
		);
		debug!(LOGGER, "Generating wallet seed file at: {}", seed_file_path,);

		if Path::new(seed_file_path).exists() {
			panic!("wallet seed file already exists");
@ -333,11 +326,7 @@ impl WalletSeed {
			SEED_FILE,
		);

		debug!(
			LOGGER,
			"Using wallet seed file at: {}",
			seed_file_path,
		);
		debug!(LOGGER, "Using wallet seed file at: {}", seed_file_path,);

		if Path::new(seed_file_path).exists() {
			let mut file = File::open(seed_file_path)?;
@@ -369,10 +358,11 @@ pub struct WalletData {
 }

 impl WalletData {
-    /// Allows for reading wallet data (without needing to acquire the write lock).
+    /// Allows for reading wallet data (without needing to acquire the write
+    /// lock).
     pub fn read_wallet<T, F>(data_file_dir: &str, f: F) -> Result<T, Error>
-        where F: FnOnce(&WalletData) -> T
+    where
+        F: FnOnce(&WalletData) -> T,
     {
         // open the wallet readonly and do what needs to be done with it
         let data_file_path = &format!("{}{}{}", data_file_dir, MAIN_SEPARATOR, DAT_FILE);
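A usage sketch of the closure-based read API, inside a function that itself returns `Result<_, Error>`; `config` is a hypothetical `WalletConfig`:

let output_count = WalletData::read_wallet(&config.data_file_dir, |wallet_data| {
    wallet_data.outputs.len()
})?;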
@@ -388,7 +378,8 @@ impl WalletData {
     /// across operating systems, this just creates a lock file with a "should
     /// not exist" option.
     pub fn with_wallet<T, F>(data_file_dir: &str, f: F) -> Result<T, Error>
-        where F: FnOnce(&mut WalletData) -> T
+    where
+        F: FnOnce(&mut WalletData) -> T,
     {
         // create directory if it doesn't exist
         fs::create_dir_all(data_file_dir).unwrap_or_else(|why| {
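The mutable counterpart follows the same shape, with changes persisted once the closure returns. A sketch; `add_output` stands in for the append operation documented later in this diff and is hypothetical as named:

WalletData::with_wallet(&config.data_file_dir, |wallet_data| {
    wallet_data.add_output(new_output);
})?;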
@@ -415,7 +406,7 @@ impl WalletData {
         let retry_result = core.run(retry_future);

         match retry_result {
-            Ok(_) => {}
+            Ok(_) => {}
             Err(_) => {
                 error!(
                     LOGGER,
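The lock acquisition here retries through a future run on `core`. A std-only sketch of the same "create the lock file, else wait and retry" behaviour, assuming the lock really is a plain should-not-exist file as the doc comment above describes; names and timings are illustrative:

use std::fs;
use std::thread;
use std::time::Duration;

fn acquire_lock(lock_file_path: &str, attempts: u32) -> Result<(), ()> {
    for _ in 0..attempts {
        // create_new fails if the file already exists, giving lock semantics
        if fs::OpenOptions::new()
            .write(true)
            .create_new(true)
            .open(lock_file_path)
            .is_ok()
        {
            return Ok(());
        }
        thread::sleep(Duration::from_millis(100));
    }
    Err(())
}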
@@ -448,31 +439,33 @@ impl WalletData {
             WalletData::read(data_file_path)
         } else {
             // just create a new instance, it will get written afterward
-            Ok(WalletData { outputs: HashMap::new() })
+            Ok(WalletData {
+                outputs: HashMap::new(),
+            })
         }
     }

     /// Read the wallet data from disk.
     fn read(data_file_path: &str) -> Result<WalletData, Error> {
-        let data_file =
-            File::open(data_file_path)
-                .map_err(|e| Error::WalletData(format!("Could not open {}: {}", data_file_path, e)))?;
-        serde_json::from_reader(data_file)
-            .map_err(|e| Error::WalletData(format!("Error reading {}: {}", data_file_path, e)))
+        let data_file = File::open(data_file_path).map_err(|e| {
+            Error::WalletData(format!("Could not open {}: {}", data_file_path, e))
+        })?;
+        serde_json::from_reader(data_file).map_err(|e| {
+            Error::WalletData(format!("Error reading {}: {}", data_file_path, e))
+        })
     }

     /// Write the wallet data to disk.
     fn write(&self, data_file_path: &str) -> Result<(), Error> {
-        let mut data_file =
-            File::create(data_file_path)
-                .map_err(|e| {
-                    Error::WalletData(format!("Could not create {}: {}", data_file_path, e))
-                })?;
-        let res_json = serde_json::to_vec_pretty(self)
-            .map_err(|e| Error::WalletData(format!("Error serializing wallet data: {}", e)))?;
-        data_file
-            .write_all(res_json.as_slice())
-            .map_err(|e| Error::WalletData(format!("Error writing {}: {}", data_file_path, e)))
+        let mut data_file = File::create(data_file_path).map_err(|e| {
+            Error::WalletData(format!("Could not create {}: {}", data_file_path, e))
+        })?;
+        let res_json = serde_json::to_vec_pretty(self).map_err(|e| {
+            Error::WalletData(format!("Error serializing wallet data: {}", e))
+        })?;
+        data_file.write_all(res_json.as_slice()).map_err(|e| {
+            Error::WalletData(format!("Error writing {}: {}", data_file_path, e))
+        })
     }

     /// Append a new output data to the wallet data.
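Since `read` and `write` are private, a round-trip check would have to live in this module. A test sketch; the scratch path is hypothetical, and `WalletData` is assumed to derive Serialize/Deserialize as the serde_json calls imply:

#[test]
fn wallet_data_round_trip() {
    let wd = WalletData { outputs: HashMap::new() };
    let path = "target/wallet_round_trip.dat"; // illustrative scratch location
    wd.write(path).unwrap();
    let back = WalletData::read(path).unwrap();
    assert_eq!(back.outputs.len(), 0);
}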
@@ -503,7 +496,6 @@ impl WalletData {
         current_height: u64,
         minimum_confirmations: u64,
     ) -> Vec<OutputData> {
-
         self.outputs
             .values()
             .filter(|out| {
@@ -537,10 +529,11 @@ struct JSONPartialTx {

 /// Encodes the information for a partial transaction (not yet completed by the
 /// receiver) into JSON.
-pub fn partial_tx_to_json(receive_amount: u64,
-                          blind_sum: keychain::BlindingFactor,
-                          tx: Transaction)
-                          -> String {
+pub fn partial_tx_to_json(
+    receive_amount: u64,
+    blind_sum: keychain::BlindingFactor,
+    tx: Transaction,
+) -> String {
     let partial_tx = JSONPartialTx {
         amount: receive_amount,
         blind_sum: util::to_hex(blind_sum.secret_key().as_ref().to_vec()),
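Given the `JSONPartialTx` fields, the wire format is a number plus two hex strings. A hedged sketch of producing it; field names come from this diff, the values shown are abbreviated placeholders:

let json = partial_tx_to_json(receive_amount, blind_sum, tx);
// expected shape: {"amount":10,"blind_sum":"ab12..","tx":"9f00.."}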
@@ -551,9 +544,10 @@ pub fn partial_tx_to_json(receive_amount: u64,

 /// Reads a partial transaction encoded as JSON into the amount, sum of blinding
 /// factors and the transaction itself.
-pub fn partial_tx_from_json(keychain: &keychain::Keychain,
-                            json_str: &str)
-                            -> Result<(u64, keychain::BlindingFactor, Transaction), Error> {
+pub fn partial_tx_from_json(
+    keychain: &keychain::Keychain,
+    json_str: &str,
+) -> Result<(u64, keychain::BlindingFactor, Transaction), Error> {
     let partial_tx: JSONPartialTx = serde_json::from_str(json_str)?;

     let blind_bin = util::from_hex(partial_tx.blind_sum)?;
@@ -563,8 +557,7 @@ pub fn partial_tx_from_json(keychain: &keychain::Keychain,
     let blinding = keychain::BlindingFactor::from_slice(keychain.secp(), &blind_bin[..])?;
     let tx_bin = util::from_hex(partial_tx.tx)?;
-    let tx = ser::deserialize(&mut &tx_bin[..])
-        .map_err(|_| {
+    let tx = ser::deserialize(&mut &tx_bin[..]).map_err(|_| {
         Error::Format("Could not deserialize transaction, invalid format.".to_string())
     })?;
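The two functions should invert each other. A round-trip sketch, assuming a `Keychain` instance in scope and that `Transaction` implements `Clone`:

let json = partial_tx_to_json(amount, blind_sum, tx.clone());
let (amount2, _blind_sum2, _tx2) = partial_tx_from_json(&keychain, &json)?;
assert_eq!(amount, amount2);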