Cleanup HTTP APIs, update ports to avoid gap, rustfmt
Moved the HTTP APIs away from the REST endpoint abstraction to simpler Hyper handlers. Re-established all routes under v1. Changed the wallet receiver port to 13415 to avoid a gap in the port numbering. Finally, rustfmt seems to have ignored the specific-file arguments and ran on everything.
parent 05d22cb632
commit e4ebb7c7cb
78 changed files with 1705 additions and 1928 deletions
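For context, this is the shape the API code moves to: plain Iron handlers collected on one router and mounted a single time under /v1. The sketch below is illustrative, not code from the commit; the PingHandler name, its route, and the 13413 listen address are assumptions for the example.

#[macro_use]
extern crate router;
extern crate iron;
extern crate mount;

use iron::prelude::*;
use iron::status;
use iron::Handler;
use mount::Mount;

// illustrative only: a trivial handler in the style the commit adopts
struct PingHandler;

impl Handler for PingHandler {
    fn handle(&self, _req: &mut Request) -> IronResult<Response> {
        Ok(Response::with((status::Ok, "pong")))
    }
}

fn main() {
    // one router carries every route; it is mounted a single time under /v1,
    // which is what ApiServer::register_handler now does internally
    let router = router!(ping: get "/ping" => PingHandler);
    let mut mount = Mount::new();
    mount.mount("/v1", router);
    Iron::new(mount).http("127.0.0.1:13413").unwrap();
}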
api/src/client.rs

@@ -17,7 +17,7 @@
 use hyper;
 use hyper::client::Response;
 use hyper::status::{StatusClass, StatusCode};
-use serde::{Serialize, Deserialize};
+use serde::{Deserialize, Serialize};
 use serde_json;
 
 use rest::Error;
@@ -26,12 +26,14 @@ use rest::Error;
 /// returns a JSON object. Handles request building, JSON deserialization and
 /// response code checking.
 pub fn get<'a, T>(url: &'a str) -> Result<T, Error>
-    where for<'de> T: Deserialize<'de>
+where
+    for<'de> T: Deserialize<'de>,
 {
     let client = hyper::Client::new();
     let res = check_error(client.get(url).send())?;
-    serde_json::from_reader(res)
-        .map_err(|e| Error::Internal(format!("Server returned invalid JSON: {}", e)))
+    serde_json::from_reader(res).map_err(|e| {
+        Error::Internal(format!("Server returned invalid JSON: {}", e))
+    })
 }
 
 /// Helper function to easily issue a HTTP POST request with the provided JSON
@@ -39,15 +41,18 @@ pub fn get<'a, T>(url: &'a str) -> Result<T, Error>
 /// building, JSON serialization and deserialization, and response code
 /// checking.
 pub fn post<'a, IN, OUT>(url: &'a str, input: &IN) -> Result<OUT, Error>
-    where IN: Serialize,
-          for<'de> OUT: Deserialize<'de>
+where
+    IN: Serialize,
+    for<'de> OUT: Deserialize<'de>,
 {
-    let in_json = serde_json::to_string(input)
-        .map_err(|e| Error::Internal(format!("Could not serialize data to JSON: {}", e)))?;
+    let in_json = serde_json::to_string(input).map_err(|e| {
+        Error::Internal(format!("Could not serialize data to JSON: {}", e))
+    })?;
     let client = hyper::Client::new();
     let res = check_error(client.post(url).body(&mut in_json.as_bytes()).send())?;
-    serde_json::from_reader(res)
-        .map_err(|e| Error::Internal(format!("Server returned invalid JSON: {}", e)))
+    serde_json::from_reader(res).map_err(|e| {
+        Error::Internal(format!("Server returned invalid JSON: {}", e))
+    })
 }
 
 // convert hyper error and check for non success response codes
@@ -59,13 +64,11 @@ fn check_error(res: hyper::Result<Response>) -> Result<Response, Error> {
     match response.status.class() {
         StatusClass::Success => Ok(response),
         StatusClass::ServerError => Err(Error::Internal(format!("Server error."))),
-        StatusClass::ClientError => {
-            if response.status == StatusCode::NotFound {
-                Err(Error::NotFound)
-            } else {
-                Err(Error::Argument(format!("Argument error")))
-            }
-        }
+        StatusClass::ClientError => if response.status == StatusCode::NotFound {
+            Err(Error::NotFound)
+        } else {
+            Err(Error::Argument(format!("Argument error")))
+        },
        _ => Err(Error::Internal(format!("Unrecognized error."))),
     }
 }
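A short usage sketch for the helpers above, driving the consolidated v1 routes. The node address and port (13413) and the commitment ids are illustrative assumptions, not part of the diff.

extern crate grin_api as api;

fn main() {
    // chain tip through the consolidated v1 routes
    let tip: api::Tip = api::client::get("http://127.0.0.1:13413/v1/chain").unwrap();
    println!("tip at height {}", tip.height);

    // several utxos in one call, using the query forms the handlers document
    let url = "http://127.0.0.1:13413/v1/chain/utxos?id=aaaa,bbbb"; // placeholder ids
    let utxos: Vec<api::Output> = api::client::get(url).unwrap();
    println!("{} unspent outputs", utxos.len());
}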
api/src/endpoints.rs (deleted)

@@ -1,127 +0,0 @@
-// Copyright 2016 The Grin Developers
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-
-use std::sync::{Arc, RwLock};
-use std::thread;
-
-use chain;
-use core::core::Transaction;
-use core::ser;
-use pool;
-use handlers::{UtxoHandler, ChainHandler, SumTreeHandler};
-use rest::*;
-use types::*;
-use util;
-use util::LOGGER;
-
-/// ApiEndpoint implementation for the transaction pool, to check its status
-/// and size as well as push new transactions.
-#[derive(Clone)]
-pub struct PoolApi<T> {
-    tx_pool: Arc<RwLock<pool::TransactionPool<T>>>,
-}
-
-impl<T> ApiEndpoint for PoolApi<T>
-where
-    T: pool::BlockChain + Clone + Send + Sync + 'static,
-{
-    type ID = String;
-    type T = PoolInfo;
-    type OP_IN = TxWrapper;
-    type OP_OUT = ();
-
-    fn operations(&self) -> Vec<Operation> {
-        vec![Operation::Get, Operation::Custom("push".to_string())]
-    }
-
-    fn get(&self, _: String) -> ApiResult<PoolInfo> {
-        let pool = self.tx_pool.read().unwrap();
-        Ok(PoolInfo {
-            pool_size: pool.pool_size(),
-            orphans_size: pool.orphans_size(),
-            total_size: pool.total_size(),
-        })
-    }
-
-    fn operation(&self, _: String, input: TxWrapper) -> ApiResult<()> {
-        let tx_bin = util::from_hex(input.tx_hex).map_err(|_| {
-            Error::Argument(format!("Invalid hex in transaction wrapper."))
-        })?;
-
-        let tx: Transaction = ser::deserialize(&mut &tx_bin[..]).map_err(|_| {
-            Error::Argument(
-                "Could not deserialize transaction, invalid format.".to_string(),
-            )
-        })?;
-
-        let source = pool::TxSource {
-            debug_name: "push-api".to_string(),
-            identifier: "?.?.?.?".to_string(),
-        };
-        info!(
-            LOGGER,
-            "Pushing transaction with {} inputs and {} outputs to pool.",
-            tx.inputs.len(),
-            tx.outputs.len()
-        );
-        self.tx_pool
-            .write()
-            .unwrap()
-            .add_to_memory_pool(source, tx)
-            .map_err(|e| {
-                Error::Internal(format!("Addition to transaction pool failed: {:?}", e))
-            })?;
-
-        Ok(())
-    }
-}
-
-/// Dummy wrapper for the hex-encoded serialized transaction.
-#[derive(Serialize, Deserialize)]
-pub struct TxWrapper {
-    tx_hex: String,
-}
-
-/// Start all server REST APIs. Just register all of them on a ApiServer
-/// instance and runs the corresponding HTTP server.
-pub fn start_rest_apis<T>(
-    addr: String,
-    chain: Arc<chain::Chain>,
-    tx_pool: Arc<RwLock<pool::TransactionPool<T>>>,
-) where
-    T: pool::BlockChain + Clone + Send + Sync + 'static,
-{
-
-    thread::spawn(move || {
-        let mut apis = ApiServer::new("/v1".to_string());
-        apis.register_endpoint("/pool".to_string(), PoolApi {tx_pool: tx_pool});
-
-        // register a nested router at "/v2" for flexibility
-        // so we can experiment with raw iron handlers
-        let utxo_handler = UtxoHandler {chain: chain.clone()};
-        let chain_tip_handler = ChainHandler {chain: chain.clone()};
-        let sumtree_handler = SumTreeHandler {chain: chain.clone()};
-        let router = router!(
-            chain_tip: get "/chain" => chain_tip_handler,
-            chain_utxos: get "/chain/utxos" => utxo_handler,
-            sumtree_roots: get "/sumtrees/*" => sumtree_handler,
-        );
-        apis.register_handler("/v2", router);
-
-        apis.start(&addr[..]).unwrap_or_else(|e| {
-            error!(LOGGER, "Failed to start API HTTP server: {}.", e);
-        });
-    });
-}
api/src/handlers.rs

@@ -12,37 +12,45 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-use std::sync::Arc;
+use std::io::Read;
+use std::sync::{Arc, RwLock};
+use std::thread;
 
 use iron::prelude::*;
 use iron::Handler;
 use iron::status;
 use urlencoded::UrlEncodedQuery;
+use serde::Serialize;
 use serde_json;
 
 use chain;
+use core::core::Transaction;
+use core::ser;
+use pool;
 use rest::*;
-use types::*;
 use util::secp::pedersen::Commitment;
+use types::*;
 use util;
 use util::LOGGER;
 
 
-pub struct UtxoHandler {
-    pub chain: Arc<chain::Chain>,
+// Supports retrieval of multiple outputs in a single request -
+// GET /v1/chain/utxos?id=xxx,yyy,zzz
+// GET /v1/chain/utxos?id=xxx&id=yyy&id=zzz
+struct UtxoHandler {
+    chain: Arc<chain::Chain>,
 }
 
 impl UtxoHandler {
     fn get_utxo(&self, id: &str) -> Result<Output, Error> {
         debug!(LOGGER, "getting utxo: {}", id);
-        let c = util::from_hex(String::from(id))
-            .map_err(|_| {
-                Error::Argument(format!("Not a valid commitment: {}", id))
-            })?;
+        let c = util::from_hex(String::from(id)).map_err(|_| {
+            Error::Argument(format!("Not a valid commitment: {}", id))
+        })?;
         let commit = Commitment::from_vec(c);
 
-        let out = self.chain.get_unspent(&commit)
+        let out = self.chain
+            .get_unspent(&commit)
             .map_err(|_| Error::NotFound)?;
 
         let header = self.chain
@@ -53,11 +61,6 @@ impl UtxoHandler {
     }
 }
 
-//
-// Supports retrieval of multiple outputs in a single request -
-// GET /v2/chain/utxos?id=xxx,yyy,zzz
-// GET /v2/chain/utxos?id=xxx&id=yyy&id=zzz
-//
 impl Handler for UtxoHandler {
     fn handle(&self, req: &mut Request) -> IronResult<Response> {
         let mut commitments: Vec<&str> = vec![];
@@ -72,60 +75,49 @@ impl Handler for UtxoHandler {
         }
 
         let mut utxos: Vec<Output> = vec![];
 
         for commit in commitments {
             if let Ok(out) = self.get_utxo(commit) {
                 utxos.push(out);
             }
         }
 
-        match serde_json::to_string(&utxos) {
-            Ok(json) => Ok(Response::with((status::Ok, json))),
-            Err(_) => Ok(Response::with((status::BadRequest, ""))),
-        }
+        json_response(&utxos)
     }
 }
 
-// Sum tree handler
-
-pub struct SumTreeHandler {
-    pub chain: Arc<chain::Chain>,
+// Sum tree handler. Retrieve the roots:
+// GET /v1/sumtrees/roots
+//
+// Last inserted nodes::
+// GET /v1/sumtrees/lastutxos (gets last 10)
+// GET /v1/sumtrees/lastutxos?n=5
+// GET /v1/sumtrees/lastrangeproofs
+// GET /v1/sumtrees/lastkernels
+struct SumTreeHandler {
+    chain: Arc<chain::Chain>,
 }
 
 impl SumTreeHandler {
-    //gets roots
+    // gets roots
     fn get_roots(&self) -> SumTrees {
         SumTrees::from_head(self.chain.clone())
     }
 
     // gets last n utxos inserted in to the tree
-    fn get_last_n_utxo(&self, distance:u64) -> Vec<SumTreeNode> {
+    fn get_last_n_utxo(&self, distance: u64) -> Vec<SumTreeNode> {
         SumTreeNode::get_last_n_utxo(self.chain.clone(), distance)
     }
 
     // gets last n utxos inserted in to the tree
-    fn get_last_n_rangeproof(&self, distance:u64) -> Vec<SumTreeNode> {
+    fn get_last_n_rangeproof(&self, distance: u64) -> Vec<SumTreeNode> {
         SumTreeNode::get_last_n_rangeproof(self.chain.clone(), distance)
     }
 
     // gets last n utxos inserted in to the tree
-    fn get_last_n_kernel(&self, distance:u64) -> Vec<SumTreeNode> {
+    fn get_last_n_kernel(&self, distance: u64) -> Vec<SumTreeNode> {
         SumTreeNode::get_last_n_kernel(self.chain.clone(), distance)
     }
 
 }
 
-//
-// Retrieve the roots:
-// GET /v2/sumtrees/roots
-//
-// Last inserted nodes::
-// GET /v2/sumtrees/lastutxos (gets last 10)
-// GET /v2/sumtrees/lastutxos?n=5
-// GET /v2/sumtrees/lastrangeproofs
-// GET /v2/sumtrees/lastkernels
-//
 impl Handler for SumTreeHandler {
     fn handle(&self, req: &mut Request) -> IronResult<Response> {
         let url = req.url.clone();
@@ -133,40 +125,29 @@ impl Handler for SumTreeHandler {
         if *path_elems.last().unwrap() == "" {
             path_elems.pop();
         }
-        //TODO: probably need to set a reasonable max limit here
-        let mut last_n=10;
+        // TODO: probably need to set a reasonable max limit here
+        let mut last_n = 10;
         if let Ok(params) = req.get_ref::<UrlEncodedQuery>() {
             if let Some(nums) = params.get("n") {
                 for num in nums {
                     if let Ok(n) = str::parse(num) {
-                        last_n=n;
+                        last_n = n;
                     }
                 }
             }
         }
-        match *path_elems.last().unwrap(){
-            "roots" => match serde_json::to_string_pretty(&self.get_roots()) {
-                Ok(json) => Ok(Response::with((status::Ok, json))),
-                Err(_) => Ok(Response::with((status::BadRequest, ""))),
-            },
-            "lastutxos" => match serde_json::to_string_pretty(&self.get_last_n_utxo(last_n)) {
-                Ok(json) => Ok(Response::with((status::Ok, json))),
-                Err(_) => Ok(Response::with((status::BadRequest, ""))),
-            },
-            "lastrangeproofs" => match serde_json::to_string_pretty(&self.get_last_n_rangeproof(last_n)) {
-                Ok(json) => Ok(Response::with((status::Ok, json))),
-                Err(_) => Ok(Response::with((status::BadRequest, ""))),
-            },
-            "lastkernels" => match serde_json::to_string_pretty(&self.get_last_n_kernel(last_n)) {
-                Ok(json) => Ok(Response::with((status::Ok, json))),
-                Err(_) => Ok(Response::with((status::BadRequest, ""))),
-            },_ => Ok(Response::with((status::BadRequest, "")))
+        match *path_elems.last().unwrap() {
+            "roots" => json_response(&self.get_roots()),
+            "lastutxos" => json_response(&self.get_last_n_utxo(last_n)),
+            "lastrangeproofs" => json_response(&self.get_last_n_rangeproof(last_n)),
+            "lastkernels" => json_response(&self.get_last_n_kernel(last_n)),
+            _ => Ok(Response::with((status::BadRequest, ""))),
         }
     }
 }
 
-// Chain Handler
-
+// Chain handler. Get the head details.
+// GET /v1/chain
 pub struct ChainHandler {
     pub chain: Arc<chain::Chain>,
 }
@@ -177,16 +158,134 @@ impl ChainHandler {
     }
 }
 
-//
-// Get the head details
-// GET /v2/chain
-//
-
 impl Handler for ChainHandler {
     fn handle(&self, _req: &mut Request) -> IronResult<Response> {
-        match serde_json::to_string_pretty(&self.get_tip()) {
-            Ok(json) => Ok(Response::with((status::Ok, json))),
-            Err(_) => Ok(Response::with((status::BadRequest, ""))),
-        }
+        json_response(&self.get_tip())
     }
 }
 
+// Get basic information about the transaction pool.
+struct PoolInfoHandler<T> {
+    tx_pool: Arc<RwLock<pool::TransactionPool<T>>>,
+}
+
+impl<T> Handler for PoolInfoHandler<T>
+where
+    T: pool::BlockChain + Send + Sync + 'static,
+{
+    fn handle(&self, _req: &mut Request) -> IronResult<Response> {
+        let pool = self.tx_pool.read().unwrap();
+        json_response(&PoolInfo {
+            pool_size: pool.pool_size(),
+            orphans_size: pool.orphans_size(),
+            total_size: pool.total_size(),
+        })
+    }
+}
+
+/// Dummy wrapper for the hex-encoded serialized transaction.
+#[derive(Serialize, Deserialize)]
+struct TxWrapper {
+    tx_hex: String,
+}
+
+// Push new transactions to our transaction pool, that should broadcast it
+// to the network if valid.
+struct PoolPushHandler<T> {
+    tx_pool: Arc<RwLock<pool::TransactionPool<T>>>,
+}
+
+impl<T> Handler for PoolPushHandler<T>
+where
+    T: pool::BlockChain + Send + Sync + 'static,
+{
+    fn handle(&self, req: &mut Request) -> IronResult<Response> {
+        let wrapper: TxWrapper = serde_json::from_reader(req.body.by_ref())
+            .map_err(|e| IronError::new(e, status::BadRequest))?;
+
+        let tx_bin = util::from_hex(wrapper.tx_hex).map_err(|_| {
+            Error::Argument(format!("Invalid hex in transaction wrapper."))
+        })?;
+
+        let tx: Transaction = ser::deserialize(&mut &tx_bin[..]).map_err(|_| {
+            Error::Argument("Could not deserialize transaction, invalid format.".to_string())
+        })?;
+
+        let source = pool::TxSource {
+            debug_name: "push-api".to_string(),
+            identifier: "?.?.?.?".to_string(),
+        };
+        info!(
+            LOGGER,
+            "Pushing transaction with {} inputs and {} outputs to pool.",
+            tx.inputs.len(),
+            tx.outputs.len()
+        );
+        self.tx_pool
+            .write()
+            .unwrap()
+            .add_to_memory_pool(source, tx)
+            .map_err(|e| {
+                Error::Internal(format!("Addition to transaction pool failed: {:?}", e))
+            })?;
+
+        Ok(Response::with(status::Ok))
+    }
+}
+
+// Utility to serialize a struct into JSON and produce a sensible IronResult
+// out of it.
+fn json_response<T>(s: &T) -> IronResult<Response>
+where
+    T: Serialize,
+{
+    match serde_json::to_string_pretty(s) {
+        Ok(json) => Ok(Response::with((status::Ok, json))),
+        Err(_) => Ok(Response::with((status::InternalServerError, ""))),
+    }
+}
+
+/// Start all server HTTP handlers. Register all of them with Iron
+/// and runs the corresponding HTTP server.
+pub fn start_rest_apis<T>(
+    addr: String,
+    chain: Arc<chain::Chain>,
+    tx_pool: Arc<RwLock<pool::TransactionPool<T>>>,
+) where
+    T: pool::BlockChain + Send + Sync + 'static,
+{
+    thread::spawn(move || {
+        // build handlers and register them under the appropriate endpoint
+        let utxo_handler = UtxoHandler {
+            chain: chain.clone(),
+        };
+        let chain_tip_handler = ChainHandler {
+            chain: chain.clone(),
+        };
+        let sumtree_handler = SumTreeHandler {
+            chain: chain.clone(),
+        };
+        let pool_info_handler = PoolInfoHandler {
+            tx_pool: tx_pool.clone(),
+        };
+        let pool_push_handler = PoolPushHandler {
+            tx_pool: tx_pool.clone(),
+        };
+
+        let router = router!(
+            chain_tip: get "/chain" => chain_tip_handler,
+            chain_utxos: get "/chain/utxos" => utxo_handler,
+            sumtree_roots: get "/sumtrees/*" => sumtree_handler,
+            pool_info: get "/pool" => pool_info_handler,
+            pool_push: post "/pool/push" => pool_push_handler,
+        );
+
+        let mut apis = ApiServer::new("/v1".to_string());
+        apis.register_handler(router);
+
+        info!(LOGGER, "Starting HTTP API server at {}.", addr);
+        apis.start(&addr[..]).unwrap_or_else(|e| {
+            error!(LOGGER, "Failed to start API HTTP server: {}.", e);
+        });
+    });
+}
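A client of the new POST /v1/pool/push route sends the TxWrapper JSON and, on success, gets an empty 200 back. A hedged sketch using hyper directly; the address and the hex payload are placeholders.

extern crate hyper;

use hyper::status::StatusCode;

fn main() {
    // hex-encoded serialized transaction, wrapped exactly like TxWrapper
    let body = r#"{"tx_hex":"0a1b2c"}"#; // placeholder hex
    let client = hyper::Client::new();
    let res = client
        .post("http://127.0.0.1:13413/v1/pool/push")
        .body(body)
        .send()
        .unwrap();
    assert_eq!(res.status, StatusCode::Ok);
}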
api/src/lib.rs

@@ -12,31 +12,30 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-extern crate grin_core as core;
 extern crate grin_chain as chain;
+extern crate grin_core as core;
 extern crate grin_pool as pool;
 extern crate grin_store as store;
 extern crate grin_util as util;
 
 extern crate hyper;
-#[macro_use]
-extern crate slog;
 extern crate iron;
-extern crate urlencoded;
-extern crate mount;
 #[macro_use]
 extern crate router;
+extern crate mount;
 extern crate serde;
 #[macro_use]
 extern crate serde_derive;
 extern crate serde_json;
+#[macro_use]
+extern crate slog;
+extern crate urlencoded;
 
 pub mod client;
-mod endpoints;
 mod handlers;
 mod rest;
 mod types;
 
-pub use endpoints::start_rest_apis;
+pub use handlers::start_rest_apis;
 pub use types::*;
 pub use rest::*;
api/src/rest.rs (283 changed lines)

@@ -19,26 +19,18 @@
 //! register them on a ApiServer.
 
-use std::error;
-use std::fmt::{self, Display, Debug, Formatter};
-use std::io::Read;
+use std::fmt::{self, Display, Formatter};
 use std::net::ToSocketAddrs;
 use std::string::ToString;
-use std::str::FromStr;
 use std::mem;
 
 use iron::prelude::*;
-use iron::{status, headers, Listening};
-use iron::method::Method;
-use iron::modifiers::Header;
+use iron::{status, Listening};
 use iron::middleware::Handler;
-use router::Router;
 use mount::Mount;
-use serde::Serialize;
-use serde::de::DeserializeOwned;
-use serde_json;
 
 use store;
 use util::LOGGER;
 
 /// Errors that can be returned by an ApiEndpoint implementation.
 #[derive(Debug)]
@@ -87,161 +79,6 @@ impl From<store::Error> for Error {
     }
 }
 
-#[derive(Debug, Clone)]
-#[allow(dead_code)]
-pub enum Operation {
-    Create,
-    Delete,
-    Update,
-    Get,
-    Custom(String),
-}
-
-impl Operation {
-    fn to_method(&self) -> Method {
-        match *self {
-            Operation::Create => Method::Post,
-            Operation::Delete => Method::Delete,
-            Operation::Update => Method::Put,
-            Operation::Get => Method::Get,
-            Operation::Custom(_) => Method::Post,
-        }
-    }
-}
-
-pub type ApiResult<T> = ::std::result::Result<T, Error>;
-
-/// Trait to implement to expose a service as a RESTful HTTP endpoint. Each
-/// method corresponds to a specific relative URL and HTTP method following
-/// basic REST principles:
-///
-/// * create: POST /
-/// * get: GET /:id
-/// * update: PUT /:id
-/// * delete: DELETE /:id
-///
-/// The methods method defines which operation the endpoint implements, they're
-/// all optional by default. It also allows the framework to automatically
-/// define the OPTIONS HTTP method.
-///
-/// The type accepted by create and update, and returned by get, must implement
-/// the serde Serialize and Deserialize traits. The identifier type returned by
-/// create and accepted by all other methods must have a string representation.
-pub trait ApiEndpoint: Clone + Send + Sync + 'static {
-    type ID: ToString + FromStr;
-    type T: Serialize + DeserializeOwned;
-    type OP_IN: Serialize + DeserializeOwned;
-    type OP_OUT: Serialize + DeserializeOwned;
-
-    fn operations(&self) -> Vec<Operation>;
-
-    #[allow(unused_variables)]
-    fn create(&self, o: Self::T) -> ApiResult<Self::ID> {
-        unimplemented!()
-    }
-
-    #[allow(unused_variables)]
-    fn delete(&self, id: Self::ID) -> ApiResult<()> {
-        unimplemented!()
-    }
-
-    #[allow(unused_variables)]
-    fn update(&self, id: Self::ID, o: Self::T) -> ApiResult<()> {
-        unimplemented!()
-    }
-
-    #[allow(unused_variables)]
-    fn get(&self, id: Self::ID) -> ApiResult<Self::T> {
-        unimplemented!()
-    }
-
-    #[allow(unused_variables)]
-    fn operation(&self, op: String, input: Self::OP_IN) -> ApiResult<Self::OP_OUT> {
-        unimplemented!()
-    }
-}
-
-// Wrapper required to define the implementation below, Rust doesn't let us
-// define the parametric implementation for trait from another crate.
-struct ApiWrapper<E>(E);
-
-impl<E> Handler for ApiWrapper<E>
-where E: ApiEndpoint,
-      <<E as ApiEndpoint>::ID as FromStr>::Err: Debug + Send + error::Error
-{
-    fn handle(&self, req: &mut Request) -> IronResult<Response> {
-        match req.method {
-            Method::Get => {
-                let res = self.0.get(extract_param(req, "id")?)?;
-                let res_json = serde_json::to_string(&res)
-                    .map_err(|e| IronError::new(e, status::InternalServerError))?;
-                Ok(Response::with((status::Ok, res_json)))
-            }
-            Method::Put => {
-                let id = extract_param(req, "id")?;
-                let t: E::T = serde_json::from_reader(req.body.by_ref())
-                    .map_err(|e| IronError::new(e, status::BadRequest))?;
-                self.0.update(id, t)?;
-                Ok(Response::with(status::NoContent))
-            }
-            Method::Delete => {
-                let id = extract_param(req, "id")?;
-                self.0.delete(id)?;
-                Ok(Response::with(status::NoContent))
-            }
-            Method::Post => {
-                let t: E::T = serde_json::from_reader(req.body.by_ref())
-                    .map_err(|e| IronError::new(e, status::BadRequest))?;
-                let id = self.0.create(t)?;
-                Ok(Response::with((status::Created, id.to_string())))
-            }
-            _ => Ok(Response::with(status::MethodNotAllowed)),
-        }
-    }
-}
-
-struct OpWrapper<E> {
-    operation: String,
-    endpoint: E,
-}
-
-impl<E> Handler for OpWrapper<E>
-where E: ApiEndpoint
-{
-    fn handle(&self, req: &mut Request) -> IronResult<Response> {
-        let t: E::OP_IN = serde_json::from_reader(req.body.by_ref()).map_err(|e| {
-            IronError::new(e, status::BadRequest)
-        })?;
-        let res = self.endpoint.operation(self.operation.clone(), t);
-        match res {
-            Ok(resp) => {
-                let res_json = serde_json::to_string(&resp).map_err(|e| {
-                    IronError::new(e, status::InternalServerError)
-                })?;
-                Ok(Response::with((status::Ok, res_json)))
-            }
-            Err(e) => {
-                error!(LOGGER, "API operation: {:?}", e);
-                Err(IronError::from(e))
-            }
-        }
-    }
-}
-
-fn extract_param<ID>(req: &mut Request, param: &'static str) -> IronResult<ID>
-where ID: ToString + FromStr,
-      <ID as FromStr>::Err: Debug + Send + error::Error + 'static
-{
-    let id = req.extensions
-        .get::<Router>()
-        .unwrap()
-        .find(param)
-        .unwrap_or("");
-    id.parse::<ID>()
-        .map_err(|e| IronError::new(e, status::BadRequest))
-}
-
 /// HTTP server allowing the registration of ApiEndpoint implementations.
 pub struct ApiServer {
     root: String,
@@ -281,119 +118,7 @@ impl ApiServer {
     }
 
     /// Registers an iron handler (via mount)
-    pub fn register_handler<H: Handler>(&mut self, route: &str, handler: H) -> &mut Mount {
-        self.mount.mount(route, handler)
-    }
-
-    /// Register a new API endpoint, providing a relative URL for the new
-    /// endpoint.
-    pub fn register_endpoint<E>(&mut self, subpath: String, endpoint: E)
-    where E: ApiEndpoint,
-          <<E as ApiEndpoint>::ID as FromStr>::Err: Debug + Send + error::Error
-    {
-        assert_eq!(subpath.chars().nth(0).unwrap(), '/');
-
-        // declare a route for each method actually implemented by the endpoint
-        let route_postfix = &subpath[1..];
-        let root = self.root.clone() + &subpath;
-        for op in endpoint.operations() {
-            let route_name = format!("{:?}_{}", op, route_postfix);
-
-            // special case of custom operations
-            if let Operation::Custom(op_s) = op.clone() {
-                let wrapper = OpWrapper {
-                    operation: op_s.clone(),
-                    endpoint: endpoint.clone(),
-                };
-                let full_path = format!("{}/{}", root.clone(), op_s.clone());
-                self.router
-                    .route(op.to_method(), full_path.clone(), wrapper, route_name);
-                info!(LOGGER, "route: POST {}", full_path);
-            } else {
-
-                // regular REST operations
-                let full_path = match op.clone() {
-                    Operation::Get => root.clone() + "/:id",
-                    Operation::Update => root.clone() + "/:id",
-                    Operation::Delete => root.clone() + "/:id",
-                    Operation::Create => root.clone(),
-                    _ => panic!("unreachable"),
-                };
-                let wrapper = ApiWrapper(endpoint.clone());
-                self.router
-                    .route(op.to_method(), full_path.clone(), wrapper, route_name);
-                info!(LOGGER, "route: {} {}", op.to_method(), full_path);
-            }
-        }
-
-        // support for the HTTP Options method by differentiating what's on the
-        // root resource vs the id resource
-        let (root_opts, sub_opts) = endpoint
-            .operations()
-            .iter()
-            .fold((vec![], vec![]), |mut acc, op| {
-                let m = op.to_method();
-                if m == Method::Post {
-                    acc.0.push(m);
-                } else {
-                    acc.1.push(m);
-                }
-                acc
-            });
-        self.router.options(
-            root.clone(),
-            move |_: &mut Request| {
-                Ok(Response::with((status::Ok, Header(headers::Allow(root_opts.clone())))))
-            },
-            "option_".to_string() + route_postfix,
-        );
-        self.router.options(
-            root.clone() + "/:id",
-            move |_: &mut Request| {
-                Ok(Response::with((status::Ok, Header(headers::Allow(sub_opts.clone())))))
-            },
-            "option_id_".to_string() + route_postfix,
-        );
-    }
-}
-
-#[cfg(test)]
-mod test {
-    use super::*;
-
-    #[derive(Serialize, Deserialize)]
-    pub struct Animal {
-        name: String,
-        legs: u32,
-        lethal: bool,
-    }
-
-    #[derive(Clone)]
-    pub struct TestApi;
-
-    impl ApiEndpoint for TestApi {
-        type ID = String;
-        type T = Animal;
-        type OP_IN = ();
-        type OP_OUT = ();
-
-        fn operations(&self) -> Vec<Operation> {
-            vec![Operation::Get]
-        }
-
-        fn get(&self, name: String) -> ApiResult<Animal> {
-            Ok(Animal {
-                name: name,
-                legs: 4,
-                lethal: false,
-            })
-        }
-    }
-
-    #[test]
-    fn req_chain_json() {
-        let mut apis = ApiServer::new("/v1".to_string());
-        apis.register_endpoint("/animal".to_string(), TestApi);
+    pub fn register_handler<H: Handler>(&mut self, handler: H) -> &mut Mount {
+        self.mount.mount(&self.root, handler)
+    }
 }
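With the endpoint machinery removed, ApiServer's public surface reduces to new, register_handler and start, and the registered handler is mounted at the server's root path. A minimal sketch of that flow; the closure stands in for a real router, and the listen address is an assumption.

extern crate grin_api as api;
extern crate iron;

use iron::prelude::*;
use iron::status;

fn main() {
    let mut apis = api::ApiServer::new("/v1".to_string());
    // any iron Handler works; closures implement Handler too
    apis.register_handler(|_req: &mut Request| {
        Ok(Response::with((status::Ok, "{}")))
    });
    apis.start("127.0.0.1:13413").unwrap_or_else(|e| {
        println!("failed to start API server: {}", e);
    });
}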
api/src/types.rs

@@ -44,7 +44,7 @@ impl Tip {
     }
 }
 
-/// Sumtrees
+/// Sumtrees
 #[derive(Serialize, Deserialize, Debug, Clone)]
 pub struct SumTrees {
     /// UTXO Root Hash
@@ -59,7 +59,7 @@ pub struct SumTrees {
 
 impl SumTrees {
     pub fn from_head(head: Arc<chain::Chain>) -> SumTrees {
-        let roots=head.get_sumtree_roots();
+        let roots = head.get_sumtree_roots();
         SumTrees {
             utxo_root_hash: util::to_hex(roots.0.hash.to_vec()),
             utxo_root_sum: util::to_hex(roots.0.sum.commit.0.to_vec()),
@@ -80,14 +80,13 @@ pub struct SumTreeNode {
 }
 
 impl SumTreeNode {
-
-    pub fn get_last_n_utxo(chain: Arc<chain::Chain>, distance:u64) -> Vec<SumTreeNode> {
+    pub fn get_last_n_utxo(chain: Arc<chain::Chain>, distance: u64) -> Vec<SumTreeNode> {
         let mut return_vec = Vec::new();
         let last_n = chain.get_last_n_utxo(distance);
         for elem_output in last_n {
             let header = chain
-                .get_block_header_by_output_commit(&elem_output.1.commit)
-                .map_err(|_| Error::NotFound);
+                .get_block_header_by_output_commit(&elem_output.1.commit)
+                .map_err(|_| Error::NotFound);
             // Need to call further method to check if output is spent
             let mut output = OutputPrintable::from_output(&elem_output.1, &header.unwrap());
             if let Ok(_) = chain.get_unspent(&elem_output.1.commit) {
@@ -101,7 +100,7 @@ impl SumTreeNode {
         return_vec
     }
 
-    pub fn get_last_n_rangeproof(head: Arc<chain::Chain>, distance:u64) -> Vec<SumTreeNode> {
+    pub fn get_last_n_rangeproof(head: Arc<chain::Chain>, distance: u64) -> Vec<SumTreeNode> {
         let mut return_vec = Vec::new();
         let last_n = head.get_last_n_rangeproof(distance);
         for elem in last_n {
@@ -113,7 +112,7 @@ impl SumTreeNode {
         return_vec
     }
 
-    pub fn get_last_n_kernel(head: Arc<chain::Chain>, distance:u64) -> Vec<SumTreeNode> {
+    pub fn get_last_n_kernel(head: Arc<chain::Chain>, distance: u64) -> Vec<SumTreeNode> {
         let mut return_vec = Vec::new();
         let last_n = head.get_last_n_kernel(distance);
         for elem in last_n {
@@ -149,9 +148,10 @@ pub struct Output {
 impl Output {
     pub fn from_output(output: &core::Output, block_header: &core::BlockHeader) -> Output {
         let (output_type, lock_height) = match output.features {
-            x if x.contains(core::transaction::COINBASE_OUTPUT) => {
-                (OutputType::Coinbase, block_header.height + global::coinbase_maturity())
-            }
+            x if x.contains(core::transaction::COINBASE_OUTPUT) => (
+                OutputType::Coinbase,
+                block_header.height + global::coinbase_maturity(),
+            ),
             _ => (OutputType::Transaction, 0),
         };
 
@@ -165,12 +165,13 @@ impl Output {
     }
 }
 
-//As above, except formatted a bit better for human viewing
+// As above, except formatted a bit better for human viewing
 #[derive(Debug, Serialize, Deserialize, Clone)]
 pub struct OutputPrintable {
     /// The type of output Coinbase|Transaction
     pub output_type: OutputType,
-    /// The homomorphic commitment representing the output's amount (as hex string)
+    /// The homomorphic commitment representing the output's amount (as hex
+    /// string)
     pub commit: String,
     /// The height of the block creating this output
     pub height: u64,
@@ -185,9 +186,10 @@ pub struct OutputPrintable {
 impl OutputPrintable {
     pub fn from_output(output: &core::Output, block_header: &core::BlockHeader) -> OutputPrintable {
         let (output_type, lock_height) = match output.features {
-            x if x.contains(core::transaction::COINBASE_OUTPUT) => {
-                (OutputType::Coinbase, block_header.height + global::coinbase_maturity())
-            }
+            x if x.contains(core::transaction::COINBASE_OUTPUT) => (
+                OutputType::Coinbase,
+                block_header.height + global::coinbase_maturity(),
+            ),
             _ => (OutputType::Transaction, 0),
         };
         OutputPrintable {
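Both Output::from_output and OutputPrintable::from_output encode the same lock_height rule: a coinbase output only becomes spendable coinbase_maturity() blocks after the block that created it. A small worked sketch; the 1000-block maturity is grin's consensus default, stated here as an assumption.

// returns the height at which an output can be spent
fn lock_height(is_coinbase: bool, block_height: u64) -> u64 {
    let coinbase_maturity = 1_000; // stands in for global::coinbase_maturity()
    if is_coinbase {
        // a coinbase created at height h matures at h + 1000
        block_height + coinbase_maturity
    } else {
        // regular transaction outputs are spendable immediately
        0
    }
}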
chain/src/chain.rs

@@ -20,8 +20,8 @@ use std::sync::{Arc, Mutex, RwLock};
 
 use util::secp::pedersen::{Commitment, RangeProof};
 
-use core::core::{SumCommit};
-use core::core::pmmr::{NoSum, HashSum};
+use core::core::SumCommit;
+use core::core::pmmr::{HashSum, NoSum};
 
 use core::core::{Block, BlockHeader, Output, TxKernel};
 use core::core::target::Difficulty;
@@ -119,8 +119,10 @@ impl Chain {
     /// has been added to the longest chain, None if it's added to an (as of
     /// now) orphan chain.
     pub fn process_block(&self, b: Block, opts: Options) -> Result<Option<Tip>, Error> {
-        let head = self.store.head().map_err(|e| Error::StoreErr(e, "chain load head".to_owned()))?;
-        let height = head.height;
+        let head = self.store
+            .head()
+            .map_err(|e| Error::StoreErr(e, "chain load head".to_owned()))?;
+        let height = head.height;
         let ctx = self.ctx_from_head(head, opts);
 
         let res = pipe::process_block(&b, ctx);
@@ -143,13 +145,11 @@ impl Chain {
                 self.check_orphans();
             }
             Ok(None) => {}
-            Err(Error::Orphan) => {
-                if b.header.height < height + (MAX_ORPHANS as u64) {
-                    let mut orphans = self.orphans.lock().unwrap();
-                    orphans.push_front((opts, b));
-                    orphans.truncate(MAX_ORPHANS);
-                }
-            }
+            Err(Error::Orphan) => if b.header.height < height + (MAX_ORPHANS as u64) {
+                let mut orphans = self.orphans.lock().unwrap();
+                orphans.push_front((opts, b));
+                orphans.truncate(MAX_ORPHANS);
+            },
             Err(ref e) => {
                 info!(
                     LOGGER,
@@ -171,8 +171,9 @@ impl Chain {
         bh: &BlockHeader,
         opts: Options,
     ) -> Result<Option<Tip>, Error> {
-
-        let head = self.store.get_header_head().map_err(|e| Error::StoreErr(e, "chain header head".to_owned()))?;
+        let head = self.store
+            .get_header_head()
+            .map_err(|e| Error::StoreErr(e, "chain header head".to_owned()))?;
         let ctx = self.ctx_from_head(head, opts);
 
         pipe::process_block_header(bh, ctx)
@@ -199,7 +200,7 @@ impl Chain {
     /// Pop orphans out of the queue and check if we can now accept them.
     fn check_orphans(&self) {
         // first check how many we have to retry, unfort. we can't extend the lock
-        // in the loop as it needs to be freed before going in process_block
+        // in the loop as it needs to be freed before going in process_block
         let orphan_count;
         {
             let orphans = self.orphans.lock().unwrap();
@@ -227,9 +228,9 @@ impl Chain {
         let sumtrees = self.sumtrees.read().unwrap();
         let is_unspent = sumtrees.is_unspent(output_ref)?;
         if is_unspent {
-            self.store.get_output_by_commit(output_ref).map_err(|e|
-                Error::StoreErr(e, "chain get unspent".to_owned())
-            )
+            self.store
+                .get_output_by_commit(output_ref)
+                .map_err(|e| Error::StoreErr(e, "chain get unspent".to_owned()))
         } else {
             Err(Error::OutputNotFound)
         }
@@ -254,9 +255,13 @@ impl Chain {
     }
 
     /// returs sumtree roots
-    pub fn get_sumtree_roots(&self) -> (HashSum<SumCommit>,
+    pub fn get_sumtree_roots(
+        &self,
+    ) -> (
+        HashSum<SumCommit>,
         HashSum<NoSum<RangeProof>>,
-        HashSum<NoSum<TxKernel>>) {
+        HashSum<NoSum<TxKernel>>,
+    ) {
         let mut sumtrees = self.sumtrees.write().unwrap();
         sumtrees.roots()
     }
@@ -264,10 +269,10 @@ impl Chain {
     /// returns the last n nodes inserted into the utxo sum tree
     /// returns sum tree hash plus output itself (as the sum is contained
     /// in the output anyhow)
-    pub fn get_last_n_utxo(&self, distance: u64) -> Vec<(Hash, Output)>{
+    pub fn get_last_n_utxo(&self, distance: u64) -> Vec<(Hash, Output)> {
         let mut sumtrees = self.sumtrees.write().unwrap();
         let mut return_vec = Vec::new();
-        let sum_nodes=sumtrees.last_n_utxo(distance);
+        let sum_nodes = sumtrees.last_n_utxo(distance);
         for sum_commit in sum_nodes {
             let output = self.store.get_output_by_commit(&sum_commit.sum.commit);
             return_vec.push((sum_commit.hash, output.unwrap()));
@@ -276,13 +281,13 @@ impl Chain {
     }
 
     /// as above, for rangeproofs
-    pub fn get_last_n_rangeproof(&self, distance: u64) -> Vec<HashSum<NoSum<RangeProof>>>{
+    pub fn get_last_n_rangeproof(&self, distance: u64) -> Vec<HashSum<NoSum<RangeProof>>> {
         let mut sumtrees = self.sumtrees.write().unwrap();
         sumtrees.last_n_rangeproof(distance)
     }
 
     /// as above, for kernels
-    pub fn get_last_n_kernel(&self, distance: u64) -> Vec<HashSum<NoSum<TxKernel>>>{
+    pub fn get_last_n_kernel(&self, distance: u64) -> Vec<HashSum<NoSum<TxKernel>>> {
         let mut sumtrees = self.sumtrees.write().unwrap();
         sumtrees.last_n_kernel(distance)
     }
@@ -299,24 +304,30 @@ impl Chain {
 
     /// Block header for the chain head
     pub fn head_header(&self) -> Result<BlockHeader, Error> {
-        self.store.head_header().map_err(|e| Error::StoreErr(e, "chain head header".to_owned()))
+        self.store
+            .head_header()
+            .map_err(|e| Error::StoreErr(e, "chain head header".to_owned()))
     }
 
     /// Gets a block header by hash
     pub fn get_block(&self, h: &Hash) -> Result<Block, Error> {
-        self.store.get_block(h).map_err(|e| Error::StoreErr(e, "chain get block".to_owned()))
+        self.store
+            .get_block(h)
+            .map_err(|e| Error::StoreErr(e, "chain get block".to_owned()))
     }
 
     /// Gets a block header by hash
     pub fn get_block_header(&self, h: &Hash) -> Result<BlockHeader, Error> {
-        self.store.get_block_header(h).map_err(|e| Error::StoreErr(e, "chain get header".to_owned()))
+        self.store
+            .get_block_header(h)
+            .map_err(|e| Error::StoreErr(e, "chain get header".to_owned()))
     }
 
     /// Gets the block header at the provided height
     pub fn get_header_by_height(&self, height: u64) -> Result<BlockHeader, Error> {
-        self.store.get_header_by_height(height).map_err(|e|
-            Error::StoreErr(e, "chain get header by height".to_owned()),
-        )
+        self.store.get_header_by_height(height).map_err(|e| {
+            Error::StoreErr(e, "chain get header by height".to_owned())
+        })
     }
 
     /// Gets the block header by the provided output commitment
@@ -331,7 +342,9 @@ impl Chain {
 
     /// Get the tip of the header chain
     pub fn get_header_head(&self) -> Result<Tip, Error> {
-        self.store.get_header_head().map_err(|e |Error::StoreErr(e, "chain get header head".to_owned()))
+        self.store
+            .get_header_head()
+            .map_err(|e| Error::StoreErr(e, "chain get header head".to_owned()))
     }
 
     /// Builds an iterator on blocks starting from the current chain head and
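The reformatted get_sumtree_roots signature returns the same triple as before; a short sketch of consuming it, assuming an initialized Chain.

extern crate grin_chain as chain;

// `c` is assumed to be an initialized chain::Chain
fn log_sumtree_roots(c: &chain::Chain) {
    // roots of the UTXO, range proof and kernel sum trees, in that order
    let (utxo_root, rproof_root, kernel_root) = c.get_sumtree_roots();
    println!(
        "utxo: {:?}, rproof: {:?}, kernel: {:?}",
        utxo_root.hash, rproof_root.hash, kernel_root.hash
    );
}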
chain/src/lib.rs

@@ -23,16 +23,16 @@
 #[macro_use]
 extern crate bitflags;
 extern crate byteorder;
-#[macro_use]
-extern crate slog;
 extern crate serde;
 #[macro_use]
 extern crate serde_derive;
+#[macro_use]
+extern crate slog;
 extern crate time;
 
 extern crate grin_core as core;
-extern crate grin_util as util;
 extern crate grin_store;
+extern crate grin_util as util;
 
 mod chain;
 pub mod pipe;
@@ -43,4 +43,4 @@ pub mod types;
 // Re-export the base interface
 
 pub use chain::Chain;
-pub use types::{ChainStore, Tip, ChainAdapter, SYNC, NONE, SKIP_POW, EASY_POW, Options, Error};
+pub use types::{ChainAdapter, ChainStore, Error, Options, Tip, EASY_POW, NONE, SKIP_POW, SYNC};
chain/src/pipe.rs

@@ -21,7 +21,7 @@ use time;
 
 use core::consensus;
 use core::core::hash::{Hash, Hashed};
-use core::core::{BlockHeader, Block};
+use core::core::{Block, BlockHeader};
 use core::core::transaction;
 use types::*;
 use store;
@@ -49,7 +49,7 @@ pub struct BlockContext {
 /// chain head if updated.
 pub fn process_block(b: &Block, mut ctx: BlockContext) -> Result<Option<Tip>, Error> {
     // TODO should just take a promise for a block with a full header so we don't
-    // spend resources reading the full block when its header is invalid
+    // spend resources reading the full block when its header is invalid
 
     info!(
         LOGGER,
@@ -68,13 +68,13 @@ pub fn process_block(b: &Block, mut ctx: BlockContext) -> Result<Option<Tip>, Er
     let mut sumtrees = local_sumtrees.write().unwrap();
 
     // update head now that we're in the lock
-    ctx.head = ctx.store.head().
-        map_err(|e| Error::StoreErr(e, "pipe reload head".to_owned()))?;
+    ctx.head = ctx.store
+        .head()
+        .map_err(|e| Error::StoreErr(e, "pipe reload head".to_owned()))?;
 
     // start a chain extension unit of work dependent on the success of the
-    // internal validation and saving operations
+    // internal validation and saving operations
     sumtree::extending(&mut sumtrees, |mut extension| {
-
         validate_block(b, &mut ctx, &mut extension)?;
         debug!(
             LOGGER,
@@ -94,7 +94,6 @@ pub fn process_block(b: &Block, mut ctx: BlockContext) -> Result<Option<Tip>, Er
 
 /// Process the block header
 pub fn process_block_header(bh: &BlockHeader, mut ctx: BlockContext) -> Result<Option<Tip>, Error> {
-
     info!(
         LOGGER,
         "Starting validation pipeline for block header {} at {}.",
@@ -120,7 +119,7 @@ fn check_known(bh: Hash, ctx: &mut BlockContext) -> Result<(), Error> {
     }
     if let Ok(b) = ctx.store.get_block(&bh) {
         // there is a window where a block can be saved but the chain head not
-        // updated yet, we plug that window here by re-accepting the block
+        // updated yet, we plug that window here by re-accepting the block
         if b.header.total_difficulty <= ctx.head.total_difficulty {
             return Err(Error::Unfit("already in store".to_string()));
         }
@@ -147,11 +146,11 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), E
         return Err(Error::InvalidBlockVersion(header.version));
     }
 
-    if header.timestamp >
-        time::now_utc() + time::Duration::seconds(12 * (consensus::BLOCK_TIME_SEC as i64))
+    if header.timestamp
+        > time::now_utc() + time::Duration::seconds(12 * (consensus::BLOCK_TIME_SEC as i64))
     {
         // refuse blocks more than 12 blocks intervals in future (as in bitcoin)
-        // TODO add warning in p2p code if local time is too different from peers
+        // TODO add warning in p2p code if local time is too different from peers
         return Err(Error::InvalidBlockTime);
     }
 
@@ -168,16 +167,16 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), E
     }
 
     // first I/O cost, better as late as possible
-    let prev = try!(ctx.store.get_block_header(&header.previous).map_err(|e|
-        Error::StoreErr(e, format!("previous block header {}", header.previous)),
-    ));
+    let prev = try!(ctx.store.get_block_header(&header.previous,).map_err(|e| {
+        Error::StoreErr(e, format!("previous block header {}", header.previous))
+    },));
 
     if header.height != prev.height + 1 {
         return Err(Error::InvalidBlockHeight);
     }
     if header.timestamp <= prev.timestamp && !global::is_automated_testing_mode() {
         // prevent time warp attacks and some timestamp manipulations by forcing strict
-        // time progression (but not in CI mode)
+        // time progression (but not in CI mode)
         return Err(Error::InvalidBlockTime);
     }
 
@@ -189,9 +188,8 @@ fn validate_header(header: &BlockHeader, ctx: &mut BlockContext) -> Result<(), E
     }
 
     let diff_iter = store::DifficultyIter::from(header.previous, ctx.store.clone());
-    let difficulty = consensus::next_difficulty(diff_iter).map_err(|e| {
-        Error::Other(e.to_string())
-    })?;
+    let difficulty =
+        consensus::next_difficulty(diff_iter).map_err(|e| Error::Other(e.to_string()))?;
     if header.difficulty < difficulty {
         return Err(Error::DifficultyTooLow);
     }
@@ -219,9 +217,8 @@ fn validate_block(
         // standard head extension
         ext.apply_block(b)?;
     } else {
-
         // extending a fork, first identify the block where forking occurred
-        // keeping the hashes of blocks along the fork
+        // keeping the hashes of blocks along the fork
         let mut current = b.header.previous;
         let mut hashes = vec![];
         loop {
@@ -236,16 +233,12 @@ fn validate_block(
         }
 
         // rewind the sum trees up the forking block, providing the height of the
-        // forked block and the last commitment we want to rewind to
+        // forked block and the last commitment we want to rewind to
         let forked_block = ctx.store.get_block(&current)?;
         if forked_block.header.height > 0 {
             let last_output = &forked_block.outputs[forked_block.outputs.len() - 1];
             let last_kernel = &forked_block.kernels[forked_block.kernels.len() - 1];
-            ext.rewind(
-                forked_block.header.height,
-                last_output,
-                last_kernel,
-            )?;
+            ext.rewind(forked_block.header.height, last_output, last_kernel)?;
         }
 
         // apply all forked blocks, including this new one
@@ -257,10 +250,9 @@ fn validate_block(
     }
 
     let (utxo_root, rproof_root, kernel_root) = ext.roots();
-    if utxo_root.hash != b.header.utxo_root || rproof_root.hash != b.header.range_proof_root ||
-        kernel_root.hash != b.header.kernel_root
+    if utxo_root.hash != b.header.utxo_root || rproof_root.hash != b.header.range_proof_root
+        || kernel_root.hash != b.header.kernel_root
     {
-
         ext.dump(false);
         return Err(Error::InvalidRoot);
     }
@@ -269,14 +261,11 @@ fn validate_block(
     for input in &b.inputs {
         if let Ok(output) = ctx.store.get_output_by_commit(&input.commitment()) {
             if output.features.contains(transaction::COINBASE_OUTPUT) {
-                if let Ok(output_header) =
-                    ctx.store.get_block_header_by_output_commit(
-                        &input.commitment(),
-                    )
+                if let Ok(output_header) = ctx.store
+                    .get_block_header_by_output_commit(&input.commitment())
                 {
-
                     // TODO - make sure we are not off-by-1 here vs. the equivalent tansaction
-                    // validation rule
+                    // validation rule
                     if b.header.height <= output_header.height + global::coinbase_maturity() {
                         return Err(Error::ImmatureCoinbase);
                     }
@@ -290,12 +279,16 @@ fn validate_block(
 
 /// Officially adds the block to our chain.
 fn add_block(b: &Block, ctx: &mut BlockContext) -> Result<(), Error> {
-    ctx.store.save_block(b).map_err(|e| Error::StoreErr(e, "pipe save block".to_owned()))
+    ctx.store
+        .save_block(b)
+        .map_err(|e| Error::StoreErr(e, "pipe save block".to_owned()))
 }
 
 /// Officially adds the block header to our header chain.
 fn add_block_header(bh: &BlockHeader, ctx: &mut BlockContext) -> Result<(), Error> {
-    ctx.store.save_block_header(bh).map_err(|e| Error::StoreErr(e, "pipe save header".to_owned()))
+    ctx.store
+        .save_block_header(bh)
+        .map_err(|e| Error::StoreErr(e, "pipe save header".to_owned()))
 }
 
 /// Directly updates the head if we've just appended a new block to it or handle
@@ -303,23 +296,33 @@ fn add_block_header(bh: &BlockHeader, ctx: &mut BlockContext) -> Result<(), Erro
 /// work than the head.
 fn update_head(b: &Block, ctx: &mut BlockContext) -> Result<Option<Tip>, Error> {
     // if we made a fork with more work than the head (which should also be true
-    // when extending the head), update it
+    // when extending the head), update it
     let tip = Tip::from_block(&b.header);
     if tip.total_difficulty > ctx.head.total_difficulty {
-
         // update the block height index
-        ctx.store.setup_height(&b.header).map_err(|e| Error::StoreErr(e, "pipe setup height".to_owned()))?;
+        ctx.store
+            .setup_height(&b.header)
+            .map_err(|e| Error::StoreErr(e, "pipe setup height".to_owned()))?;
 
-        // in sync mode, only update the "body chain", otherwise update both the
-        // "header chain" and "body chain", updating the header chain in sync resets
-        // all additional "future" headers we've received
-        if ctx.opts.intersects(SYNC) {
-            ctx.store.save_body_head(&tip).map_err(|e| Error::StoreErr(e, "pipe save body".to_owned()))?;
-        } else {
-            ctx.store.save_head(&tip).map_err(|e| Error::StoreErr(e, "pipe save head".to_owned()))?;
-        }
+        // in sync mode, only update the "body chain", otherwise update both the
+        // "header chain" and "body chain", updating the header chain in sync resets
+        // all additional "future" headers we've received
+        if ctx.opts.intersects(SYNC) {
+            ctx.store
+                .save_body_head(&tip)
+                .map_err(|e| Error::StoreErr(e, "pipe save body".to_owned()))?;
+        } else {
+            ctx.store
+                .save_head(&tip)
+                .map_err(|e| Error::StoreErr(e, "pipe save head".to_owned()))?;
+        }
         ctx.head = tip.clone();
-        info!(LOGGER, "Updated head to {} at {}.", b.hash(), b.header.height);
+        info!(
+            LOGGER,
+            "Updated head to {} at {}.",
+            b.hash(),
+            b.header.height
+        );
         Ok(Some(tip))
     } else {
         Ok(None)
@@ -331,10 +334,12 @@ fn update_head(b: &Block, ctx: &mut BlockContext) -> Result<Option<Tip>, Error>
 /// work than the head.
 fn update_header_head(bh: &BlockHeader, ctx: &mut BlockContext) -> Result<Option<Tip>, Error> {
     // if we made a fork with more work than the head (which should also be true
-    // when extending the head), update it
+    // when extending the head), update it
     let tip = Tip::from_block(bh);
     if tip.total_difficulty > ctx.head.total_difficulty {
-        ctx.store.save_header_head(&tip).map_err(|e| Error::StoreErr(e, "pipe save header head".to_owned()))?;
+        ctx.store
+            .save_header_head(&tip)
+            .map_err(|e| Error::StoreErr(e, "pipe save header head".to_owned()))?;
 
         ctx.head = tip.clone();
         info!(
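The future-time check in validate_header, in isolation: a header more than 12 block intervals ahead of local time is rejected, as the comment notes, mirroring bitcoin. A sketch using the same time-crate expressions as the diff; the 60-second BLOCK_TIME_SEC is grin's target block time, stated here as an assumption.

extern crate time;

const BLOCK_TIME_SEC: u64 = 60; // grin's target block time, assumed here

// true when the header timestamp is too far in the future to accept
fn timestamp_too_far_ahead(header_ts: time::Tm) -> bool {
    header_ts > time::now_utc() + time::Duration::seconds(12 * (BLOCK_TIME_SEC as i64))
}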
chain/src/store.rs

@@ -23,7 +23,7 @@ use core::core::hash::{Hash, Hashed};
 use core::core::{Block, BlockHeader, Output};
 use core::consensus::TargetError;
 use core::core::target::Difficulty;
-use grin_store::{self, Error, to_key, u64_to_key, option_to_not_found};
+use grin_store::{self, option_to_not_found, to_key, Error, u64_to_key};
 
 const STORE_SUBPATH: &'static str = "chain";
 
@@ -85,9 +85,10 @@ impl ChainStore for ChainKVStore {
     }
 
     fn get_block_header(&self, h: &Hash) -> Result<BlockHeader, Error> {
-        option_to_not_found(self.db.get_ser(
-            &to_key(BLOCK_HEADER_PREFIX, &mut h.to_vec()),
-        ))
+        option_to_not_found(
+            self.db
+                .get_ser(&to_key(BLOCK_HEADER_PREFIX, &mut h.to_vec())),
+        )
     }
 
     fn check_block_exists(&self, h: &Hash) -> Result<bool, Error> {
@@ -111,16 +112,14 @@ impl ChainStore for ChainKVStore {
                 &to_key(
                     OUTPUT_COMMIT_PREFIX,
                     &mut out.commitment().as_ref().to_vec(),
-                )
-                    [..],
+                )[..],
                 out,
             )?
             .put_ser(
                 &to_key(
                     HEADER_BY_OUTPUT_PREFIX,
                     &mut out.commitment().as_ref().to_vec(),
-                )
-                    [..],
+                )[..],
                 &b.hash(),
             )?;
         }
@@ -128,18 +127,18 @@ impl ChainStore for ChainKVStore {
     }
 
     // lookup the block header hash by output commitment
-    // lookup the block header based on this hash
-    // to check the chain is correct compare this block header to
-    // the block header currently indexed at the relevant block height (tbd if
-    // actually necessary)
-    //
-    // NOTE: This index is not exhaustive.
-    // This node may not have seen this full block, so may not have populated the
-    // index.
-    // Block headers older than some threshold (2 months?) will not necessarily be
-    // included
-    // in this index.
-    //
+    // lookup the block header based on this hash
+    // to check the chain is correct compare this block header to
+    // the block header currently indexed at the relevant block height (tbd if
+    // actually necessary)
+    //
+    // NOTE: This index is not exhaustive.
+    // This node may not have seen this full block, so may not have populated the
+    // index.
+    // Block headers older than some threshold (2 months?) will not necessarily be
+    // included
+    // in this index.
+    //
     fn get_block_header_by_output_commit(&self, commit: &Commitment) -> Result<BlockHeader, Error> {
         let block_hash = self.db.get_ser(&to_key(
             HEADER_BY_OUTPUT_PREFIX,
@@ -172,10 +171,10 @@ impl ChainStore for ChainKVStore {
     }
 
     fn get_output_by_commit(&self, commit: &Commitment) -> Result<Output, Error> {
-        option_to_not_found(self.db.get_ser(&to_key(
-            OUTPUT_COMMIT_PREFIX,
-            &mut commit.as_ref().to_vec(),
-        )))
+        option_to_not_found(
+            self.db
+                .get_ser(&to_key(OUTPUT_COMMIT_PREFIX, &mut commit.as_ref().to_vec())),
+        )
     }
 
     fn save_output_pos(&self, commit: &Commitment, pos: u64) -> Result<(), Error> {
@@ -186,10 +185,10 @@ impl ChainStore for ChainKVStore {
     }
 
     fn get_output_pos(&self, commit: &Commitment) -> Result<u64, Error> {
-        option_to_not_found(self.db.get_ser(&to_key(
-            COMMIT_POS_PREFIX,
-            &mut commit.as_ref().to_vec(),
-        )))
+        option_to_not_found(
+            self.db
+                .get_ser(&to_key(COMMIT_POS_PREFIX, &mut commit.as_ref().to_vec())),
+        )
     }
 
     fn save_kernel_pos(&self, excess: &Commitment, pos: u64) -> Result<(), Error> {
@@ -200,10 +199,10 @@ impl ChainStore for ChainKVStore {
     }
 
     fn get_kernel_pos(&self, excess: &Commitment) -> Result<u64, Error> {
-        option_to_not_found(self.db.get_ser(&to_key(
-            KERNEL_POS_PREFIX,
-            &mut excess.as_ref().to_vec(),
-        )))
+        option_to_not_found(
+            self.db
+                .get_ser(&to_key(KERNEL_POS_PREFIX, &mut excess.as_ref().to_vec())),
+        )
     }
 
     /// Maintain consistency of the "header_by_height" index by traversing back
@@ -213,10 +212,8 @@ impl ChainStore for ChainKVStore {
     /// that is consistent with its height (everything prior to this will be
     /// consistent)
     fn setup_height(&self, bh: &BlockHeader) -> Result<(), Error> {
-        self.db.put_ser(
-            &u64_to_key(HEADER_HEIGHT_PREFIX, bh.height),
-            bh,
-        )?;
+        self.db
+            .put_ser(&u64_to_key(HEADER_HEIGHT_PREFIX, bh.height), bh)?;
         if bh.height == 0 {
             return Ok(());
         }
@@ -24,7 +24,7 @@ use util::secp;
use util::secp::pedersen::{RangeProof, Commitment};

use core::core::{Block, Output, SumCommit, TxKernel};
use core::core::pmmr::{Summable, NoSum, PMMR, HashSum, Backend};
use core::core::pmmr::{Backend, HashSum, NoSum, Summable, PMMR};
use grin_store;
use grin_store::sumtree::PMMRBackend;
use types::ChainStore;

@@ -121,7 +121,11 @@ impl SumTrees {
/// Get sum tree roots
pub fn roots(
&mut self,
) -> (HashSum<SumCommit>, HashSum<NoSum<RangeProof>>, HashSum<NoSum<TxKernel>>) {
) -> (
HashSum<SumCommit>,
HashSum<NoSum<RangeProof>>,
HashSum<NoSum<TxKernel>>,
) {
let output_pmmr = PMMR::at(&mut self.output_pmmr_h.backend, self.output_pmmr_h.last_pos);
let rproof_pmmr = PMMR::at(&mut self.rproof_pmmr_h.backend, self.rproof_pmmr_h.last_pos);
let kernel_pmmr = PMMR::at(&mut self.kernel_pmmr_h.backend, self.kernel_pmmr_h.last_pos);

@@ -140,7 +144,6 @@ pub fn extending<'a, F, T>(trees: &'a mut SumTrees, inner: F) -> Result<T, Error
where
F: FnOnce(&mut Extension) -> Result<T, Error>,
{

let sizes: (u64, u64, u64);
let res: Result<T, Error>;
let rollback: bool;

@@ -229,7 +232,7 @@ impl<'a> Extension<'a> {
let secp = secp::Secp256k1::with_caps(secp::ContextFlag::Commit);

// doing inputs first guarantees an input can't spend an output in the
// same block, enforcing block cut-through
// same block, enforcing block cut-through
for input in &b.inputs {
let pos_res = self.commit_index.get_output_pos(&input.commitment());
if let Ok(pos) = pos_res {

@@ -319,7 +322,11 @@ impl<'a> Extension<'a> {
/// and kernel sum trees.
pub fn roots(
&self,
) -> (HashSum<SumCommit>, HashSum<NoSum<RangeProof>>, HashSum<NoSum<TxKernel>>) {
) -> (
HashSum<SumCommit>,
HashSum<NoSum<RangeProof>>,
HashSum<NoSum<TxKernel>>,
) {
(
self.output_pmmr.root(),
self.rproof_pmmr.root(),

@@ -19,7 +19,7 @@ use std::io;
use util::secp::pedersen::Commitment;

use grin_store as store;
use core::core::{Block, BlockHeader, block, Output};
use core::core::{block, Block, BlockHeader, Output};
use core::core::hash::{Hash, Hashed};
use core::core::target::Difficulty;
use core::ser;

@@ -209,9 +209,10 @@ pub trait ChainStore: Send + Sync {
fn get_output_by_commit(&self, commit: &Commitment) -> Result<Output, store::Error>;

/// Gets a block_header for the given input commit
fn get_block_header_by_output_commit(&self,
commit: &Commitment)
-> Result<BlockHeader, store::Error>;
fn get_block_header_by_output_commit(
&self,
commit: &Commitment,
) -> Result<BlockHeader, store::Error>;

/// Saves the position of an output, represented by its commitment, in the
/// UTXO MMR. Used as an index for spending and pruning.

@@ -12,13 +12,13 @@
// See the License for the specific language governing permissions and
// limitations under the License.

extern crate grin_core as core;
extern crate grin_chain as chain;
extern crate grin_keychain as keychain;
extern crate env_logger;
extern crate time;
extern crate rand;
extern crate grin_chain as chain;
extern crate grin_core as core;
extern crate grin_keychain as keychain;
extern crate grin_pow as pow;
extern crate rand;
extern crate time;

use std::fs;
use std::sync::Arc;

@@ -34,7 +34,7 @@ use core::global::MiningParameterMode;

use keychain::Keychain;

use pow::{types, cuckoo, MiningWorker};
use pow::{cuckoo, types, MiningWorker};

fn clean_output_dir(dir_name: &str) {
let _ = fs::remove_dir_all(dir_name);

@@ -180,7 +180,7 @@ fn mine_losing_fork() {
let bfork = prepare_block(&b1head, &chain, 3);

// add higher difficulty first, prepare its successor, then fork
// with lower diff
// with lower diff
chain.process_block(b2, chain::SKIP_POW).unwrap();
assert_eq!(chain.head_header().unwrap().hash(), b2head.hash());
let b3 = prepare_block(&b2head, &chain, 5);

@@ -195,13 +195,13 @@ fn mine_losing_fork() {
#[test]
fn longer_fork() {
// to make it easier to compute the sumtree roots in the test, we
// prepare 2 chains, the 2nd will be have the forked blocks we can
// then send back on the 1st
// prepare 2 chains, the 2nd will be have the forked blocks we can
// then send back on the 1st
let chain = setup(".grin4");
let chain_fork = setup(".grin5");

// add blocks to both chains, 20 on the main one, only the first 5
// for the forked chain
// for the forked chain
let mut prev = chain.head_header().unwrap();
for n in 0..10 {
let b = prepare_block(&prev, &chain, n + 2);

@@ -12,13 +12,13 @@
// See the License for the specific language governing permissions and
// limitations under the License.

extern crate grin_core as core;
extern crate env_logger;
extern crate grin_chain as chain;
extern crate grin_core as core;
extern crate grin_keychain as keychain;
extern crate grin_pow as pow;
extern crate env_logger;
extern crate time;
extern crate rand;
extern crate time;

use std::fs;
use std::sync::Arc;

@@ -32,7 +32,7 @@ use core::global::MiningParameterMode;

use keychain::Keychain;

use pow::{types, cuckoo, MiningWorker};
use pow::{cuckoo, types, MiningWorker};

fn clean_output_dir(dir_name: &str) {
let _ = fs::remove_dir_all(dir_name);

@@ -91,9 +91,11 @@ fn test_coinbase_maturity() {
).unwrap();

assert_eq!(block.outputs.len(), 1);
assert!(block.outputs[0].features.contains(
transaction::COINBASE_OUTPUT,
));
assert!(
block.outputs[0]
.features
.contains(transaction::COINBASE_OUTPUT,)
);

chain.process_block(block, chain::EASY_POW).unwrap();

@@ -109,7 +111,8 @@ fn test_coinbase_maturity() {
&keychain,
).unwrap();

let mut block = core::core::Block::new(&prev, vec![&coinbase_txn], &keychain, &key_id3).unwrap();
let mut block =
core::core::Block::new(&prev, vec![&coinbase_txn], &keychain, &key_id3).unwrap();
block.header.timestamp = prev.timestamp + time::Duration::seconds(60);

let difficulty = consensus::next_difficulty(chain.difficulty_iter()).unwrap();

@@ -130,7 +133,7 @@ fn test_coinbase_maturity() {
};

// mine enough blocks to increase the height sufficiently for
// coinbase to reach maturity and be spendable in the next block
// coinbase to reach maturity and be spendable in the next block
for _ in 0..3 {
let prev = chain.head_header().unwrap();

@@ -156,7 +159,8 @@ fn test_coinbase_maturity() {

let prev = chain.head_header().unwrap();

let mut block = core::core::Block::new(&prev, vec![&coinbase_txn], &keychain, &key_id4).unwrap();
let mut block =
core::core::Block::new(&prev, vec![&coinbase_txn], &keychain, &key_id4).unwrap();

block.header.timestamp = prev.timestamp + time::Duration::seconds(60);

@@ -23,7 +23,7 @@ use toml;
use grin::ServerConfig;
use pow::types::MinerConfig;
use util::LoggingConfig;
use types::{ConfigMembers, GlobalConfig, ConfigError};
use types::{ConfigError, ConfigMembers, GlobalConfig};

/// The default file name to use when trying to derive
/// the config file location

@@ -86,7 +86,6 @@ impl GlobalConfig {

// Give up
Err(ConfigError::FileNotFoundError(String::from("")))

}

/// Takes the path to a config file, or if NONE, tries

@@ -98,7 +97,7 @@ impl GlobalConfig {
if let Some(fp) = file_path {
return_value.config_file_path = Some(PathBuf::from(&fp));
} else {
let _result=return_value.derive_config_location();
let _result = return_value.derive_config_location();
}

// No attempt at a config file, just return defaults

@@ -120,8 +119,8 @@ impl GlobalConfig {
}

// Try to parse the config file if it exists
// explode if it does exist but something's wrong
// with it
// explode if it does exist but something's wrong
// with it
return_value.read_config()
}

@@ -134,7 +133,7 @@ impl GlobalConfig {
match decoded {
Ok(mut gc) => {
// Put the struct back together, because the config
// file was flattened a bit
// file was flattened a bit
gc.server.mining_config = gc.mining.clone();
self.using_config_file = true;
self.members = Some(gc);

@@ -28,11 +28,11 @@ extern crate toml;

extern crate grin_grin as grin;
extern crate grin_p2p as p2p;
extern crate grin_wallet as wallet;
extern crate grin_pow as pow;
extern crate grin_util as util;
extern crate grin_wallet as wallet;

pub mod config;
pub mod types;

pub use types::{GlobalConfig, ConfigMembers, ConfigError};
pub use types::{ConfigError, ConfigMembers, GlobalConfig};

@@ -41,14 +41,12 @@ pub enum ConfigError {
impl fmt::Display for ConfigError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
ConfigError::ParseError(ref file_name, ref message) => {
write!(
f,
"Error parsing configuration file at {} - {}",
file_name,
message
)
}
ConfigError::ParseError(ref file_name, ref message) => write!(
f,
"Error parsing configuration file at {} - {}",
file_name,
message
),
ConfigError::FileIOError(ref file_name, ref message) => {
write!(f, "{} {}", message, file_name)
}

@@ -102,7 +100,7 @@ pub struct ConfigMembers {
pub mining: Option<MinerConfig>,
/// Logging config
pub logging: Option<LoggingConfig>,

//removing wallet from here for now,
//as its concerns are separate from the server's, really
//given it needs to manage keys. It should probably

@@ -13,15 +13,15 @@

#![feature(test)]

extern crate test;
extern crate rand;
extern crate grin_core as core;
extern crate rand;
extern crate test;

use rand::Rng;
use test::Bencher;

use core::core::sumtree::{self, SumTree, Summable};
use core::ser::{Writeable, Writer, Error};
use core::ser::{Error, Writeable, Writer};

#[derive(Copy, Clone, Debug)]
struct TestElem([u32; 4]);

@@ -29,9 +29,9 @@ impl Summable for TestElem {
type Sum = u64;
fn sum(&self) -> u64 {
// sums are not allowed to overflow, so we use this simple
// non-injective "sum" function that will still be homomorphic
self.0[0] as u64 * 0x1000 + self.0[1] as u64 * 0x100 + self.0[2] as u64 * 0x10 +
self.0[3] as u64
// non-injective "sum" function that will still be homomorphic
self.0[0] as u64 * 0x1000 + self.0[1] as u64 * 0x100 + self.0[2] as u64 * 0x10
+ self.0[3] as u64
}
}

@@ -51,7 +51,7 @@ fn bench_small_tree(b: &mut Bencher) {
let mut big_tree = SumTree::new();
for i in 0..1000 {
// To avoid RNG overflow we generate random elements that are small.
// Though to avoid repeat elements they have to be reasonably big.
// Though to avoid repeat elements they have to be reasonably big.
let new_elem;
let word1 = rng.gen::<u16>() as u32;
let word2 = rng.gen::<u16>() as u32;

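A quick aside on the TestElem sum above: the mapping weighs each limb by a power of 16, which is what keeps it homomorphic under addition. A minimal standalone sketch (not part of the commit; the free function below is illustrative) showing that parent nodes in the sum tree can be computed from child sums alone:

fn sum(e: &[u32; 4]) -> u64 {
	// Same non-injective mapping as TestElem::sum: limbs weighted by
	// powers of 16, so sums of elements add component-wise.
	e[0] as u64 * 0x1000 + e[1] as u64 * 0x100 + e[2] as u64 * 0x10 + e[3] as u64
}

fn main() {
	let (a, b) = ([0, 0, 0, 1], [0, 0, 1, 0]);
	// A parent stores sum(a) + sum(b); no re-walk of the leaves is needed.
	assert_eq!(sum(&a) + sum(&b), 0x11);
}
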
@@ -28,7 +28,7 @@ use core::target::Difficulty;
pub const GRIN_BASE: u64 = 1_000_000_000;

/// The block subsidy amount
pub const REWARD: u64 = 50*GRIN_BASE;
pub const REWARD: u64 = 50 * GRIN_BASE;

/// Actual block reward for a given total fee amount
pub fn reward(fee: u64) -> u64 {

@@ -80,8 +80,8 @@ pub const MAX_BLOCK_WEIGHT: usize = 80_000;

/// Whether a block exceeds the maximum acceptable weight
pub fn exceeds_weight(input_len: usize, output_len: usize, kernel_len: usize) -> bool {
input_len * BLOCK_INPUT_WEIGHT + output_len * BLOCK_OUTPUT_WEIGHT +
kernel_len * BLOCK_KERNEL_WEIGHT > MAX_BLOCK_WEIGHT
input_len * BLOCK_INPUT_WEIGHT + output_len * BLOCK_OUTPUT_WEIGHT
+ kernel_len * BLOCK_KERNEL_WEIGHT > MAX_BLOCK_WEIGHT
}

/// Fork every 250,000 blocks for first 2 years, simple number and just a

@@ -150,9 +150,8 @@ pub fn next_difficulty<T>(cursor: T) -> Result<Difficulty, TargetError>
where
T: IntoIterator<Item = Result<(u64, Difficulty), TargetError>>,
{

// Block times at the begining and end of the adjustment window, used to
// calculate medians later.
// calculate medians later.
let mut window_begin = vec![];
let mut window_end = vec![];

@@ -165,8 +164,8 @@ where
let (ts, diff) = head_info?;

// Sum each element in the adjustment window. In addition, retain
// timestamps within median windows (at ]start;start-11] and ]end;end-11]
// to later calculate medians.
// timestamps within median windows (at ]start;start-11] and ]end;end-11]
// to later calculate medians.
if m < DIFFICULTY_ADJUST_WINDOW {
diff_sum = diff_sum + diff;

@@ -204,9 +203,7 @@ where
ts_damp
};

Ok(
diff_avg * Difficulty::from_num(BLOCK_TIME_WINDOW) / Difficulty::from_num(adj_ts),
)
Ok(diff_avg * Difficulty::from_num(BLOCK_TIME_WINDOW) / Difficulty::from_num(adj_ts))
}

/// Consensus rule that collections of items are sorted lexicographically over the wire.

@@ -225,7 +222,7 @@ mod test {
use super::*;

// Builds an iterator for next difficulty calculation with the provided
// constant time interval, difficulty and total length.
// constant time interval, difficulty and total length.
fn repeat(interval: u64, diff: u64, len: u64) -> Vec<Result<(u64, Difficulty), TargetError>> {
// watch overflow here, length shouldn't be ridiculous anyhow
assert!(len < std::usize::MAX as u64);

@@ -336,15 +333,15 @@ mod test {
}

// #[test]
// fn hard_fork_2() {
// assert!(valid_header_version(0, 1));
// assert!(valid_header_version(10, 1));
// assert!(valid_header_version(10, 2));
// assert!(valid_header_version(250_000, 1));
// assert!(!valid_header_version(250_001, 1));
// assert!(!valid_header_version(500_000, 1));
// assert!(valid_header_version(250_001, 2));
// assert!(valid_header_version(500_000, 2));
// assert!(!valid_header_version(500_001, 2));
// }
// fn hard_fork_2() {
// assert!(valid_header_version(0, 1));
// assert!(valid_header_version(10, 1));
// assert!(valid_header_version(10, 2));
// assert!(valid_header_version(250_000, 1));
// assert!(!valid_header_version(250_001, 1));
// assert!(!valid_header_version(500_000, 1));
// assert!(valid_header_version(250_001, 2));
// assert!(valid_header_version(500_000, 2));
// assert!(!valid_header_version(500_001, 2));
// }
}

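The weight rule reformatted above is easy to check end to end. A self-contained sketch: MAX_BLOCK_WEIGHT (80_000) comes straight from the hunk header, while the three per-item weights are illustrative placeholders, not the crate's actual constants:

const BLOCK_INPUT_WEIGHT: usize = 1; // assumed value, for illustration only
const BLOCK_OUTPUT_WEIGHT: usize = 10; // assumed value, for illustration only
const BLOCK_KERNEL_WEIGHT: usize = 2; // assumed value, for illustration only
const MAX_BLOCK_WEIGHT: usize = 80_000;

// Same shape as the consensus check in the diff: a weighted sum of the
// block's inputs, outputs and kernels measured against a fixed budget.
fn exceeds_weight(input_len: usize, output_len: usize, kernel_len: usize) -> bool {
	input_len * BLOCK_INPUT_WEIGHT + output_len * BLOCK_OUTPUT_WEIGHT
		+ kernel_len * BLOCK_KERNEL_WEIGHT > MAX_BLOCK_WEIGHT
}

fn main() {
	// 1_000*1 + 6_000*10 + 1_000*2 = 63_000, under the 80_000 budget.
	assert!(!exceeds_weight(1_000, 6_000, 1_000));
	// 30_000*1 + 6_000*10 + 1_000*2 = 92_000, over budget.
	assert!(exceeds_weight(30_000, 6_000, 1_000));
}
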
@@ -20,12 +20,12 @@ use util::secp::{self, Secp256k1};
use std::collections::HashSet;

use core::Committed;
use core::{Input, Output, SwitchCommitHash, Proof, TxKernel, Transaction, COINBASE_KERNEL,
use core::{Input, Output, Proof, SwitchCommitHash, Transaction, TxKernel, COINBASE_KERNEL,
COINBASE_OUTPUT};
use consensus::{MINIMUM_DIFFICULTY, REWARD, reward, exceeds_weight};
use consensus::{exceeds_weight, reward, MINIMUM_DIFFICULTY, REWARD};
use core::hash::{Hash, Hashed, ZERO_HASH};
use core::target::Difficulty;
use ser::{self, Readable, Reader, Writeable, Writer, WriteableSorted, read_and_verify_sorted};
use ser::{self, read_and_verify_sorted, Readable, Reader, Writeable, WriteableSorted, Writer};
use util::LOGGER;
use global;
use keychain;

@@ -282,9 +282,8 @@ impl Block {
reward_out: Output,
reward_kern: TxKernel,
) -> Result<Block, secp::Error> {

// note: the following reads easily but may not be the most efficient due to
// repeated iterations, revisit if a problem
// repeated iterations, revisit if a problem
let secp = Secp256k1::with_caps(secp::ContextFlag::Commit);

// validate each transaction and gather their kernels

@@ -292,8 +291,8 @@ impl Block {
kernels.push(reward_kern);

// build vectors with all inputs and all outputs, ordering them by hash
// needs to be a fold so we don't end up with a vector of vectors and we
// want to fully own the refs (not just a pointer like flat_map).
// needs to be a fold so we don't end up with a vector of vectors and we
// want to fully own the refs (not just a pointer like flat_map).
let inputs = txs.iter().fold(vec![], |mut acc, ref tx| {
let mut inputs = tx.inputs.clone();
acc.append(&mut inputs);

@@ -317,8 +316,8 @@ impl Block {
..time::now_utc()
},
previous: prev.hash(),
total_difficulty: prev.pow.clone().to_difficulty() +
prev.total_difficulty.clone(),
total_difficulty: prev.pow.clone().to_difficulty()
+ prev.total_difficulty.clone(),
..Default::default()
},
inputs: inputs,

@@ -439,7 +438,9 @@ impl Block {
}

if k.lock_height > self.header.height {
return Err(Error::KernelLockHeight { lock_height: k.lock_height });
return Err(Error::KernelLockHeight {
lock_height: k.lock_height,
});
}
}

@@ -465,18 +466,17 @@ impl Block {
}

// Validate the coinbase outputs generated by miners. Entails 2 main checks:
//
// * That the sum of all coinbase-marked outputs equal the supply.
// * That the sum of blinding factors for all coinbase-marked outputs match
// the coinbase-marked kernels.
//
// * That the sum of all coinbase-marked outputs equal the supply.
// * That the sum of blinding factors for all coinbase-marked outputs match
// the coinbase-marked kernels.
fn verify_coinbase(&self, secp: &Secp256k1) -> Result<(), Error> {
let cb_outs = filter_map_vec!(self.outputs, |out| if out.features.contains(
COINBASE_OUTPUT,
)
{
Some(out.commitment())
} else {
None
let cb_outs = filter_map_vec!(self.outputs, |out| {
if out.features.contains(COINBASE_OUTPUT) {
Some(out.commitment())
} else {
None
}
});
let cb_kerns = filter_map_vec!(self.kernels, |k| if k.features.contains(COINBASE_KERNEL) {
Some(k.excess)

@@ -557,14 +557,14 @@ mod test {
use util::secp;

// utility to create a block without worrying about the key or previous
// header
// header
fn new_block(txs: Vec<&Transaction>, keychain: &Keychain) -> Block {
let key_id = keychain.derive_key_id(1).unwrap();
Block::new(&BlockHeader::default(), txs, keychain, &key_id).unwrap()
}

// utility producing a transaction that spends an output with the provided
// value and blinding key
// value and blinding key
fn txspend1i1o(
v: u64,
keychain: &Keychain,

@@ -624,7 +624,7 @@ mod test {
let b = new_block(vec![&mut btx1, &mut btx2, &mut btx3], &keychain);

// block should have been automatically compacted (including reward
// output) and should still be valid
// output) and should still be valid
b.validate(&keychain.secp()).unwrap();
assert_eq!(b.inputs.len(), 3);
assert_eq!(b.outputs.len(), 3);

@@ -632,7 +632,7 @@ mod test {

#[test]
// builds 2 different blocks with a tx spending another and check if merging
// occurs
// occurs
fn mergeable_blocks() {
let keychain = Keychain::from_random_seed().unwrap();
let key_id1 = keychain.derive_key_id(1).unwrap();

@@ -685,14 +685,14 @@ mod test {
assert_eq!(coinbase_kernels.len(), 1);

// the block should be valid here (single coinbase output with corresponding
// txn kernel)
// txn kernel)
assert_eq!(b.validate(&keychain.secp()), Ok(()));
}

#[test]
// test that flipping the COINBASE_OUTPUT flag on the output features
// invalidates the block and specifically it causes verify_coinbase to fail
// additionally verifying the merkle_inputs_outputs also fails
// invalidates the block and specifically it causes verify_coinbase to fail
// additionally verifying the merkle_inputs_outputs also fails
fn remove_coinbase_output_flag() {
let keychain = Keychain::from_random_seed().unwrap();
let mut b = new_block(vec![], &keychain);

@@ -714,7 +714,7 @@ mod test {

#[test]
// test that flipping the COINBASE_KERNEL flag on the kernel features
// invalidates the block and specifically it causes verify_coinbase to fail
// invalidates the block and specifically it causes verify_coinbase to fail
fn remove_coinbase_kernel_flag() {
let keychain = Keychain::from_random_seed().unwrap();
let mut b = new_block(vec![], &keychain);

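The lock-height rule in the validate hunk above pairs with the KernelLockHeight test near the end of this file's diff, which expects the error to carry height 2. A standalone sketch with assumed stand-in types (not the crate's own definitions):

struct TxKernel {
	lock_height: u64,
}

#[derive(Debug, PartialEq)]
enum Error {
	KernelLockHeight { lock_height: u64 },
}

// Mirrors the check in Block::validate: a kernel locked past the
// including block's height invalidates the block.
fn check_kernel(k: &TxKernel, header_height: u64) -> Result<(), Error> {
	if k.lock_height > header_height {
		return Err(Error::KernelLockHeight {
			lock_height: k.lock_height,
		});
	}
	Ok(())
}

fn main() {
	let k = TxKernel { lock_height: 2 };
	assert_eq!(
		check_kernel(&k, 1),
		Err(Error::KernelLockHeight { lock_height: 2 })
	);
	assert!(check_kernel(&k, 2).is_ok());
}
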
@@ -27,11 +27,11 @@

use util::secp;

use core::{Transaction, Input, Output, SwitchCommitHash, DEFAULT_OUTPUT};
use core::{Input, Output, SwitchCommitHash, Transaction, DEFAULT_OUTPUT};
use core::transaction::kernel_sig_msg;
use util::LOGGER;
use keychain;
use keychain::{Keychain, BlindSum, BlindingFactor, Identifier};
use keychain::{BlindSum, BlindingFactor, Identifier, Keychain};

/// Context information available to transaction combinators.
pub struct Context<'a> {

@@ -40,7 +40,8 @@ pub struct Context<'a> {

/// Function type returned by the transaction combinators. Transforms a
/// (Transaction, BlindSum) pair into another, provided some context.
pub type Append = for<'a> Fn(&'a mut Context, (Transaction, BlindSum)) -> (Transaction, BlindSum);
pub type Append = for<'a> Fn(&'a mut Context, (Transaction, BlindSum))
-> (Transaction, BlindSum);

/// Adds an input with the provided value and blinding key to the transaction
/// being built.

@@ -132,10 +133,11 @@ pub fn transaction(
keychain: &keychain::Keychain,
) -> Result<(Transaction, BlindingFactor), keychain::Error> {
let mut ctx = Context { keychain };
let (mut tx, sum) = elems.iter().fold(
(Transaction::empty(), BlindSum::new()),
|acc, elem| elem(&mut ctx, acc),
);
let (mut tx, sum) = elems
.iter()
.fold((Transaction::empty(), BlindSum::new()), |acc, elem| {
elem(&mut ctx, acc)
});
let blind_sum = ctx.keychain.blind_sum(&sum)?;
let msg = secp::Message::from_slice(&kernel_sig_msg(tx.fee, tx.lock_height))?;
let sig = ctx.keychain.sign_with_blinding(&msg, &blind_sum)?;

@@ -24,7 +24,7 @@ use std::convert::AsRef;
use blake2::blake2b::Blake2b;

use consensus::VerifySortOrder;
use ser::{self, Reader, Readable, Writer, Writeable, Error, AsFixedBytes};
use ser::{self, AsFixedBytes, Error, Readable, Reader, Writeable, Writer};
use util::LOGGER;

/// A hash consisting of all zeroes, used as a sentinel. No known preimage.

@@ -153,7 +153,9 @@ impl HashWriter {

impl Default for HashWriter {
fn default() -> HashWriter {
HashWriter { state: Blake2b::new(32) }
HashWriter {
state: Blake2b::new(32),
}
}
}

@@ -173,19 +175,19 @@ pub trait Hashed {
/// Obtain the hash of the object
fn hash(&self) -> Hash;
/// Hash the object together with another writeable object
fn hash_with<T: Writeable>(&self, other:T) -> Hash;
fn hash_with<T: Writeable>(&self, other: T) -> Hash;
}

impl<W: ser::Writeable> Hashed for W {
fn hash(&self) -> Hash {
let mut hasher = HashWriter::default();
ser::Writeable::write(self, &mut hasher).unwrap();
ser::Writeable::write(self, &mut hasher).unwrap();
let mut ret = [0; 32];
hasher.finalize(&mut ret);
Hash(ret)
}

fn hash_with<T: Writeable>(&self, other:T) -> Hash{
fn hash_with<T: Writeable>(&self, other: T) -> Hash {
let mut hasher = HashWriter::default();
ser::Writeable::write(self, &mut hasher).unwrap();
trace!(LOGGER, "Hashing with additional data");

@@ -202,7 +204,8 @@ impl<T: Writeable> VerifySortOrder<T> for Vec<T> {
.map(|item| item.hash())
.collect::<Vec<_>>()
.windows(2)
.any(|pair| pair[0] > pair[1]) {
.any(|pair| pair[0] > pair[1])
{
true => Err(ser::Error::BadlySorted),
false => Ok(()),
}

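The VerifySortOrder hunk above is compact enough to lift into a standalone sketch; u64 stands in for the real Hash type, everything else follows the diff:

fn verify_sort_order(hashes: &[u64]) -> Result<(), &'static str> {
	// Any adjacent out-of-order pair means the collection was badly
	// sorted, the same windows(2) walk the diff performs.
	match hashes.windows(2).any(|pair| pair[0] > pair[1]) {
		true => Err("badly sorted"),
		false => Ok(()),
	}
}

fn main() {
	assert!(verify_sort_order(&[1, 2, 3]).is_ok());
	assert!(verify_sort_order(&[2, 1, 3]).is_err());
}
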
@@ -31,8 +31,8 @@ use util::secp::pedersen::*;

pub use self::block::*;
pub use self::transaction::*;
use self::hash::{Hashed};
use ser::{Writeable, Writer, Reader, Readable, Error};
use self::hash::Hashed;
use ser::{Error, Readable, Reader, Writeable, Writer};
use global;
// use keychain;

@@ -53,7 +53,7 @@ pub trait Committed {
let mut output_commits = map_vec!(self.outputs_committed(), |out| out.commitment());

// add the overage as output commitment if positive, as an input commitment if
// negative
// negative
let overage = self.overage();
if overage != 0 {
let over_commit = secp.commit_value(overage.abs() as u64).unwrap();

@@ -186,11 +186,11 @@ impl Writeable for Proof {
mod test {
use super::*;
use core::hash::ZERO_HASH;
use core::build::{input, output, with_fee, initial_tx, with_excess, with_lock_height};
use core::build::{initial_tx, input, output, with_excess, with_fee, with_lock_height};
use core::block::Error::KernelLockHeight;
use ser;
use keychain;
use keychain::{Keychain, BlindingFactor};
use keychain::{BlindingFactor, Keychain};

#[test]
#[should_panic(expected = "InvalidSecretKey")]

@@ -308,11 +308,11 @@ mod test {

{
// Alice gets 2 of her pre-existing outputs to send 5 coins to Bob, they
// become inputs in the new transaction
// become inputs in the new transaction
let (in1, in2) = (input(4, key_id1), input(3, key_id2));

// Alice builds her transaction, with change, which also produces the sum
// of blinding factors before they're obscured.
// of blinding factors before they're obscured.
let (tx, sum) =
build::transaction(vec![in1, in2, output(1, key_id3), with_fee(2)], &keychain)
.unwrap();

@@ -321,8 +321,8 @@ mod test {
}

// From now on, Bob only has the obscured transaction and the sum of
// blinding factors. He adds his output, finalizes the transaction so it's
// ready for broadcast.
// blinding factors. He adds his output, finalizes the transaction so it's
// ready for broadcast.
let (tx_final, _) = build::transaction(
vec![
initial_tx(tx_alice),

@@ -382,7 +382,7 @@ mod test {
let key_id3 = keychain.derive_key_id(3).unwrap();

// first check we can add a timelocked tx where lock height matches current block height
// and that the resulting block is valid
// and that the resulting block is valid
let tx1 = build::transaction(
vec![
input(5, key_id1.clone()),

@@ -421,7 +421,9 @@ mod test {
&key_id3.clone(),
).unwrap();
match b.validate(keychain.secp()) {
Err(KernelLockHeight { lock_height: height }) => {
Err(KernelLockHeight {
lock_height: height,
}) => {
assert_eq!(height, 2);
}
_ => panic!("expecting KernelLockHeight error here"),

@@ -119,7 +119,7 @@ where
T: Summable + Hashed,
{
/// Create a hash sum from a summable
pub fn from_summable<W: Writeable>(idx: u64, elmt: &T, hash_with:Option<W>) -> HashSum<T> {
pub fn from_summable<W: Writeable>(idx: u64, elmt: &T, hash_with: Option<W>) -> HashSum<T> {
let hash = match hash_with {
Some(h) => elmt.hash_with(h),
None => elmt.hash(),

@@ -259,7 +259,7 @@ where

/// Push a new Summable element in the MMR. Computes new related peaks at
/// the same time if applicable.
pub fn push<W: Writeable>(&mut self, elmt: T, hash_with:Option<W>) -> Result<u64, String> {
pub fn push<W: Writeable>(&mut self, elmt: T, hash_with: Option<W>) -> Result<u64, String> {
let elmt_pos = self.last_pos + 1;
let mut current_hashsum = HashSum::from_summable(elmt_pos, &elmt, hash_with);
let mut to_append = vec![current_hashsum.clone()];

@@ -267,14 +267,14 @@ where
let mut pos = elmt_pos;

// we look ahead one position in the MMR, if the expected node has a higher
// height it means we have to build a higher peak by summing with a previous
// sibling. we do it iteratively in case the new peak itself allows the
// creation of another parent.
// height it means we have to build a higher peak by summing with a previous
// sibling. we do it iteratively in case the new peak itself allows the
// creation of another parent.
while bintree_postorder_height(pos + 1) > height {
let left_sibling = bintree_jump_left_sibling(pos);
let left_hashsum = self.backend.get(left_sibling).expect(
"missing left sibling in tree, should not have been pruned",
);
let left_hashsum = self.backend
.get(left_sibling)
.expect("missing left sibling in tree, should not have been pruned");
current_hashsum = left_hashsum + current_hashsum;

to_append.push(current_hashsum.clone());

@@ -293,8 +293,8 @@ where
/// well as the consumer-provided index of when the change occurred.
pub fn rewind(&mut self, position: u64, index: u32) -> Result<(), String> {
// identify which actual position we should rewind to as the provided
// position is a leaf, which may had some parent that needs to exist
// afterward for the MMR to be valid
// position is a leaf, which may had some parent that needs to exist
// afterward for the MMR to be valid
let mut pos = position;
while bintree_postorder_height(pos + 1) > 0 {
pos += 1;

@@ -320,7 +320,7 @@ where
}

// loop going up the tree, from node to parent, as long as we stay inside
// the tree.
// the tree.
let mut to_prune = vec![];
let mut current = position;
while current + 1 < self.last_pos {

@@ -332,7 +332,7 @@ where
to_prune.push(current);

// if we have a pruned sibling, we can continue up the tree
// otherwise we're done
// otherwise we're done
if let None = self.backend.get(sibling) {
current = parent;
} else {

@@ -353,30 +353,30 @@ where
/// Helper function to get the last N nodes inserted, i.e. the last
/// n nodes along the bottom of the tree
pub fn get_last_n_insertions(&self, n: u64) -> Vec<HashSum<T>> {
let mut return_vec=Vec::new();
let mut return_vec = Vec::new();
let mut last_leaf = self.last_pos;
let size=self.unpruned_size();
//Special case that causes issues in bintree functions,
//just return
if size==1 {
let size = self.unpruned_size();
// Special case that causes issues in bintree functions,
// just return
if size == 1 {
return_vec.push(self.backend.get(last_leaf).unwrap());
return return_vec;
}
//if size is even, we're already at the bottom, otherwise
//we need to traverse down to it (reverse post-order direction)
// if size is even, we're already at the bottom, otherwise
// we need to traverse down to it (reverse post-order direction)
if size % 2 == 1 {
last_leaf=bintree_rightmost(self.last_pos);
last_leaf = bintree_rightmost(self.last_pos);
}
for _ in 0..n as u64 {
if last_leaf==0 {
if last_leaf == 0 {
break;
}
if bintree_postorder_height(last_leaf) > 0 {
last_leaf = bintree_rightmost(last_leaf);
}
return_vec.push(self.backend.get(last_leaf).unwrap());

last_leaf=bintree_jump_left_sibling(last_leaf);

last_leaf = bintree_jump_left_sibling(last_leaf);
}
return_vec
}

@@ -388,30 +388,30 @@ where
}

/// Debugging utility to print information about the MMRs. Short version
/// only prints the last 8 nodes.
/// only prints the last 8 nodes.
pub fn dump(&self, short: bool) {
let sz = self.unpruned_size();
if sz > 600 {
return;
}
let start = if short && sz > 7 { sz/8 - 1 } else { 0 };
for n in start..(sz/8+1) {
let mut idx = "".to_owned();
let mut hashes = "".to_owned();
for m in (n*8)..(n+1)*8 {
if m >= sz {
break;
}
idx.push_str(&format!("{:>8} ", m + 1));
let ohs = self.get(m+1);
match ohs {
Some(hs) => hashes.push_str(&format!("{} ", hs.hash)),
None => hashes.push_str(&format!("{:>8} ", "??")),
}
}
debug!(LOGGER, "{}", idx);
debug!(LOGGER, "{}", hashes);
}
let start = if short && sz > 7 { sz / 8 - 1 } else { 0 };
for n in start..(sz / 8 + 1) {
let mut idx = "".to_owned();
let mut hashes = "".to_owned();
for m in (n * 8)..(n + 1) * 8 {
if m >= sz {
break;
}
idx.push_str(&format!("{:>8} ", m + 1));
let ohs = self.get(m + 1);
match ohs {
Some(hs) => hashes.push_str(&format!("{} ", hs.hash)),
None => hashes.push_str(&format!("{:>8} ", "??")),
}
}
debug!(LOGGER, "{}", idx);
debug!(LOGGER, "{}", hashes);
}
}
}

@@ -503,19 +503,21 @@ pub struct PruneList {
impl PruneList {
/// Instantiate a new empty prune list
pub fn new() -> PruneList {
PruneList { pruned_nodes: vec![] }
PruneList {
pruned_nodes: vec![],
}
}

/// Computes by how many positions a node at pos should be shifted given the
/// number of nodes that have already been pruned before it.
pub fn get_shift(&self, pos: u64) -> Option<u64> {
// get the position where the node at pos would fit in the pruned list, if
// it's already pruned, nothing to skip
// it's already pruned, nothing to skip
match self.pruned_pos(pos) {
None => None,
Some(idx) => {
// skip by the number of elements pruned in the preceding subtrees,
// which is the sum of the size of each subtree
// which is the sum of the size of each subtree
Some(
self.pruned_nodes[0..(idx as usize)]
.iter()

@@ -557,8 +559,8 @@ impl PruneList {
Err(idx) => {
if self.pruned_nodes.len() > idx {
// the node at pos can't be a child of lower position nodes by MMR
// construction but can be a child of the next node, going up parents
// from pos to make sure it's not the case
// construction but can be a child of the next node, going up parents
// from pos to make sure it's not the case
let next_peak_pos = self.pruned_nodes[idx];
let mut cursor = pos;
loop {

@@ -583,15 +585,14 @@ impl PruneList {
/// side of the range, and navigates toward lower siblings toward the right
/// of the range.
fn peaks(num: u64) -> Vec<u64> {

// detecting an invalid mountain range, when siblings exist but no parent
// exists
// exists
if bintree_postorder_height(num + 1) > bintree_postorder_height(num) {
return vec![];
}

// our top peak is always on the leftmost side of the tree and leftmost trees
// have for index a binary values with all 1s (i.e. 11, 111, 1111, etc.)
// have for index a binary values with all 1s (i.e. 11, 111, 1111, etc.)
let mut top = 1;
while (top - 1) <= num {
top = top << 1;

@@ -604,7 +605,7 @@ fn peaks(num: u64) -> Vec<u64> {
let mut peaks = vec![top];

// going down the range, next peaks are right neighbors of the top. if one
// doesn't exist yet, we go down to a smaller peak to the left
// doesn't exist yet, we go down to a smaller peak to the left
let mut peak = top;
'outer: loop {
peak = bintree_jump_right_sibling(peak);

@@ -807,8 +808,8 @@ mod test {
#[allow(unused_variables)]
fn first_50_mmr_heights() {
let first_100_str = "0 0 1 0 0 1 2 0 0 1 0 0 1 2 3 0 0 1 0 0 1 2 0 0 1 0 0 1 2 3 4 \
0 0 1 0 0 1 2 0 0 1 0 0 1 2 3 0 0 1 0 0 1 2 0 0 1 0 0 1 2 3 4 5 \
0 0 1 0 0 1 2 0 0 1 0 0 1 2 3 0 0 1 0 0 1 2 0 0 1 0 0 1 2 3 4 0 0 1 0 0";
0 0 1 0 0 1 2 0 0 1 0 0 1 2 3 0 0 1 0 0 1 2 0 0 1 0 0 1 2 3 4 5 \
0 0 1 0 0 1 2 0 0 1 0 0 1 2 3 0 0 1 0 0 1 2 0 0 1 0 0 1 2 3 4 0 0 1 0 0";
let first_100 = first_100_str.split(' ').map(|n| n.parse::<u64>().unwrap());
let mut count = 1;
for n in first_100 {

@@ -844,9 +845,9 @@ mod test {
type Sum = u64;
fn sum(&self) -> u64 {
// sums are not allowed to overflow, so we use this simple
// non-injective "sum" function that will still be homomorphic
self.0[0] as u64 * 0x1000 + self.0[1] as u64 * 0x100 + self.0[2] as u64 * 0x10 +
self.0[3] as u64
// non-injective "sum" function that will still be homomorphic
self.0[0] as u64 * 0x1000 + self.0[1] as u64 * 0x100 + self.0[2] as u64 * 0x10
+ self.0[3] as u64
}
fn sum_len() -> usize {
8

@@ -896,7 +897,8 @@ mod test {

// two elements
pmmr.push(elems[1], None::<TestElem>).unwrap();
let sum2 = HashSum::from_summable(1, &elems[0], None::<TestElem>) + HashSum::from_summable(2, &elems[1], None::<TestElem>);
let sum2 = HashSum::from_summable(1, &elems[0], None::<TestElem>)
+ HashSum::from_summable(2, &elems[1], None::<TestElem>);
assert_eq!(pmmr.root(), sum2);
assert_eq!(pmmr.unpruned_size(), 3);

@@ -908,8 +910,9 @@ mod test {

// four elements
pmmr.push(elems[3], None::<TestElem>).unwrap();
let sum4 = sum2 +
(HashSum::from_summable(4, &elems[2], None::<TestElem>) + HashSum::from_summable(5, &elems[3], None::<TestElem>));
let sum4 = sum2
+ (HashSum::from_summable(4, &elems[2], None::<TestElem>)
+ HashSum::from_summable(5, &elems[3], None::<TestElem>));
assert_eq!(pmmr.root(), sum4);
assert_eq!(pmmr.unpruned_size(), 7);

@@ -921,8 +924,9 @@ mod test {

// six elements
pmmr.push(elems[5], None::<TestElem>).unwrap();
let sum6 = sum4.clone() +
(HashSum::from_summable(8, &elems[4], None::<TestElem>) + HashSum::from_summable(9, &elems[5], None::<TestElem>));
let sum6 = sum4.clone()
+ (HashSum::from_summable(8, &elems[4], None::<TestElem>)
+ HashSum::from_summable(9, &elems[5], None::<TestElem>));
assert_eq!(pmmr.root(), sum6.clone());
assert_eq!(pmmr.unpruned_size(), 10);

@@ -934,9 +938,11 @@ mod test {

// eight elements
pmmr.push(elems[7], None::<TestElem>).unwrap();
let sum8 = sum4 +
((HashSum::from_summable(8, &elems[4], None::<TestElem>) + HashSum::from_summable(9, &elems[5], None::<TestElem>)) +
(HashSum::from_summable(11, &elems[6], None::<TestElem>) + HashSum::from_summable(12, &elems[7], None::<TestElem>)));
let sum8 = sum4
+ ((HashSum::from_summable(8, &elems[4], None::<TestElem>)
+ HashSum::from_summable(9, &elems[5], None::<TestElem>))
+ (HashSum::from_summable(11, &elems[6], None::<TestElem>)
+ HashSum::from_summable(12, &elems[7], None::<TestElem>)));
assert_eq!(pmmr.root(), sum8);
assert_eq!(pmmr.unpruned_size(), 15);

@@ -949,7 +955,6 @@ mod test {

#[test]
fn pmmr_get_last_n_insertions() {

let elems = [
TestElem([0, 0, 0, 1]),
TestElem([0, 0, 0, 2]),

@@ -964,28 +969,31 @@ mod test {
let mut ba = VecBackend::new();
let mut pmmr = PMMR::new(&mut ba);

//test when empty
let res=pmmr.get_last_n_insertions(19);
assert!(res.len()==0);
// test when empty
let res = pmmr.get_last_n_insertions(19);
assert!(res.len() == 0);

pmmr.push(elems[0], None::<TestElem>).unwrap();
let res=pmmr.get_last_n_insertions(19);
assert!(res.len()==1 && res[0].sum==1);
let res = pmmr.get_last_n_insertions(19);
assert!(res.len() == 1 && res[0].sum == 1);

pmmr.push(elems[1], None::<TestElem>).unwrap();

let res = pmmr.get_last_n_insertions(12);
assert!(res[0].sum==2 && res[1].sum==1);
assert!(res[0].sum == 2 && res[1].sum == 1);

pmmr.push(elems[2], None::<TestElem>).unwrap();

let res = pmmr.get_last_n_insertions(2);
assert!(res[0].sum==3 && res[1].sum==2);
assert!(res[0].sum == 3 && res[1].sum == 2);

pmmr.push(elems[3], None::<TestElem>).unwrap();

let res = pmmr.get_last_n_insertions(19);
assert!(res[0].sum==4 && res[1].sum==3 && res[2].sum==2 && res[3].sum==1 && res.len()==4);
assert!(
res[0].sum == 4 && res[1].sum == 3 && res[2].sum == 2 && res[3].sum == 1
&& res.len() == 4
);

pmmr.push(elems[5], None::<TestElem>).unwrap();
pmmr.push(elems[6], None::<TestElem>).unwrap();

@@ -993,8 +1001,10 @@ mod test {
pmmr.push(elems[8], None::<TestElem>).unwrap();

let res = pmmr.get_last_n_insertions(7);
assert!(res[0].sum==9 && res[1].sum==8 && res[2].sum==7 && res[3].sum==6 && res.len()==7);

assert!(
res[0].sum == 9 && res[1].sum == 8 && res[2].sum == 7 && res[3].sum == 6
&& res.len() == 7
);
}

#[test]

@@ -20,13 +20,13 @@
//! wrapper in case the internal representation needs to change again

use std::fmt;
use std::ops::{Add, Mul, Div, Sub};
use std::ops::{Add, Div, Mul, Sub};

use serde::{Serialize, Serializer, Deserialize, Deserializer, de};
use byteorder::{ByteOrder, BigEndian};
use serde::{de, Deserialize, Deserializer, Serialize, Serializer};
use byteorder::{BigEndian, ByteOrder};

use core::hash::Hash;
use ser::{self, Reader, Writer, Writeable, Readable};
use ser::{self, Readable, Reader, Writeable, Writer};

/// The target is the 32-bytes hash block hashes must be lower than.
pub const MAX_TARGET: [u8; 8] = [0xf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff];

@@ -63,7 +63,9 @@ impl Difficulty {
let mut in_vec = h.to_vec();
in_vec.truncate(8);
let num = BigEndian::read_u64(&in_vec);
Difficulty { num: max_target / num }
Difficulty {
num: max_target / num,
}
}

/// Converts the difficulty into a u64

@@ -81,28 +83,36 @@ impl fmt::Display for Difficulty {
impl Add<Difficulty> for Difficulty {
type Output = Difficulty;
fn add(self, other: Difficulty) -> Difficulty {
Difficulty { num: self.num + other.num }
Difficulty {
num: self.num + other.num,
}
}
}

impl Sub<Difficulty> for Difficulty {
type Output = Difficulty;
fn sub(self, other: Difficulty) -> Difficulty {
Difficulty { num: self.num - other.num }
Difficulty {
num: self.num - other.num,
}
}
}

impl Mul<Difficulty> for Difficulty {
type Output = Difficulty;
fn mul(self, other: Difficulty) -> Difficulty {
Difficulty { num: self.num * other.num }
Difficulty {
num: self.num * other.num,
}
}
}

impl Div<Difficulty> for Difficulty {
type Output = Difficulty;
fn div(self, other: Difficulty) -> Difficulty {
Difficulty { num: self.num / other.num }
Difficulty {
num: self.num / other.num,
}
}
}

@@ -157,6 +167,8 @@ impl<'de> de::Visitor<'de> for DiffVisitor {
&"a value number",
));
};
Ok(Difficulty { num: num_in.unwrap() })
Ok(Difficulty {
num: num_in.unwrap(),
})
}
}

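Difficulty::from_hash, reformatted in the second hunk above, is small enough to exercise on its own. A sketch using the MAX_TARGET constant from the first hunk; the byteorder calls are the ones the diff uses, the rest is scaffolding:

extern crate byteorder;

use byteorder::{BigEndian, ByteOrder};

const MAX_TARGET: [u8; 8] = [0xf, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff];

fn difficulty_from_hash(h: &[u8; 32]) -> u64 {
	let max_target = BigEndian::read_u64(&MAX_TARGET);
	// Truncate the hash to its first 8 bytes, read them big-endian and
	// divide the maximum target by the result, as in the diff.
	let mut in_vec = h.to_vec();
	in_vec.truncate(8);
	let num = BigEndian::read_u64(&in_vec);
	max_target / num
}

fn main() {
	// A numerically smaller hash yields a higher difficulty.
	assert!(difficulty_from_hash(&[0x0f; 32]) > difficulty_from_hash(&[0xff; 32]));
}
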
@@ -14,7 +14,7 @@

//! Transactions

use byteorder::{ByteOrder, BigEndian};
use byteorder::{BigEndian, ByteOrder};
use blake2::blake2b::blake2b;
use util::secp::{self, Secp256k1, Message, Signature};
use util::secp::pedersen::{RangeProof, Commitment};

@@ -23,7 +23,7 @@ use std::ops;
use core::Committed;
use core::pmmr::Summable;
use keychain::{Identifier, Keychain};
use ser::{self, Reader, Writer, Readable, Writeable, WriteableSorted, read_and_verify_sorted};
use ser::{self, read_and_verify_sorted, Readable, Reader, Writeable, WriteableSorted, Writer};
use util::LOGGER;

/// The size to use for the stored blake2 hash of a switch_commitment

@@ -102,9 +102,8 @@ impl Writeable for TxKernel {

impl Readable for TxKernel {
fn read(reader: &mut Reader) -> Result<TxKernel, ser::Error> {
let features = KernelFeatures::from_bits(reader.read_u8()?).ok_or(
ser::Error::CorruptedData,
)?;
let features =
KernelFeatures::from_bits(reader.read_u8()?).ok_or(ser::Error::CorruptedData)?;

Ok(TxKernel {
features: features,

@@ -287,12 +286,12 @@ impl Transaction {
let sig = Signature::from_der(secp, &self.excess_sig)?;

// pretend the sum is a public key (which it is, being of the form r.G) and
// verify the transaction sig with it
//
// we originally converted the commitment to a key_id here (commitment to zero)
// and then passed the key_id to secp.verify()
// the secp api no longer allows us to do this so we have wrapped the complexity
// of generating a public key from a commitment behind verify_from_commit
// verify the transaction sig with it
//
// we originally converted the commitment to a key_id here (commitment to zero)
// and then passed the key_id to secp.verify()
// the secp api no longer allows us to do this so we have wrapped the complexity
// of generating a public key from a commitment behind verify_from_commit
secp.verify_from_commit(&msg, &sig, &rsum)?;

let kernel = TxKernel {

@@ -456,9 +455,8 @@ impl Writeable for Output {
/// an Output from a binary stream.
impl Readable for Output {
fn read(reader: &mut Reader) -> Result<Output, ser::Error> {
let features = OutputFeatures::from_bits(reader.read_u8()?).ok_or(
ser::Error::CorruptedData,
)?;
let features =
OutputFeatures::from_bits(reader.read_u8()?).ok_or(ser::Error::CorruptedData)?;

Ok(Output {
features: features,

@@ -494,13 +492,11 @@ impl Output {
/// value from the range proof and the commitment
pub fn recover_value(&self, keychain: &Keychain, key_id: &Identifier) -> Option<u64> {
match keychain.rewind_range_proof(key_id, self.commit, self.proof) {
Ok(proof_info) => {
if proof_info.success {
Some(proof_info.value)
} else {
None
}
}
Ok(proof_info) => if proof_info.success {
Some(proof_info.value)
} else {
None
},
Err(_) => None,
}
}

@@ -554,10 +550,9 @@ impl ops::Add for SumCommit {
type Output = SumCommit;

fn add(self, other: SumCommit) -> SumCommit {
let sum = match self.secp.commit_sum(
vec![self.commit.clone(), other.commit.clone()],
vec![],
) {
let sum = match self.secp
.commit_sum(vec![self.commit.clone(), other.commit.clone()], vec![])
{
Ok(s) => s,
Err(_) => Commitment::from_vec(vec![1; 33]),
};

@@ -61,8 +61,8 @@ pub enum MiningParameterMode {
}

lazy_static!{
/// The mining parameter mode
pub static ref MINING_PARAMETER_MODE: RwLock<MiningParameterMode> =
/// The mining parameter mode
pub static ref MINING_PARAMETER_MODE: RwLock<MiningParameterMode> =
RwLock::new(MiningParameterMode::Production);
}

@@ -25,8 +25,10 @@
extern crate bitflags;
extern crate blake2_rfc as blake2;
extern crate byteorder;
extern crate grin_keychain as keychain;
extern crate grin_util as util;
#[macro_use]
extern crate slog;
extern crate lazy_static;
extern crate num_bigint as bigint;
extern crate rand;
extern crate grin_keychain as keychain;

@@ -34,9 +36,9 @@ extern crate grin_util as util;
extern crate serde;
#[macro_use]
extern crate serde_derive;
extern crate time;
#[macro_use]
extern crate lazy_static;
extern crate slog;
extern crate time;

#[macro_use]
pub mod macros;

@@ -19,9 +19,9 @@
//! To use it simply implement `Writeable` or `Readable` and then use the
//! `serialize` or `deserialize` functions on them as appropriate.

use std::{error, fmt, cmp};
use std::io::{self, Write, Read};
use byteorder::{ByteOrder, ReadBytesExt, BigEndian};
use std::{cmp, error, fmt};
use std::io::{self, Read, Write};
use byteorder::{BigEndian, ByteOrder, ReadBytesExt};
use keychain::{Identifier, IDENTIFIER_SIZE};
use core::hash::Hashed;
use consensus::VerifySortOrder;

@@ -199,7 +199,8 @@ pub trait WriteableSorted {
/// A consensus rule requires everything is sorted lexicographically to avoid
/// leaking any information through specific ordering of items.
pub fn read_and_verify_sorted<T>(reader: &mut Reader, count: u64) -> Result<Vec<T>, Error>
where T: Readable + Hashed + Writeable
where
T: Readable + Hashed + Writeable,
{
let result: Vec<T> = try!((0..count).map(|_| T::read(reader)).collect());
result.verify_sort_order()?;

@@ -276,9 +277,10 @@ impl<'a> Reader for BinReader<'a> {
return Err(Error::TooLargeReadErr);
}
let mut buf = vec![0; length];
self.source.read_exact(&mut buf).map(move |_| buf).map_err(
Error::IOErr,
)
self.source
.read_exact(&mut buf)
.map(move |_| buf)
.map_err(Error::IOErr)
}

fn expect_u8(&mut self, val: u8) -> Result<u8, Error> {

@@ -532,7 +534,8 @@ impl AsFixedBytes for [u8; 20] {
fn len(&self) -> usize {
return 20;
}
}impl AsFixedBytes for [u8; 32] {
}
impl AsFixedBytes for [u8; 32] {
fn len(&self) -> usize {
return 32;
}

@@ -105,7 +105,7 @@ Before running your mining server, a wallet server needs to be set up and listen

See [wallet](wallet.md) for more info on the various Grin wallet commands and options.

This will create a wallet server listening on the default port 13416 with the password "password". Next, in another terminal window in the 'node1' directory, run a full mining node with the following command:
This will create a wallet server listening on the default port 13415 with the password "password". Next, in another terminal window in the 'node1' directory, run a full mining node with the following command:

node1$ grin server -m run

@@ -111,11 +111,11 @@ attempt_time_per_block = 90

#the wallet reciever to which coinbase rewards will be sent

wallet_receiver_url = "http://127.0.0.1:13416"
wallet_receiver_url = "http://127.0.0.1:13415"

#whether to ignore the reward (mostly for testing)

burn_reward = false
burn_reward = true

#testing value, optional
#slow_down_in_millis = 30

@@ -27,6 +27,7 @@ serde_json = "~1.0.2"
tokio-core="~0.1.1"
tokio-timer="~0.1.0"
rand = "^0.3"
router = "~0.5.1"
itertools = "~0.6.0"

[dev_dependencies]

@ -21,7 +21,7 @@ use core::core::{self, Output};
|
|||
use core::core::block::BlockHeader;
|
||||
use core::core::hash::{Hash, Hashed};
|
||||
use core::core::target::Difficulty;
|
||||
use p2p::{self, NetAdapter, Server, PeerStore, PeerData, State};
|
||||
use p2p::{self, NetAdapter, PeerData, PeerStore, Server, State};
|
||||
use pool;
|
||||
use util::secp::pedersen::Commitment;
|
||||
use util::OneTime;
|
||||
|
@ -180,11 +180,8 @@ impl NetAdapter for NetToChainAdapter {
|
|||
/// Find good peers we know with the provided capability and return their
|
||||
/// addresses.
|
||||
fn find_peer_addrs(&self, capab: p2p::Capabilities) -> Vec<SocketAddr> {
|
||||
let peers = self.peer_store.find_peers(
|
||||
State::Healthy,
|
||||
capab,
|
||||
p2p::MAX_PEER_ADDRS as usize,
|
||||
);
|
||||
let peers = self.peer_store
|
||||
.find_peers(State::Healthy, capab, p2p::MAX_PEER_ADDRS as usize);
|
||||
debug!(LOGGER, "Got {} peer addrs to send.", peers.len());
|
||||
map_vec!(peers, |p| p.addr)
|
||||
}
|
||||
|
@@ -244,11 +241,11 @@ impl NetToChainAdapter {
pub fn start_sync(&self, sync: sync::Syncer) {
let arc_sync = Arc::new(sync);
self.syncer.init(arc_sync.clone());
let _ = thread::Builder::new().name("syncer".to_string()).spawn(
move || {
let _ = thread::Builder::new()
.name("syncer".to_string())
.spawn(move || {
let _ = arc_sync.run();
},
);
});
}

pub fn syncing(&self) -> bool {
@@ -325,7 +322,9 @@ impl pool::PoolAdapter for PoolToNetAdapter {
impl PoolToNetAdapter {
/// Create a new pool to net adapter
pub fn new() -> PoolToNetAdapter {
PoolToNetAdapter { p2p: OneTime::new() }
PoolToNetAdapter {
p2p: OneTime::new(),
}
}

/// Setup the p2p server on the adapter

@@ -345,7 +344,9 @@ pub struct PoolToChainAdapter {
impl PoolToChainAdapter {
/// Create a new pool adapter
pub fn new() -> PoolToChainAdapter {
PoolToChainAdapter { chain: OneTime::new() }
PoolToChainAdapter {
chain: OneTime::new(),
}
}

pub fn set_chain(&self, chain_ref: Arc<chain::Chain>) {
@@ -355,13 +356,14 @@ impl PoolToChainAdapter {

impl pool::BlockChain for PoolToChainAdapter {
fn get_unspent(&self, output_ref: &Commitment) -> Result<Output, pool::PoolError> {
self.chain.borrow().get_unspent(output_ref).map_err(
|e| match e {
self.chain
.borrow()
.get_unspent(output_ref)
.map_err(|e| match e {
chain::types::Error::OutputNotFound => pool::PoolError::OutputNotFound,
chain::types::Error::OutputSpent => pool::PoolError::OutputSpent,
_ => pool::PoolError::GenericPoolError,
},
)
})
}

fn get_block_header_by_output_commit(

@@ -375,8 +377,9 @@ impl pool::BlockChain for PoolToChainAdapter {
}

fn head_header(&self) -> Result<BlockHeader, pool::PoolError> {
self.chain.borrow().head_header().map_err(|_| {
pool::PoolError::GenericPoolError
})
self.chain
.borrow()
.head_header()
.map_err(|_| pool::PoolError::GenericPoolError)
}
}

@@ -1,4 +1,4 @@
// Copyright 2016 The Grin Developers
// Copyright 2016-2017 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -21,32 +21,32 @@
#![deny(unused_mut)]
#![warn(missing_docs)]

#[macro_use]
extern crate slog;
extern crate futures;
extern crate futures_cpupool as cpupool;
extern crate hyper;
extern crate itertools;
extern crate rand;
extern crate serde;
#[macro_use]
extern crate serde_derive;
extern crate serde_json;
#[macro_use]
extern crate slog;
extern crate time;
extern crate tokio_core;
extern crate tokio_timer;
extern crate itertools;

extern crate grin_api as api;
extern crate grin_chain as chain;
#[macro_use]
extern crate grin_core as core;
extern crate grin_keychain as keychain;
extern crate grin_p2p as p2p;
extern crate grin_pool as pool;
extern crate grin_pow as pow;
extern crate grin_store as store;
extern crate grin_util as util;
extern crate grin_keychain as keychain;
extern crate grin_wallet as wallet;
extern crate grin_pow as pow;

mod adapters;
mod server;

@@ -56,4 +56,4 @@ mod types;
mod miner;

pub use server::Server;
pub use types::{ServerConfig, Seeding, ServerStats};
pub use types::{Seeding, ServerConfig, ServerStats};

@@ -17,7 +17,7 @@

use rand::{self, Rng};
use std::sync::{Arc, RwLock};
use std::{thread, str};
use std::{str, thread};
use std;
use time;

@@ -97,7 +97,6 @@ impl ser::Writer for HeaderPartWriter {
for i in 0..bytes_in.len() {
self.pre_nonce.push(bytes_in.as_ref()[i])
}

} else if self.bytes_written != 0 {
for i in 0..bytes_in.len() {
self.post_nonce.push(bytes_in.as_ref()[i])

@@ -158,7 +157,6 @@ impl Miner {
latest_hash: &Hash,
attempt_time_per_block: u32,
) -> Option<Proof> {
debug!(
LOGGER,
"(Server ID: {}) Mining at Cuckoo{} for at most {} secs at height {} and difficulty {}.",

@@ -170,9 +168,9 @@ impl Miner {
);

// look for a pow for at most attempt_time_per_block sec on the
// same block (to give a chance to new
// transactions) and as long as the head hasn't changed
// Will change this to something else at some point
// same block (to give a chance to new
// transactions) and as long as the head hasn't changed
// Will change this to something else at some point
let deadline = time::get_time().sec + attempt_time_per_block as i64;

// how often to output stats
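(The comment above captures the whole control flow: attempt proofs on one block until a solution is found, the head changes, or the time budget runs out. A self-contained sketch using std time; the attempt closure is purely illustrative, and the real loop also bails out when the chain head changes.)

use std::time::{Duration, Instant};

// Keep attempting proofs until one is found or the per-block budget runs out.
fn mine_until_deadline<P, F>(attempt_time_per_block: u64, mut attempt: F) -> Option<P>
where
    F: FnMut() -> Option<P>,
{
    let deadline = Instant::now() + Duration::from_secs(attempt_time_per_block);
    while Instant::now() < deadline {
        if let Some(proof) = attempt() {
            return Some(proof); // solved before the deadline
        }
    }
    None // caller rebuilds on the (possibly new) chain head and retries
}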

@@ -215,7 +213,7 @@ impl Miner {
debug!(
LOGGER,
"Mining: Plugin {} - Device {} ({}): Last Solution time: {}s; \
Solutions per second: {:.*} - Total Attempts: {}",
Solutions per second: {:.*} - Total Attempts: {}",
i,
s.device_id,
s.device_name,

@@ -248,7 +246,6 @@ impl Miner {
job_handle.stop_jobs();
sol
}

/// The inner part of mining loop for cuckoo miner sync mode

@@ -262,8 +259,8 @@ impl Miner {
latest_hash: &mut Hash,
) -> Option<Proof> {
// look for a pow for at most attempt_time_per_block sec on the same block (to
// give a chance to new
// transactions) and as long as the head hasn't changed
// give a chance to new
// transactions) and as long as the head hasn't changed
let deadline = time::get_time().sec + attempt_time_per_block as i64;
let stat_check_interval = 3;
let mut next_stat_check = time::get_time().sec + stat_check_interval;

@@ -271,7 +268,7 @@ impl Miner {
debug!(
LOGGER,
"(Server ID: {}) Mining at Cuckoo{} for {} secs (will wait for last solution) \
on block {} at difficulty {}.",
on block {} at difficulty {}.",
self.debug_output_id,
cuckoo_size,
attempt_time_per_block,

@@ -291,7 +288,6 @@ impl Miner {
let mut sol = None;
while head.hash() == *latest_hash && time::get_time().sec < deadline {
let pow_hash = b.hash();
if let Ok(proof) = plugin_miner.mine(&pow_hash[..]) {
let proof_diff = proof.clone().to_difficulty();

@@ -324,8 +320,8 @@ impl Miner {
iter_count += 1;

// Artificial slow down
if self.config.slow_down_in_millis != None &&
self.config.slow_down_in_millis.unwrap() > 0
if self.config.slow_down_in_millis != None
&& self.config.slow_down_in_millis.unwrap() > 0
{
thread::sleep(std::time::Duration::from_millis(
self.config.slow_down_in_millis.unwrap(),

@@ -356,7 +352,7 @@ impl Miner {
latest_hash: &mut Hash,
) -> Option<Proof> {
// look for a pow for at most 2 sec on the same block (to give a chance to new
// transactions) and as long as the head hasn't changed
// transactions) and as long as the head hasn't changed
let deadline = time::get_time().sec + attempt_time_per_block as i64;

debug!(

@@ -381,7 +377,6 @@ impl Miner {
let mut sol = None;
while head.hash() == *latest_hash && time::get_time().sec < deadline {
let pow_hash = b.hash();
if let Ok(proof) = miner.mine(&pow_hash[..]) {
let proof_diff = proof.clone().to_difficulty();

@@ -396,8 +391,8 @@ impl Miner {
iter_count += 1;

// Artificial slow down
if self.config.slow_down_in_millis != None &&
self.config.slow_down_in_millis.unwrap() > 0
if self.config.slow_down_in_millis != None
&& self.config.slow_down_in_millis.unwrap() > 0
{
thread::sleep(std::time::Duration::from_millis(
self.config.slow_down_in_millis.unwrap(),

@@ -419,7 +414,6 @@ impl Miner {
/// Starts the mining loop, building a new block on top of the existing
/// chain anytime required and looking for PoW solution.
pub fn run_loop(&self, miner_config: MinerConfig, cuckoo_size: u32, proof_size: usize) {
info!(
LOGGER,
"(Server ID: {}) Starting miner loop.",

@@ -443,8 +437,8 @@ impl Miner {
}

// to prevent the wallet from generating a new HD key derivation for each
// iteration, we keep the returned derivation to provide it back when
// nothing has changed
// iteration, we keep the returned derivation to provide it back when
// nothing has changed
let mut key_id = None;

loop {

@@ -550,9 +544,10 @@ impl Miner {
let difficulty = consensus::next_difficulty(diff_iter).unwrap();

// extract current transaction from the pool
let txs_box = self.tx_pool.read().unwrap().prepare_mineable_transactions(
MAX_TX,
);
let txs_box = self.tx_pool
.read()
.unwrap()
.prepare_mineable_transactions(MAX_TX);
let txs: Vec<&Transaction> = txs_box.iter().map(|tx| tx.as_ref()).collect();

// build the coinbase and the block itself

@@ -564,7 +559,8 @@ impl Miner {
height,
};

// TODO - error handling, things can go wrong with get_coinbase (wallet api down etc.)
// TODO - error handling, things can go wrong with get_coinbase (wallet api
// down etc.)
let (output, kernel, block_fees) = self.get_coinbase(block_fees).unwrap();
let mut b = core::Block::with_reward(head, txs, output, kernel).unwrap();

@@ -585,9 +581,9 @@ impl Miner {
b.header.nonce = rng.gen();
b.header.difficulty = difficulty;
b.header.timestamp = time::at_utc(time::Timespec::new(now_sec, 0));
self.chain.set_sumtree_roots(&mut b).expect(
"Error setting sum tree roots",
);
self.chain
.set_sumtree_roots(&mut b)
.expect("Error setting sum tree roots");
(b, block_fees)
}

@@ -600,8 +596,8 @@ impl Miner {
) -> Result<(core::Output, core::TxKernel, BlockFees), Error> {
let keychain = Keychain::from_random_seed().unwrap();
let key_id = keychain.derive_key_id(1).unwrap();
let (out, kernel) = core::Block::reward_output(&keychain, &key_id, block_fees.fees)
.unwrap();
let (out, kernel) =
core::Block::reward_output(&keychain, &key_id, block_fees.fees).unwrap();
Ok((out, kernel, block_fees))
}

@@ -613,8 +609,9 @@ impl Miner {
self.burn_reward(block_fees)
} else {
let url = format!(
"{}/v2/receive/coinbase",
self.config.wallet_receiver_url.as_str());
"{}/v1/receive/coinbase",
self.config.wallet_receiver_url.as_str()
);

let res = wallet::client::create_coinbase(&url, &block_fees)?;

@@ -63,14 +63,13 @@ impl Seeder {
seed_list: Box<Future<Item = Vec<SocketAddr>, Error = String>>,
) {
// open a channel with a listener that connects every peer address sent below
// max peer count
// max peer count
let (tx, rx) = futures::sync::mpsc::unbounded();
h.spawn(self.listen_for_addrs(h.clone(), rx));

// check seeds and start monitoring connections
let seeder = self.connect_to_seeds(tx.clone(), seed_list).join(
self.monitor_peers(tx.clone()),
);
let seeder = self.connect_to_seeds(tx.clone(), seed_list)
.join(self.monitor_peers(tx.clone()));

h.spawn(seeder.map(|_| ()).map_err(|e| {
error!(LOGGER, "Seeding or peer monitoring error: {}", e);

@@ -86,13 +85,12 @@ impl Seeder {
let p2p_server = self.p2p.clone();

// now spawn a new future to regularly check if we need to acquire more peers
// and if so, gets them from db
// and if so, gets them from db
let mon_loop = Timer::default()
.interval(time::Duration::from_secs(10))
.for_each(move |_| {
// maintenance step first, clean up p2p server peers and mark bans
// if needed
// if needed
let disconnected = p2p_server.clean_peers();
for p in disconnected {
if p.is_banned() {

@@ -135,7 +133,7 @@ impl Seeder {
}

// Check if we have any pre-existing peer in db. If so, start with those,
// otherwise use the seeds provided.
// otherwise use the seeds provided.
fn connect_to_seeds(
&self,
tx: mpsc::UnboundedSender<SocketAddr>,

@@ -144,7 +142,7 @@ impl Seeder {
let peer_store = self.peer_store.clone();

// a thread pool is required so we don't block the event loop with a
// db query
// db query
let thread_pool = cpupool::CpuPool::new(1);
let seeder = thread_pool
.spawn_fn(move || {

@@ -231,8 +229,10 @@ pub fn web_seeds(h: reactor::Handle) -> Box<Future<Item = Vec<SocketAddr>, Error
})
.and_then(|res| {
// collect all chunks and split around whitespace to get a list of SocketAddr
res.body().collect().map_err(|e| e.to_string()).and_then(
|chunks| {
res.body()
.collect()
.map_err(|e| e.to_string())
.and_then(|chunks| {
let res = chunks.iter().fold("".to_string(), |acc, ref chunk| {
acc + str::from_utf8(&chunk[..]).unwrap()
});

@@ -240,8 +240,7 @@ pub fn web_seeds(h: reactor::Handle) -> Box<Future<Item = Vec<SocketAddr>, Error
.map(|s| s.parse().unwrap())
.collect::<Vec<_>>();
Ok(addrs)
},
)
})
})
});
Box::new(seeds)

@@ -79,7 +79,6 @@ impl Server {

/// Instantiates a new server associated with the provided future reactor.
pub fn future(mut config: ServerConfig, evt_handle: &reactor::Handle) -> Result<Server, Error> {
let pool_adapter = Arc::new(PoolToChainAdapter::new());
let pool_net_adapter = Arc::new(PoolToNetAdapter::new());
let tx_pool = Arc::new(RwLock::new(pool::TransactionPool::new(

@@ -23,7 +23,7 @@ const MAX_BODY_DOWNLOADS: usize = 8;
use std::ops::{Deref, DerefMut};
use std::sync::{Arc, Mutex};
use std::thread;
use std::time::{Instant, Duration};
use std::time::{Duration, Instant};

use core::core::hash::{Hash, Hashed};
use chain;
@@ -87,7 +87,7 @@ impl Syncer {
self.init_download()?;

// main syncing loop, requests more headers and bodies periodically as long
// as a peer with higher difficulty exists and we're not fully caught up
// as a peer with higher difficulty exists and we're not fully caught up
info!(LOGGER, "Starting sync loop.");
loop {
let tip = self.chain.get_header_head()?;

@@ -139,7 +139,7 @@ impl Syncer {
let mut blocks_to_download = self.blocks_to_download.lock().unwrap();

// go back the chain and insert for download all blocks we only have the
// head for
// head for
let mut prev_h = header_head.last_block_h;
while prev_h != full_head.last_block_h {
let header = self.chain.get_block_header(&prev_h)?;

@@ -184,7 +184,7 @@ impl Syncer {
}

// consume hashes from blocks to download, place them in downloading and
// request them from the network
// request them from the network
let mut count = 0;
while blocks_to_download.len() > 0 && blocks_downloading.len() < MAX_BODY_DOWNLOADS {
let h = blocks_to_download.pop().unwrap();

@@ -197,21 +197,21 @@ impl Syncer {
});
}
debug!(
LOGGER,
LOGGER,
"Requested {} full blocks to download, total left: {}. Current list: {:?}.",
count,
count,
blocks_to_download.len(),
blocks_downloading.deref(),
);
blocks_downloading.deref(),
);
}

/// We added a block, clean up the downloading structure
pub fn block_received(&self, bh: Hash) {
// just clean up the downloading list
let mut bds = self.blocks_downloading.lock().unwrap();
bds.iter().position(|ref h| h.hash == bh).map(
|n| bds.remove(n),
);
bds.iter()
.position(|ref h| h.hash == bh)
.map(|n| bds.remove(n));
}

/// Request some block headers from a peer to advance us

@@ -257,7 +257,7 @@ impl Syncer {
/// us the right block headers.
fn get_locator(&self, tip: &chain::Tip) -> Result<Vec<Hash>, Error> {
// Prepare the heights we want as the latests height minus increasing powers
// of 2 up to max.
// of 2 up to max.
let mut heights = vec![tip.height];
let mut tail = (1..p2p::MAX_LOCATORS)
.map(|n| 2u64.pow(n))

@@ -271,7 +271,7 @@ impl Syncer {
debug!(LOGGER, "Loc heights: {:?}", heights);

// Iteratively travel the header chain back from our head and retain the
// headers at the wanted heights.
// headers at the wanted heights.
let mut header = self.chain.get_block_header(&tip.last_block_h)?;
let mut locator = vec![];
while heights.len() > 0 {
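(The powers-of-2 locator described in the comment above is easy to state on its own. An illustrative, self-contained sketch, not the crate's get_locator; the clamp-to-genesis behaviour is an assumption.)

// Heights for a block locator: tip, tip-2, tip-4, tip-8, ..., clamped at 0.
fn locator_heights(tip_height: u64, max_locators: u32) -> Vec<u64> {
    let mut heights = vec![tip_height];
    for n in 1..max_locators {
        let dist = 2u64.pow(n);
        if dist >= tip_height {
            heights.push(0); // always end at the genesis height
            break;
        }
        heights.push(tip_height - dist);
    }
    heights
}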

@@ -119,7 +119,7 @@ impl Default for ServerConfig {
fn default() -> ServerConfig {
ServerConfig {
db_root: ".grin".to_string(),
api_http_addr: "0.0.0.0:13415".to_string(),
api_http_addr: "0.0.0.0:13413".to_string(),
capabilities: p2p::FULL_NODE,
seeding_type: Seeding::None,
seeds: None,

@@ -12,21 +12,21 @@
// See the License for the specific language governing permissions and
// limitations under the License.

extern crate grin_grin as grin;
extern crate grin_core as core;
extern crate grin_p2p as p2p;
extern crate grin_chain as chain;
extern crate grin_api as api;
extern crate grin_wallet as wallet;
extern crate grin_chain as chain;
extern crate grin_core as core;
extern crate grin_grin as grin;
extern crate grin_keychain as keychain;
extern crate grin_p2p as p2p;
extern crate grin_pow as pow;
extern crate grin_util as util;
extern crate grin_wallet as wallet;

extern crate blake2_rfc as blake2;
extern crate futures;
extern crate futures_cpupool;
extern crate tokio_core;
extern crate tokio_timer;
extern crate futures_cpupool;

use std::thread;
use std::time;

@@ -42,9 +42,7 @@ use util::secp::Secp256k1;
use self::keychain::Keychain;
use wallet::WalletConfig;

/// Just removes all results from previous runs
pub fn clean_all_output(test_name_dir: &str) {
let target_dir = format!("target/test_servers/{}", test_name_dir);
let result = fs::remove_dir_all(target_dir);

@@ -116,9 +114,9 @@ impl Default for LocalServerContainerConfig {
LocalServerContainerConfig {
name: String::from("test_host"),
base_addr: String::from("127.0.0.1"),
api_server_port: 13413,
p2p_server_port: 13414,
api_server_port: 13415,
wallet_port: 13416,
wallet_port: 13415,
seed_addr: String::from(""),
is_seeding: false,
start_miner: false,
@@ -172,15 +170,15 @@ impl LocalServerContainer {
let working_dir = format!("target/test_servers/{}", config.name);
Ok(
(LocalServerContainer {
config: config,
p2p_server_stats: None,
api_server: None,
server_is_running: false,
server_is_mining: false,
wallet_is_running: false,
working_dir: working_dir,
peer_list: Vec::new(),
}),
config: config,
p2p_server_stats: None,
api_server: None,
server_is_running: false,
server_is_mining: false,
wallet_is_running: false,
working_dir: working_dir,
peer_list: Vec::new(),
}),
)
}

@@ -256,14 +254,12 @@ impl LocalServerContainer {
}

s.get_server_stats().unwrap()
}

/// Starts a wallet daemon to receive and returns the
/// listening server url
pub fn run_wallet(&mut self, _duration_in_seconds: u64) {
// URL on which to start the wallet listener (i.e. api server)
let url = format!("{}:{}", self.config.base_addr, self.config.wallet_port);

@@ -287,23 +283,22 @@ impl LocalServerContainer {
wallet_config.check_node_api_http_addr = self.config.wallet_validating_node_url.clone();
wallet_config.data_file_dir = self.working_dir.clone();

let receive_tx_handler = wallet::WalletReceiver {
config: wallet_config.clone(),
keychain: keychain.clone(),
};
let router = router!(
receive_tx: get "/receive/transaction" => receive_tx_handler,
);

let mut api_server = api::ApiServer::new("/v1".to_string());

api_server.register_endpoint(
"/receive".to_string(),
wallet::WalletReceiver {
keychain: keychain,
config: wallet_config,
},
);

api_server.register_handler(router);
api_server.start(url).unwrap_or_else(|e| {
println!("Failed to start Grin wallet receiver: {}.", e);
});

self.api_server = Some(api_server);
self.wallet_is_running = true;
}

/// Stops the running wallet server

@@ -384,13 +379,13 @@ pub struct LocalServerContainerPool {
impl LocalServerContainerPool {
pub fn new(config: LocalServerContainerPoolConfig) -> LocalServerContainerPool {
(LocalServerContainerPool {
next_api_port: config.base_api_port,
next_p2p_port: config.base_p2p_port,
next_wallet_port: config.base_wallet_port,
config: config,
server_containers: Vec::new(),
is_seeding: false,
})
next_api_port: config.base_api_port,
next_p2p_port: config.base_p2p_port,
next_wallet_port: config.base_wallet_port,
config: config,
server_containers: Vec::new(),
is_seeding: false,
})
}

/// adds a single server on the next available port

@@ -400,7 +395,6 @@ impl LocalServerContainerPool {
///
pub fn create_server(&mut self, server_config: &mut LocalServerContainerConfig) {
// If we're calling it this way, need to override these
server_config.p2p_server_port = self.next_p2p_port;
server_config.api_server_port = self.next_api_port;

@@ -440,12 +434,10 @@ impl LocalServerContainerPool {
// self.server_containers.push(server_arc);

// Create a future that runs the server for however many seconds
// collect them all and run them in the run_all_servers
// collect them all and run them in the run_all_servers
let _run_time = self.config.run_length_in_seconds;

self.server_containers.push(server_container);
}

/// adds n servers, ready to run

@@ -463,7 +455,6 @@ impl LocalServerContainerPool {
///
pub fn run_all_servers(self) -> Vec<grin::ServerStats> {
let run_length = self.config.run_length_in_seconds;
let mut handles = vec![];

@@ -477,18 +468,17 @@ impl LocalServerContainerPool {
let handle = thread::spawn(move || {
if is_seeding && !s.config.is_seeding {
// there's a seed and we're not it, so hang around longer and give the seed
// a chance to start
// a chance to start
thread::sleep(time::Duration::from_millis(2000));
}
let server_ref = s.run_server(run_length);
return_container_ref.lock().unwrap().push(server_ref);
});
// Not a big fan of sleeping hack here, but there appears to be a
// concurrency issue when creating files in rocksdb that causes
// failure if we don't pause a bit before starting the next server
// concurrency issue when creating files in rocksdb that causes
// failure if we don't pause a bit before starting the next server
thread::sleep(time::Duration::from_millis(500));
handles.push(handle);
}

for handle in handles {

@@ -508,7 +498,6 @@ impl LocalServerContainerPool {
pub fn connect_all_peers(&mut self) {
/// just pull out all currently active servers, build a list,
/// and feed into all servers
let mut server_addresses: Vec<String> = Vec::new();
for s in &self.server_containers {
let server_address = format!("{}:{}", s.config.base_addr, s.config.p2p_server_port);
@@ -12,14 +12,17 @@
// See the License for the specific language governing permissions and
// limitations under the License.

extern crate grin_grin as grin;
extern crate grin_core as core;
extern crate grin_p2p as p2p;
extern crate grin_chain as chain;
#[macro_use]
extern crate router;

extern crate grin_api as api;
extern crate grin_wallet as wallet;
extern crate grin_chain as chain;
extern crate grin_core as core;
extern crate grin_grin as grin;
extern crate grin_p2p as p2p;
extern crate grin_pow as pow;
extern crate grin_util as util;
extern crate grin_wallet as wallet;

extern crate futures;
extern crate tokio_core;

@@ -31,7 +34,7 @@ use std::thread;
use std::time;
use std::default::Default;

use futures::{Future, Poll, Async};
use futures::{Async, Future, Poll};
use futures::task::current;
use tokio_core::reactor;
use tokio_timer::Timer;

@@ -41,8 +44,8 @@ use core::global;
use core::global::{MiningParameterMode, MINING_PARAMETER_MODE};
use wallet::WalletConfig;

use framework::{LocalServerContainer, LocalServerContainerConfig, LocalServerContainerPoolConfig,
LocalServerContainerPool};
use framework::{LocalServerContainer, LocalServerContainerConfig, LocalServerContainerPool,
LocalServerContainerPoolConfig};

/// Testing the frameworks by starting a fresh server, creating a genesis
/// Block and mining into a wallet for a bit

@@ -71,7 +74,6 @@ fn basic_genesis_mine() {
pool.create_server(&mut server_config);
pool.run_all_servers();
}

/// Creates 5 servers, first being a seed and check that through peer address

@@ -175,10 +177,8 @@ fn simulate_parallel_mining() {
let _ = pool.run_all_servers();

// Check mining difficulty here?, though I'd think it's more valuable
// to simply output it. Can at least see the evolution of the difficulty target
// in the debug log output for now
// to simply output it. Can at least see the evolution of the difficulty target
// in the debug log output for now
}

// TODO: Convert these tests to newer framework format

@@ -186,6 +186,7 @@ fn simulate_parallel_mining() {
/// gets propagated to all.
#[test]
fn a_simulate_block_propagation() {
util::init_test_logger();
global::set_mining_mode(MiningParameterMode::AutomatedTesting);

let test_name_dir = "grin-prop";

@@ -243,7 +244,7 @@ fn a_simulate_block_propagation() {
let original_height = servers[0].head().height;

// monitor for a change of head on a different server and check whether
// chain height has changed
// chain height has changed
evtlp.run(change(&servers[4]).and_then(|tip| {
assert!(tip.height == original_height + 1);
Ok(())

@@ -17,7 +17,7 @@ use std::cmp::min;

use serde::{de, ser};

use byteorder::{ByteOrder, BigEndian};
use byteorder::{BigEndian, ByteOrder};
use blake2::blake2b::blake2b;
use util::secp;
use util::secp::Secp256k1;

@@ -253,11 +253,11 @@ impl ExtendedKey {

let derived = blake2b(64, &self.chaincode[..], &seed[..]);

let mut secret_key = SecretKey::from_slice(&secp, &derived.as_bytes()[0..32])
let mut secret_key =
SecretKey::from_slice(&secp, &derived.as_bytes()[0..32]).expect("Error deriving key");
secret_key
.add_assign(secp, &self.key)
.expect("Error deriving key");
secret_key.add_assign(secp, &self.key).expect(
"Error deriving key",
);
// TODO check if key != 0 ?

let mut chain_code: [u8; 32] = [0; 32];

@@ -312,13 +312,10 @@ mod test {
let s = Secp256k1::new();
let seed = from_hex("000102030405060708090a0b0c0d0e0f");
let extk = ExtendedKey::from_seed(&s, &seed.as_slice()).unwrap();
let sec = from_hex(
"c3f5ae520f474b390a637de4669c84d0ed9bbc21742577fac930834d3c3083dd",
);
let sec = from_hex("c3f5ae520f474b390a637de4669c84d0ed9bbc21742577fac930834d3c3083dd");
let secret_key = SecretKey::from_slice(&s, sec.as_slice()).unwrap();
let chaincode = from_hex(
"e7298e68452b0c6d54837670896e1aee76b118075150d90d4ee416ece106ae72",
);
let chaincode =
from_hex("e7298e68452b0c6d54837670896e1aee76b118075150d90d4ee416ece106ae72");
let identifier = from_hex("83e59c48297b78b34b73");
let depth = 0;
let n_child = 0;

@@ -343,13 +340,10 @@ mod test {
let seed = from_hex("000102030405060708090a0b0c0d0e0f");
let extk = ExtendedKey::from_seed(&s, &seed.as_slice()).unwrap();
let derived = extk.derive(&s, 0).unwrap();
let sec = from_hex(
"d75f70beb2bd3b56f9b064087934bdedee98e4b5aae6280c58b4eff38847888f",
);
let sec = from_hex("d75f70beb2bd3b56f9b064087934bdedee98e4b5aae6280c58b4eff38847888f");
let secret_key = SecretKey::from_slice(&s, sec.as_slice()).unwrap();
let chaincode = from_hex(
"243cb881e1549e714db31d23af45540b13ad07941f64a786bbf3313b4de1df52",
);
let chaincode =
from_hex("243cb881e1549e714db31d23af45540b13ad07941f64a786bbf3313b4de1df52");
let root_key_id = from_hex("83e59c48297b78b34b73");
let identifier = from_hex("0185adb4d8b730099c93");
let depth = 1;

@@ -20,7 +20,7 @@ use util::secp::{Message, Secp256k1, Signature};
use util::secp::key::SecretKey;
use util::secp::pedersen::{Commitment, ProofMessage, ProofInfo, RangeProof};
use blake2;
use blind::{BlindingFactor, BlindSum};
use blind::{BlindSum, BlindingFactor};
use extkey::{self, Identifier};

@@ -56,7 +56,7 @@ impl Keychain {
}

// For tests and burn only, associate a key identifier with a known secret key.
//
//
pub fn burn_enabled(keychain: &Keychain, burn_key_id: &Identifier) -> Keychain {
let mut key_overrides = HashMap::new();
key_overrides.insert(

@@ -210,7 +210,7 @@ mod test {
let msg = secp::Message::from_slice(&msg_bytes[..]).unwrap();

// now create a zero commitment using the key on the keychain associated with
// the key_id
// the key_id
let commit = keychain.commit(0, &key_id).unwrap();

// now check we can use our key to verify a signature from this zero commitment

@@ -232,7 +232,7 @@ mod test {
assert_eq!(proof_info.value, 5);

// now check the recovered message is "empty" (but not truncated) i.e. all
// zeroes
// zeroes
assert_eq!(
proof_info.message,
secp::pedersen::ProofMessage::from_bytes(&[0; secp::constants::PROOF_MSG_SIZE])

@@ -16,10 +16,10 @@

extern crate blake2_rfc as blake2;
extern crate byteorder;
extern crate grin_util as util;
extern crate rand;
extern crate grin_util as util;
extern crate serde;
#[macro_use]
extern crate serde_derive;
extern crate serde_json;

@@ -27,6 +27,6 @@ mod blind;
mod extkey;

pub use blind::{BlindSum, BlindingFactor};
pub use extkey::{Identifier, ExtendedKey, IDENTIFIER_SIZE};
pub use extkey::{ExtendedKey, Identifier, IDENTIFIER_SIZE};
pub mod keychain;
pub use keychain::{Error, Keychain};

@@ -17,13 +17,13 @@

use std::iter;
use std::ops::Deref;
use std::sync::{Mutex, Arc};
use std::time::{Instant, Duration};
use std::sync::{Arc, Mutex};
use std::time::{Duration, Instant};

use futures;
use futures::{Stream, Future};
use futures::{Future, Stream};
use futures::stream;
use futures::sync::mpsc::{Sender, UnboundedSender, UnboundedReceiver};
use futures::sync::mpsc::{Sender, UnboundedReceiver, UnboundedSender};
use tokio_core::net::TcpStream;
use tokio_io::{AsyncRead, AsyncWrite};
use tokio_io::io::{read_exact, write_all};

@@ -54,7 +54,7 @@ pub trait Handler: Sync + Send {
impl<F> Handler for F
where
F: Fn(UnboundedSender<Vec<u8>>, MsgHeader, Vec<u8>)
-> Result<Option<Hash>, ser::Error>,
-> Result<Option<Hash>, ser::Error>,
F: Sync + Send,
{
fn handle(

@@ -99,7 +99,6 @@ impl Connection {
where
F: Handler + 'static,
{
let (reader, writer) = conn.split();

// Set Max Read to 12 Mb/s

@@ -112,9 +111,9 @@ impl Connection {

// same for closing the connection
let (close_tx, close_rx) = futures::sync::mpsc::channel(1);
let close_conn = close_rx.for_each(|_| Ok(())).map_err(
|_| Error::ConnectionClose,
);
let close_conn = close_rx
.for_each(|_| Ok(()))
.map_err(|_| Error::ConnectionClose);

let me = Connection {
outbound_chan: tx.clone(),

@@ -128,7 +127,7 @@ impl Connection {
let read_msg = me.read_msg(tx, reader, handler).map(|_| ());

// setting the writing future, getting messages from our system and sending
// them out
// them out
let write_msg = me.write_msg(rx, writer).map(|_| ());

// select between our different futures and return them

@@ -152,7 +151,6 @@ impl Connection {
where
W: AsyncWrite + 'static,
{
let sent_bytes = self.sent_bytes.clone();
let send_data = rx
.map_err(|_| Error::ConnectionClose)

@@ -181,9 +179,8 @@ impl Connection {
F: Handler + 'static,
R: AsyncRead + 'static,
{
// infinite iterator stream so we repeat the message reading logic until the
// peer is stopped
// peer is stopped
let iter = stream::iter_ok(iter::repeat(()).map(Ok::<(), Error>));

// setup the reading future, getting messages from the peer and processing them

@@ -229,7 +226,6 @@ impl Connection {
/// Utility function to send any Writeable. Handles adding the header and
/// serialization.
pub fn send_msg<W: ser::Writeable>(&self, t: Type, body: &W) -> Result<(), Error> {
let mut body_data = vec![];
try!(ser::serialize(&mut body_data, body));
let mut data = vec![];

@@ -239,9 +235,9 @@ impl Connection {
));
data.append(&mut body_data);

self.outbound_chan.unbounded_send(data).map_err(|_| {
Error::ConnectionClose
})
self.outbound_chan
.unbounded_send(data)
.map_err(|_| Error::ConnectionClose)
}

/// Bytes sent and received by this peer to the remote peer.

@@ -269,11 +265,10 @@ impl TimeoutConnection {
where
F: Handler + 'static,
{
let expects = Arc::new(Mutex::new(vec![]));

// Decorates the handler to remove the "subscription" from the expected
// responses. We got our replies, so no timeout should occur.
// responses. We got our replies, so no timeout should occur.
let exp = expects.clone();
let (conn, fut) = Connection::listen(conn, move |sender, header: MsgHeader, data| {
let msg_type = header.msg_type;

@@ -44,7 +44,9 @@ unsafe impl Send for Handshake {}
impl Handshake {
/// Creates a new handshake handler
pub fn new() -> Handshake {
Handshake { nonces: Arc::new(RwLock::new(VecDeque::with_capacity(NONCES_CAP))) }
Handshake {
nonces: Arc::new(RwLock::new(VecDeque::with_capacity(NONCES_CAP))),
}
}

/// Handles connecting to a new remote peer, starting the version handshake.

@@ -22,25 +22,25 @@

#[macro_use]
extern crate bitflags;
extern crate bytes;
#[macro_use]
extern crate enum_primitive;
extern crate futures;
#[macro_use]
extern crate grin_core as core;
extern crate grin_store;
extern crate grin_util as util;
#[macro_use]
extern crate slog;
extern crate futures;
extern crate tokio_core;
extern crate tokio_io;
extern crate bytes;
extern crate tokio_timer;
extern crate num;
extern crate rand;
extern crate serde;
#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate slog;
extern crate time;
extern crate num;
extern crate tokio_core;
extern crate tokio_io;
extern crate tokio_timer;

mod conn;
pub mod handshake;

@@ -52,8 +52,8 @@ mod server;
mod store;
mod types;

pub use server::{Server, DummyAdapter};
pub use server::{DummyAdapter, Server};
pub use peer::Peer;
pub use types::{P2PConfig, NetAdapter, MAX_LOCATORS, MAX_BLOCK_HEADERS, MAX_PEER_ADDRS,
Capabilities, UNKNOWN, FULL_NODE, FULL_HIST, PeerInfo, Error};
pub use store::{PeerStore, PeerData, State};
pub use types::{Capabilities, Error, NetAdapter, P2PConfig, PeerInfo, FULL_HIST, FULL_NODE,
MAX_BLOCK_HEADERS, MAX_LOCATORS, MAX_PEER_ADDRS, UNKNOWN};
pub use store::{PeerData, PeerStore, State};

@@ -14,10 +14,10 @@

//! Message types that transit over the network and related serialization code.

use std::net::{SocketAddr, SocketAddrV4, SocketAddrV6, Ipv4Addr, Ipv6Addr};
use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV4, SocketAddrV6};
use num::FromPrimitive;

use futures::future::{Future, ok};
use futures::future::{ok, Future};
use tokio_core::net::TcpStream;
use tokio_io::io::{read_exact, write_all};

@@ -25,7 +25,7 @@ use core::consensus::MAX_MSG_LEN;
use core::core::BlockHeader;
use core::core::hash::Hash;
use core::core::target::Difficulty;
use core::ser::{self, Writeable, Readable, Writer, Reader};
use core::ser::{self, Readable, Reader, Writeable, Writer};

use types::*;

@@ -51,18 +51,18 @@ pub enum ErrCodes {
enum_from_primitive! {
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum Type {
Error,
Hand,
Shake,
Ping,
Pong,
GetPeerAddrs,
PeerAddrs,
GetHeaders,
Headers,
GetBlock,
Block,
Transaction,
Error,
Hand,
Shake,
Ping,
Pong,
GetPeerAddrs,
PeerAddrs,
GetHeaders,
Headers,
GetBlock,
Block,
Transaction,
}
}

@@ -79,7 +79,7 @@ where
let header = try!(ser::deserialize::<MsgHeader>(&mut &buf[..]));
if header.msg_len > MAX_MSG_LEN {
// TODO add additional restrictions on a per-message-type basis to avoid 20MB
// pings
// pings
return Err(Error::Serialization(ser::Error::TooLargeReadErr));
}
Ok((reader, header))

@@ -170,13 +170,11 @@ impl Readable for MsgHeader {
try!(reader.expect_u8(MAGIC[1]));
let (t, len) = ser_multiread!(reader, read_u8, read_u64);
match Type::from_u8(t) {
Some(ty) => {
Ok(MsgHeader {
magic: MAGIC,
msg_type: ty,
msg_len: len,
})
}
Some(ty) => Ok(MsgHeader {
magic: MAGIC,
msg_type: ty,
msg_len: len,
}),
None => Err(ser::Error::CorruptedData),
}
}

@@ -226,9 +224,7 @@ impl Readable for Hand {
let receiver_addr = try!(SockAddr::read(reader));
let ua = try!(reader.read_vec());
let user_agent = try!(String::from_utf8(ua).map_err(|_| ser::Error::CorruptedData));
let capabilities = try!(Capabilities::from_bits(capab).ok_or(
ser::Error::CorruptedData,
));
let capabilities = try!(Capabilities::from_bits(capab).ok_or(ser::Error::CorruptedData,));
Ok(Hand {
version: version,
capabilities: capabilities,

@@ -275,9 +271,7 @@ impl Readable for Shake {
let total_diff = try!(Difficulty::read(reader));
let ua = try!(reader.read_vec());
let user_agent = try!(String::from_utf8(ua).map_err(|_| ser::Error::CorruptedData));
let capabilities = try!(Capabilities::from_bits(capab).ok_or(
ser::Error::CorruptedData,
));
let capabilities = try!(Capabilities::from_bits(capab).ok_or(ser::Error::CorruptedData,));
Ok(Shake {
version: version,
capabilities: capabilities,

@@ -302,10 +296,10 @@ impl Writeable for GetPeerAddrs {
impl Readable for GetPeerAddrs {
fn read(reader: &mut Reader) -> Result<GetPeerAddrs, ser::Error> {
let capab = try!(reader.read_u32());
let capabilities = try!(Capabilities::from_bits(capab).ok_or(
ser::Error::CorruptedData,
));
Ok(GetPeerAddrs { capabilities: capabilities })
let capabilities = try!(Capabilities::from_bits(capab).ok_or(ser::Error::CorruptedData,));
Ok(GetPeerAddrs {
capabilities: capabilities,
})
}
}

@@ -361,9 +355,7 @@ impl Writeable for PeerError {
impl Readable for PeerError {
fn read(reader: &mut Reader) -> Result<PeerError, ser::Error> {
let (code, msg) = ser_multiread!(reader, read_u32, read_vec);
let message = try!(String::from_utf8(msg).map_err(
|_| ser::Error::CorruptedData,
));
let message = try!(String::from_utf8(msg).map_err(|_| ser::Error::CorruptedData,));
Ok(PeerError {
code: code,
message: message,

@@ -413,16 +405,7 @@ impl Readable for SockAddr {
let ip = try_map_vec!([0..8], |_| reader.read_u16());
let port = try!(reader.read_u16());
Ok(SockAddr(SocketAddr::V6(SocketAddrV6::new(
Ipv6Addr::new(
ip[0],
ip[1],
ip[2],
ip[3],
ip[4],
ip[5],
ip[6],
ip[7],
),
Ipv6Addr::new(ip[0], ip[1], ip[2], ip[3], ip[4], ip[5], ip[6], ip[7]),
port,
0,
0,

@@ -13,7 +13,7 @@
// limitations under the License.

use std::net::SocketAddr;
use std::sync::{RwLock, Arc};
use std::sync::{Arc, RwLock};

use futures::Future;
use tokio_core::net::TcpStream;

@@ -80,20 +80,16 @@ impl Peer {
hs: &Handshake,
na: Arc<NetAdapter>,
) -> Box<Future<Item = (TcpStream, Peer), Error = Error>> {
let hs_peer = hs.handshake(capab, total_difficulty, conn).and_then(
|(conn,
proto,
info)| {
let hs_peer = hs.handshake(capab, total_difficulty, conn)
.and_then(|(conn, proto, info)| {
Ok((conn, Peer::new(info, Box::new(proto), na)))
},
);
});
Box::new(hs_peer)
}

/// Main peer loop listening for messages and forwarding to the rest of the
/// system.
pub fn run(&self, conn: TcpStream) -> Box<Future<Item = (), Error = Error>> {
let addr = self.info.addr;
let state = self.state.clone();
let adapter = Arc::new(self.tracking_adapter.clone());

@@ -204,7 +200,7 @@ impl TrackingAdapter {
fn has(&self, hash: Hash) -> bool {
let known = self.known.read().unwrap();
// may become too slow, an ordered set (by timestamp for eviction) may
// end up being a better choice
// end up being a better choice
known.contains(&hash)
}
@@ -34,7 +34,9 @@ pub struct ProtocolV1 {

impl ProtocolV1 {
pub fn new() -> ProtocolV1 {
ProtocolV1 { conn: OneTime::new() }
ProtocolV1 {
conn: OneTime::new(),
}
}
}

@@ -45,7 +47,6 @@ impl Protocol for ProtocolV1 {
conn: TcpStream,
adapter: Arc<NetAdapter>,
) -> Box<Future<Item = (), Error = Error>> {
let (conn, listener) = TimeoutConnection::listen(conn, move |sender, header, data| {
let adapt = adapter.as_ref();
handle_payload(adapt, sender, header, data)

@@ -94,7 +95,9 @@ impl Protocol for ProtocolV1 {
self.send_request(
Type::GetPeerAddrs,
Type::PeerAddrs,
&GetPeerAddrs { capabilities: capab },
&GetPeerAddrs {
capabilities: capab,
},
None,
)
}

@@ -174,7 +174,7 @@ impl Server {
let total_diff = adapter.clone().total_difficulty();

// connect to the peer and add it to the server map, wiring it a timeout for
// the handhake
// the handhake
let connect = Peer::connect(
socket,
capab,

@@ -330,12 +330,11 @@ fn with_timeout<T: 'static>(
h: &reactor::Handle,
) -> Box<Future<Item = T, Error = Error>> {
let timeout = reactor::Timeout::new(Duration::new(5, 0), h).unwrap();
let timed = fut.select(timeout.map(Err).from_err()).then(
|res| match res {
let timed = fut.select(timeout.map(Err).from_err())
.then(|res| match res {
Ok((Ok(inner), _timeout)) => Ok(inner),
Ok((_, _accept)) => Err(Error::Timeout),
Err((e, _other)) => Err(e),
},
);
});
Box::new(timed)
}

@@ -17,8 +17,8 @@
use std::net::SocketAddr;
use num::FromPrimitive;

use core::ser::{self, Readable, Writeable, Reader, Writer};
use grin_store::{self, Error, to_key, option_to_not_found};
use core::ser::{self, Readable, Reader, Writeable, Writer};
use grin_store::{self, option_to_not_found, to_key, Error};
use msg::SockAddr;
use types::Capabilities;

@@ -30,9 +30,9 @@ const PEER_PREFIX: u8 = 'p' as u8;
enum_from_primitive! {
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum State {
Healthy,
Banned,
Defunct,
Healthy,
Banned,
Defunct,
}
}

@@ -68,18 +68,14 @@ impl Readable for PeerData {
let addr = SockAddr::read(reader)?;
let (capab, ua, fl) = ser_multiread!(reader, read_u32, read_vec, read_u8);
let user_agent = String::from_utf8(ua).map_err(|_| ser::Error::CorruptedData)?;
let capabilities = Capabilities::from_bits(capab).ok_or(
ser::Error::CorruptedData,
)?;
let capabilities = Capabilities::from_bits(capab).ok_or(ser::Error::CorruptedData)?;
match State::from_u8(fl) {
Some(flags) => {
Ok(PeerData {
addr: addr.0,
capabilities: capabilities,
user_agent: user_agent,
flags: flags,
})
}
Some(flags) => Ok(PeerData {
addr: addr.0,
capabilities: capabilities,
user_agent: user_agent,
flags: flags,
}),
None => Err(ser::Error::CorruptedData),
}
}

@@ -109,22 +105,18 @@ impl PeerStore {
}

pub fn exists_peer(&self, peer_addr: SocketAddr) -> Result<bool, Error> {
self.db.exists(
&to_key(PEER_PREFIX, &mut format!("{}", peer_addr).into_bytes())[..],
)
self.db
.exists(&to_key(PEER_PREFIX, &mut format!("{}", peer_addr).into_bytes())[..])
}

pub fn delete_peer(&self, peer_addr: SocketAddr) -> Result<(), Error> {
self.db.delete(
&to_key(PEER_PREFIX, &mut format!("{}", peer_addr).into_bytes())[..],
)
self.db
.delete(&to_key(PEER_PREFIX, &mut format!("{}", peer_addr).into_bytes())[..])
}

pub fn find_peers(&self, state: State, cap: Capabilities, count: usize) -> Vec<PeerData> {
let peers_iter = self.db.iter::<PeerData>(&to_key(
PEER_PREFIX,
&mut "".to_string().into_bytes(),
));
let peers_iter = self.db
.iter::<PeerData>(&to_key(PEER_PREFIX, &mut "".to_string().into_bytes()));
let mut peers = Vec::with_capacity(count);
for p in peers_iter {
if p.flags == state && p.capabilities.contains(cap) {

@@ -14,7 +14,7 @@

use std::convert::From;
use std::io;
use std::net::{SocketAddr, IpAddr};
use std::net::{IpAddr, SocketAddr};
use std::sync::Arc;

use futures::Future;

@@ -85,17 +85,17 @@ bitflags! {
/// Options for what type of interaction a peer supports
#[derive(Serialize, Deserialize)]
pub flags Capabilities: u32 {
/// We don't know (yet) what the peer can do.
const UNKNOWN = 0b00000000,
/// Full archival node, has the whole history without any pruning.
const FULL_HIST = 0b00000001,
/// Can provide block headers and the UTXO set for some recent-enough
/// height.
const UTXO_HIST = 0b00000010,
/// Can provide a list of healthy peers
const PEER_LIST = 0b00000100,
/// We don't know (yet) what the peer can do.
const UNKNOWN = 0b00000000,
/// Full archival node, has the whole history without any pruning.
const FULL_HIST = 0b00000001,
/// Can provide block headers and the UTXO set for some recent-enough
/// height.
const UTXO_HIST = 0b00000010,
/// Can provide a list of healthy peers
const PEER_LIST = 0b00000100,

const FULL_NODE = FULL_HIST.bits | UTXO_HIST.bits | PEER_LIST.bits,
const FULL_NODE = FULL_HIST.bits | UTXO_HIST.bits | PEER_LIST.bits,
}
}
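(A quick standalone check of the flag arithmetic above, with plain constants standing in for the bitflags! macro; the main harness is illustrative only.)

const UNKNOWN: u32 = 0b00000000;
const FULL_HIST: u32 = 0b00000001;
const UTXO_HIST: u32 = 0b00000010;
const PEER_LIST: u32 = 0b00000100;
const FULL_NODE: u32 = FULL_HIST | UTXO_HIST | PEER_LIST;

fn main() {
    assert_eq!(FULL_NODE, 0b00000111);
    // capability checks are bitwise: a full node satisfies a peer-list query
    assert_eq!(FULL_NODE & PEER_LIST, PEER_LIST);
    assert_eq!(UNKNOWN & PEER_LIST, 0);
}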

@@ -12,9 +12,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.

extern crate futures;
extern crate grin_core as core;
extern crate grin_p2p as p2p;
extern crate futures;
extern crate tokio_core;

use std::net::SocketAddr;

@@ -32,7 +32,6 @@ use p2p::Peer;
// followed by a ping/pong exchange to make sure the connection is live.
#[test]
fn peer_handshake() {
let mut evtlp = Core::new().unwrap();
let handle = evtlp.handle();
let p2p_conf = p2p::P2PConfig::default();

@@ -89,5 +88,4 @@ fn peer_handshake() {
);

evtlp.run(run_server).unwrap();
}

@@ -49,7 +49,9 @@ pub struct DummyUtxoSet {
#[allow(dead_code)]
impl DummyUtxoSet {
pub fn empty() -> DummyUtxoSet {
DummyUtxoSet { outputs: HashMap::new() }
DummyUtxoSet {
outputs: HashMap::new(),
}
}
pub fn root(&self) -> hash::Hash {
hash::ZERO_HASH

@@ -62,7 +64,9 @@ impl DummyUtxoSet {
for output in &b.outputs {
new_hashmap.insert(output.commitment(), output.clone());
}
DummyUtxoSet { outputs: new_hashmap }
DummyUtxoSet {
outputs: new_hashmap,
}
}
pub fn with_block(&mut self, b: &block::Block) {
for input in &b.inputs {

@@ -73,14 +77,18 @@ impl DummyUtxoSet {
}
}
pub fn rewind(&self, _: &block::Block) -> DummyUtxoSet {
DummyUtxoSet { outputs: HashMap::new() }
DummyUtxoSet {
outputs: HashMap::new(),
}
}
pub fn get_output(&self, output_ref: &Commitment) -> Option<&transaction::Output> {
self.outputs.get(output_ref)
}

fn clone(&self) -> DummyUtxoSet {
DummyUtxoSet { outputs: self.outputs.clone() }
DummyUtxoSet {
outputs: self.outputs.clone(),
}
}

// only for testing: add an output to the map

@@ -108,8 +116,12 @@ pub struct DummyChainImpl {
impl DummyChainImpl {
pub fn new() -> DummyChainImpl {
DummyChainImpl {
utxo: RwLock::new(DummyUtxoSet { outputs: HashMap::new() }),
block_headers: RwLock::new(DummyBlockHeaderIndex { block_headers: HashMap::new() }),
utxo: RwLock::new(DummyUtxoSet {
outputs: HashMap::new(),
}),
block_headers: RwLock::new(DummyBlockHeaderIndex {
block_headers: HashMap::new(),
}),
head_header: RwLock::new(vec![]),
}
}

@@ -131,7 +143,8 @@ impl BlockChain for DummyChainImpl {
match self.block_headers
.read()
.unwrap()
.get_block_header_by_output_commit(*commit) {
.get_block_header_by_output_commit(*commit)
{
Ok(h) => Ok(h.clone()),
Err(e) => Err(e),
}

@@ -159,10 +172,10 @@ impl DummyChain for DummyChainImpl {
commitment: Commitment,
block_header: &block::BlockHeader,
) {
self.block_headers.write().unwrap().insert(
commitment,
block_header.clone(),
);
self.block_headers
.write()
.unwrap()
.insert(commitment, block_header.clone());
}
fn store_head_header(&self, block_header: &block::BlockHeader) {
let mut h = self.head_header.write().unwrap();
@@ -165,35 +165,36 @@ impl DirectedGraph {

/// Remove a vertex by its hash
pub fn remove_vertex(&mut self, tx_hash: core::hash::Hash) -> Option<PoolEntry> {
match self.roots.iter().position(
|x| x.transaction_hash == tx_hash,
) {
match self.roots
.iter()
.position(|x| x.transaction_hash == tx_hash)
{
Some(i) => Some(self.roots.swap_remove(i)),
None => {
match self.vertices.iter().position(
|x| x.transaction_hash == tx_hash,
) {
Some(i) => Some(self.vertices.swap_remove(i)),
None => None,
}
}
None => match self.vertices
.iter()
.position(|x| x.transaction_hash == tx_hash)
{
Some(i) => Some(self.vertices.swap_remove(i)),
None => None,
},
}
}

/// Promote any non-root vertices to roots based on current edges.
/// For a given tx, if there are no edges with that tx as destination then it is a root.
/// For a given tx, if there are no edges with that tx as destination then
/// it is a root.
pub fn update_roots(&mut self) {
let mut new_vertices: Vec<PoolEntry> = vec![];

// first find the set of all destinations from the edges in the graph
// a root is a vertex that is not a destination of any edge
// a root is a vertex that is not a destination of any edge
let destinations = self.edges
.values()
.filter_map(|edge| edge.destination)
.collect::<HashSet<_>>();

// now iterate over the current non-root vertices
// and check if it is now a root based on the set of edge destinations
// and check if it is now a root based on the set of edge destinations
for x in &self.vertices {
if destinations.contains(&x.transaction_hash) {
new_vertices.push(x.clone());
|
||||
|
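As a hedged, self-contained illustration of the root rule above (the tuple edge list here is invented for this sketch, it is not the pool's real graph type): a vertex is a root exactly when no edge names it as a destination.

use std::collections::HashSet;

fn split_roots(vertices: &[u64], edges: &[(u64, u64)]) -> (Vec<u64>, Vec<u64>) {
    // collect every edge destination; anything listed there cannot be a root
    let destinations: HashSet<u64> = edges.iter().map(|&(_, dst)| dst).collect();
    let (non_roots, roots): (Vec<u64>, Vec<u64>) = vertices
        .iter()
        .cloned()
        .partition(|v| destinations.contains(v));
    (roots, non_roots)
}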
@ -272,7 +273,7 @@ impl DirectedGraph {
.map(|x| x.transaction_hash)
.collect::<Vec<_>>();
hashes.extend(&non_root_hashes);
return hashes
return hashes;
}
}
@ -313,7 +314,9 @@ mod tests {
features: core::transaction::DEFAULT_OUTPUT,
commit: output_commit,
switch_commit_hash: switch_commit_hash,
proof: keychain.range_proof(100, &key_id1, output_commit, msg).unwrap(),
proof: keychain
.range_proof(100, &key_id1, output_commit, msg)
.unwrap(),
},
];
let test_transaction = core::transaction::Transaction::new(inputs, outputs, 5, 0);
@ -26,15 +26,15 @@ mod types;
mod blockchain;
mod pool;

extern crate time;
extern crate rand;
extern crate serde;
#[macro_use]
extern crate serde_derive;
extern crate blake2_rfc as blake2;
extern crate grin_core as core;
extern crate grin_keychain as keychain;
extern crate grin_util as util;
extern crate rand;
extern crate serde;
#[macro_use]
extern crate serde_derive;
extern crate time;

pub use pool::TransactionPool;
pub use types::{BlockChain, PoolAdapter, TxSource, PoolError, PoolConfig};
pub use types::{BlockChain, PoolAdapter, PoolConfig, PoolError, TxSource};

415 pool/src/pool.rs
@ -71,12 +71,14 @@ where
/// be accounted for separately, if relevant.
pub fn search_for_best_output(&self, output_commitment: &Commitment) -> Parent {
// The current best unspent set is:
// Pool unspent + (blockchain unspent - pool->blockchain spent)
// Pool unspents are unconditional so we check those first
self.pool
.get_available_output(output_commitment)
.map(|x| {
Parent::PoolTransaction { tx_ref: x.source_hash().unwrap() }
Parent::PoolTransaction {
tx_ref: x.source_hash().unwrap(),
}
})
.or(self.search_blockchain_unspents(output_commitment))
.or(self.search_pool_spents(output_commitment))
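To make the fallback order above concrete, here is a minimal, self-contained sketch (illustration only, not part of this commit; the enum and the three sets stand in for the pool and blockchain views):

use std::collections::HashSet;

#[derive(Debug, PartialEq)]
enum Source {
    PoolUnspent,
    BlockchainUnspent,
    PoolSpent,
    Unknown,
}

// Mirrors the search order: pool unspents first, then the blockchain view
// (blockchain unspent minus pool->blockchain spends), then pool spents.
fn classify(
    c: u64,
    pool_unspent: &HashSet<u64>,
    chain_unspent: &HashSet<u64>,
    pool_spent: &HashSet<u64>,
) -> Source {
    if pool_unspent.contains(&c) {
        Source::PoolUnspent
    } else if chain_unspent.contains(&c) && !pool_spent.contains(&c) {
        Source::BlockchainUnspent
    } else if pool_spent.contains(&c) {
        Source::PoolSpent // corresponds to Parent::AlreadySpent
    } else {
        Source::Unknown
    }
}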
@ -84,25 +86,31 @@ where
}

// search_blockchain_unspents searches the current view of the blockchain
// unspent set, represented by blockchain unspents - pool spents, for an
// output designated by output_commitment.
fn search_blockchain_unspents(&self, output_commitment: &Commitment) -> Option<Parent> {
self.blockchain.get_unspent(output_commitment).ok().map(
|output| match self.pool.get_blockchain_spent(output_commitment) {
Some(x) => Parent::AlreadySpent { other_tx: x.destination_hash().unwrap() },
None => Parent::BlockTransaction { output },
},
)
self.blockchain
.get_unspent(output_commitment)
.ok()
.map(|output| {
match self.pool.get_blockchain_spent(output_commitment) {
Some(x) => Parent::AlreadySpent {
other_tx: x.destination_hash().unwrap(),
},
None => Parent::BlockTransaction { output },
}
})
}

// search_pool_spents is the second half of pool input detection, after the
// available_outputs have been checked. This returns either a
// Parent::AlreadySpent or None.
fn search_pool_spents(&self, output_commitment: &Commitment) -> Option<Parent> {
self.pool.get_internal_spent(output_commitment).map(|x| {
Parent::AlreadySpent { other_tx: x.destination_hash().unwrap() }
Parent::AlreadySpent {
other_tx: x.destination_hash().unwrap(),
}
})
}

/// Get the number of transactions in the pool
@ -131,7 +139,6 @@ where
_: TxSource,
tx: transaction::Transaction,
) -> Result<(), PoolError> {
// Do we have the capacity to accept this transaction?
if let Err(e) = self.is_acceptable(&tx) {
return Err(e);

@ -142,14 +149,14 @@ where
tx.validate(&secp).map_err(|_e| PoolError::Invalid)?;

// The first check involves ensuring that an identical transaction is
// not already in the pool's transaction set.
// A non-authoritative similar check should be performed under the
// pool's read lock before we get to this point, which would catch the
// majority of duplicate cases. The race condition is caught here.
// TODO: When the transaction identifier is finalized, the assumptions
// here may change depending on the exact coverage of the identifier.
// The current tx.hash() method, for example, does not cover changes
// to fees or other elements of the signature preimage.
let tx_hash = graph::transaction_identifier(&tx);
if self.transactions.contains_key(&tx_hash) {
return Err(PoolError::AlreadyInPool);
@ -163,7 +170,7 @@ where
}

// The next issue is to identify all unspent outputs that
// this transaction will consume and make sure they exist in the set.
let mut pool_refs: Vec<graph::Edge> = Vec::new();
let mut orphan_refs: Vec<graph::Edge> = Vec::new();
let mut blockchain_refs: Vec<graph::Edge> = Vec::new();

@ -172,9 +179,9 @@ where
let base = graph::Edge::new(None, Some(tx_hash), input.commitment());

// Note that search_for_best_output does not examine orphans, by
// design. If an incoming transaction consumes pool outputs already
// spent by the orphans set, this does not preclude its inclusion
// into the pool.
match self.search_for_best_output(&input.commitment()) {
Parent::PoolTransaction { tx_ref: x } => pool_refs.push(base.with_source(Some(x))),
Parent::BlockTransaction { output } => {
@ -207,24 +214,24 @@ where
let is_orphan = orphan_refs.len() > 0;

// Next we examine the outputs this transaction creates and ensure
// that they do not already exist.
// I believe it's worth preventing duplicate outputs from being
// accepted, even though it is possible for them to be mined
// with strict ordering. In the future, if desirable, this could
// be node policy config or more intelligent.
for output in &tx.outputs {
self.check_duplicate_outputs(output, is_orphan)?
}

// Assertion: we have exactly as many resolved spending references as
// inputs to the transaction.
assert_eq!(
tx.inputs.len(),
blockchain_refs.len() + pool_refs.len() + orphan_refs.len()
);

// At this point we know if we're spending all known unspents and not
// creating any duplicate unspents.
let pool_entry = graph::PoolEntry::new(&tx);
let new_unspents = tx.outputs
.iter()
@ -233,33 +240,28 @@ where

if !is_orphan {
// In the non-orphan (pool) case, we've ensured that every input
// maps one-to-one with an unspent (available) output, and each
// output is unique. No further checks are necessary.
self.pool.add_pool_transaction(
pool_entry,
blockchain_refs,
pool_refs,
new_unspents,
);
self.pool
.add_pool_transaction(pool_entry, blockchain_refs, pool_refs, new_unspents);

self.reconcile_orphans().unwrap();
self.adapter.tx_accepted(&tx);
self.transactions.insert(tx_hash, Box::new(tx));
Ok(())

} else {
// At this point, we're pretty sure the transaction is an orphan,
// but we have to explicitly check for double spends against the
// orphans set; we do not check this as part of the connectivity
// checking above.
// First, any references resolved to the pool need to be compared
// against active orphan pool_connections.
// Note that pool_connections here also does double duty to
// account for blockchain connections.
for pool_ref in pool_refs.iter().chain(blockchain_refs.iter()) {
match self.orphans.get_external_spent_output(
&pool_ref.output_commitment(),
) {
match self.orphans
.get_external_spent_output(&pool_ref.output_commitment())
{
// Should the below err be subtyped to orphans somehow?
Some(x) => {
return Err(PoolError::DoubleSpend {
@ -272,9 +274,9 @@ where
}

// Next, we have to consider the possibility of double spends
// within the orphans set.
// We also have to distinguish now between missing and internal
// references.
let missing_refs = self.resolve_orphan_refs(tx_hash, &mut orphan_refs)?;

// We have passed all failure modes.

@ -289,7 +291,6 @@ where

Err(PoolError::OrphanTransaction)
}
}

/// Check the output for a conflict with an existing output.
@ -303,8 +304,8 @@ where
is_orphan: bool,
) -> Result<(), PoolError> {
// Checking against current blockchain unspent outputs
// We want outputs even if they're spent by pool txs, so we ignore
// consumed_blockchain_outputs
if self.blockchain.get_unspent(&output.commitment()).is_ok() {
return Err(PoolError::DuplicateOutput {
other_tx: None,

@ -328,7 +329,7 @@ where

// If the transaction might go into orphans, perform the same
// checks as above but against the orphan set instead.
if is_orphan {
// Checking against orphan outputs
match self.orphans.find_output(&output.commitment()) {

@ -342,7 +343,7 @@ where
None => {}
};
// No need to check pool connections since those are covered
// by pool unspents and blockchain connections.
}
Ok(())
}
@ -380,9 +381,9 @@ where
}
None => {
// The reference does not resolve to anything.
// Make sure this missing_output has not already
// been claimed, then add this entry to
// missing_refs
match self.orphans.get_unknown_output(&orphan_commitment) {
Some(x) => {
return Err(PoolError::DoubleSpend {
@ -430,34 +431,34 @@ where
block: &block::Block,
) -> Result<Vec<Box<transaction::Transaction>>, PoolError> {
// If this pool has been kept in sync correctly, serializing all
// updates, then the inputs must consume only members of the blockchain
// utxo set.
// If the block has been resolved properly and reduced fully to its
// canonical form, no inputs may consume outputs generated by previous
// transactions in the block; they would be cut-through. TODO: If this
// is not consensus enforced, then logic must be added here to account
// for that.
// Based on this, we operate under the following algorithm:
// For each block input, we examine the pool transaction, if any, that
// consumes the same blockchain output.
// If one exists, we mark the transaction and then examine its
// children. Recursively, we mark each child until a child is
// fully satisfied by outputs in the updated utxo view (after
// reconciliation of the block), or there are no more children.
//
// Additionally, to protect our invariant dictating no duplicate
// outputs, each output generated by the new utxo set is checked
// against outputs generated by the pool and the corresponding
// transactions are also marked.
//
// After marking concludes, sweeping begins. In order, the marked
// transactions are removed, the vertexes corresponding to the
// transactions are removed, all the marked transactions' outputs are
// removed, and all remaining non-blockchain inputs are returned to the
// unspent_outputs set.
//
// After the pool has been successfully processed, an orphans
// reconciliation job is triggered.
let mut marked_transactions: HashSet<hash::Hash> = HashSet::new();

{
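The mark phase described in the comment above boils down to a recursive traversal with a visited set. A self-contained sketch (the child map stands in for the pool's edge graph and is not its real API):

use std::collections::{HashMap, HashSet};

fn mark(tx: u64, children: &HashMap<u64, Vec<u64>>, marked: &mut HashSet<u64>) {
    if !marked.insert(tx) {
        return; // already visited: stop recursing
    }
    if let Some(kids) = children.get(&tx) {
        for &child in kids {
            // the real pool stops early when a child's inputs are still
            // satisfied by the post-block utxo view
            mark(child, children, marked);
        }
    }
}

Sweeping then removes every marked transaction, its vertex and its outputs, and returns the remaining non-blockchain inputs to the unspent set, as laid out above.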
@ -469,20 +470,21 @@ where
.filter_map(|x| x.destination_hash())
.collect();

// find all outputs that conflict - potential for duplicates so use a HashSet here
// find all outputs that conflict - potential for duplicates so use a HashSet
// here
let conflicting_outputs: HashSet<hash::Hash> = block
.outputs
.iter()
.filter_map(|x: &transaction::Output| {
self.pool.get_internal_spent_output(&x.commitment()).or(
self.pool.get_available_output(&x.commitment()),
)
self.pool
.get_internal_spent_output(&x.commitment())
.or(self.pool.get_available_output(&x.commitment()))
})
.filter_map(|x| x.source_hash())
.collect();

// now iterate over all conflicting hashes from both txs and outputs
// we can just use the union of the two sets here to remove duplicates
for &txh in conflicting_txs.union(&conflicting_outputs) {
self.mark_transaction(txh, &mut marked_transactions);
}
@ -504,11 +506,7 @@ where
///
/// Marked transactions are added to the mutable marked_txs HashSet which
/// is supplied by the calling function.
fn mark_transaction(
&self,
conflicting_tx: hash::Hash,
marked_txs: &mut HashSet<hash::Hash>,
) {
fn mark_transaction(&self, conflicting_tx: hash::Hash, marked_txs: &mut HashSet<hash::Hash>) {
// we can stop recursively visiting txs if we have already seen this one
if marked_txs.contains(&conflicting_tx) {
return;

@ -520,11 +518,9 @@ where

for output in &tx_ref.unwrap().outputs {
match self.pool.get_internal_spent_output(&output.commitment()) {
Some(x) => {
if self.blockchain.get_unspent(&x.output_commitment()).is_err() {
self.mark_transaction(x.destination_hash().unwrap(), marked_txs);
}
}
Some(x) => if self.blockchain.get_unspent(&x.output_commitment()).is_err() {
self.mark_transaction(x.destination_hash().unwrap(), marked_txs);
},
None => {}
};
}
@ -544,22 +540,19 @@ where
&mut self,
marked_transactions: HashSet<hash::Hash>,
) -> Vec<Box<transaction::Transaction>> {
let mut removed_txs = Vec::new();

for tx_hash in &marked_transactions {
let removed_tx = self.transactions.remove(&tx_hash).unwrap();

self.pool.remove_pool_transaction(
&removed_tx,
&marked_transactions,
);
self.pool
.remove_pool_transaction(&removed_tx, &marked_transactions);

removed_txs.push(removed_tx);
}

// final step is to update the pool to reflect the new set of roots
// a tx that was non-root may now be root based on the txs removed
self.pool.update_roots();

removed_txs
@ -592,9 +585,9 @@ where
}

// for a basic transaction (1 input, 2 outputs) -
// (-1 * 1) + (4 * 2) + 1 = 8
// 8 * 10 = 80
//
if self.config.accept_fee_base > 0 {
let mut tx_weight = -1 * (tx.inputs.len() as i32) + (4 * tx.outputs.len() as i32) + 1;
if tx_weight < 1 {
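A worked example of the weight formula in the comment above (a sketch, not this module's API): weight = -1 * inputs + 4 * outputs + 1, clamped to at least 1, then multiplied by the configured base fee.

fn min_fee(inputs: i32, outputs: i32, fee_base: u64) -> u64 {
    let weight = (-1 * inputs + 4 * outputs + 1).max(1);
    weight as u64 * fee_base
}

#[test]
fn basic_tx_min_fee() {
    // (-1 * 1) + (4 * 2) + 1 = 8, and 8 * 10 = 80
    assert_eq!(min_fee(1, 2, 10), 80);
}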
@ -660,7 +653,7 @@ mod tests {
dummy_chain.update_utxo_set(new_utxo);

// To mirror how this construction is intended to be used, the pool
// is placed inside a RwLock.
let pool = RwLock::new(test_setup(&Arc::new(dummy_chain)));

// Take the write lock and add a pool entry

@ -683,11 +676,10 @@ mod tests {
child_result.err().unwrap()
);
}
}

// Now take the read lock and use a few exposed methods to check
// consistency
{
let read_pool = pool.read().unwrap();
assert_eq!(read_pool.total_size(), 2);

@ -721,7 +713,7 @@ mod tests {
assert_eq!(write_pool.total_size(), 0);

// First expected failure: duplicate output
let duplicate_tx = test_transaction(vec![5,6], vec![7]);
let duplicate_tx = test_transaction(vec![5, 6], vec![7]);

match write_pool.add_to_memory_pool(test_source(), duplicate_tx) {
Ok(_) => panic!("Got OK from add_to_memory_pool when dup was expected"),
@ -731,23 +723,22 @@ mod tests {
other_tx,
in_chain,
output,
} => {
if other_tx.is_some() || !in_chain ||
output != test_output(7).commitment()
{
panic!("Unexpected parameter in DuplicateOutput: {:?}", x);
}
}
_ => {
panic!("Unexpected error when adding duplicate output transaction: {:?}", x)
}
} => if other_tx.is_some() || !in_chain
|| output != test_output(7).commitment()
{
panic!("Unexpected parameter in DuplicateOutput: {:?}", x);
},
_ => panic!(
"Unexpected error when adding duplicate output transaction: {:?}",
x
),
};
}
};

// To test DoubleSpend and AlreadyInPool conditions, we need to add
// a valid transaction.
let valid_transaction = test_transaction(vec![5,6], vec![9]);
let valid_transaction = test_transaction(vec![5, 6], vec![9]);

match write_pool.add_to_memory_pool(test_source(), valid_transaction) {
Ok(_) => {}
@ -755,7 +746,7 @@ mod tests {
};

// Now, test a DoubleSpend by consuming the same blockchain unspent
// as valid_transaction:
let double_spend_transaction = test_transaction(vec![6], vec![2]);

match write_pool.add_to_memory_pool(test_source(), double_spend_transaction) {

@ -765,19 +756,18 @@ mod tests {
PoolError::DoubleSpend {
other_tx: _,
spent_output,
} => {
if spent_output != test_output(6).commitment() {
panic!("Unexpected parameter in DoubleSpend: {:?}", x);
}
}
_ => {
panic!("Unexpected error when adding double spend transaction: {:?}", x)
}
} => if spent_output != test_output(6).commitment() {
panic!("Unexpected parameter in DoubleSpend: {:?}", x);
},
_ => panic!(
"Unexpected error when adding double spend transaction: {:?}",
x
),
};
}
};

let already_in_pool = test_transaction(vec![5,6], vec![9]);
let already_in_pool = test_transaction(vec![5, 6], vec![9]);

match write_pool.add_to_memory_pool(test_source(), already_in_pool) {
Ok(_) => panic!("Expected error when adding already in pool, got Ok"),
@ -792,10 +782,12 @@ mod tests {
assert_eq!(write_pool.total_size(), 1);

// now attempt to add a timelocked tx to the pool
// should fail as invalid based on current height
let timelocked_tx_1 = timelocked_transaction(vec![9], vec![5], 10);
match write_pool.add_to_memory_pool(test_source(), timelocked_tx_1) {
Err(PoolError::ImmatureTransaction { lock_height: height }) => {
Err(PoolError::ImmatureTransaction {
lock_height: height,
}) => {
assert_eq!(height, 10);
}
Err(e) => panic!("expected ImmatureTransaction error here - {:?}", e),
@ -821,10 +813,8 @@ mod tests {
height: 1,
..block::BlockHeader::default()
};
chain_ref.store_header_by_output_commitment(
coinbase_output.commitment(),
&coinbase_header,
);
chain_ref
.store_header_by_output_commitment(coinbase_output.commitment(), &coinbase_header);

let head_header = block::BlockHeader {
height: 2,

@ -836,9 +826,9 @@ mod tests {
let result = write_pool.add_to_memory_pool(test_source(), txn);
match result {
Err(PoolError::ImmatureCoinbase {
header: _,
output: out,
}) => {
assert_eq!(out, coinbase_output.commitment());
}
_ => panic!("expected ImmatureCoinbase error here"),

@ -854,9 +844,9 @@ mod tests {
let result = write_pool.add_to_memory_pool(test_source(), txn);
match result {
Err(PoolError::ImmatureCoinbase {
header: _,
output: out,
}) => {
assert_eq!(out, coinbase_output.commitment());
}
_ => panic!("expected ImmatureCoinbase error here"),
@ -893,16 +883,15 @@ mod tests {
dummy_chain.store_head_header(&head_header);

// single UTXO
let new_utxo = DummyUtxoSet::empty()
.with_output(test_output(100));
let new_utxo = DummyUtxoSet::empty().with_output(test_output(100));

dummy_chain.update_utxo_set(new_utxo);
let chain_ref = Arc::new(dummy_chain);
let pool = RwLock::new(test_setup(&chain_ref));

// now create two txs
// tx1 spends the UTXO
// tx2 spends output from tx1
let tx1 = test_transaction(vec![100], vec![90]);
let tx2 = test_transaction(vec![90], vec![80]);

@ -911,7 +900,7 @@ mod tests {
assert_eq!(write_pool.total_size(), 0);

// now add both txs to the pool (tx2 spends tx1 with zero confirmations)
// both should be accepted if tx1 added before tx2
write_pool.add_to_memory_pool(test_source(), tx1).unwrap();
write_pool.add_to_memory_pool(test_source(), tx2).unwrap();

@ -925,7 +914,7 @@ mod tests {
txs = mineable_txs.drain(..).map(|x| *x).collect();

// confirm we can prepare both txs for mining here
// one root tx in the pool, and one non-root vertex in the pool
assert_eq!(txs.len(), 2);
}

@ -944,7 +933,7 @@ mod tests {
chain_ref.apply_block(&block);

// now reconcile the block
// we should evict both txs here
{
let mut write_pool = pool.write().unwrap();
let evicted_transactions = write_pool.reconcile_block(&block).unwrap();

@ -952,7 +941,7 @@ mod tests {
}

// check the pool is consistent after reconciling the block
// we should have zero txs in the pool (neither roots nor non-roots)
{
let read_pool = pool.write().unwrap();
assert_eq!(read_pool.pool.len_vertices(), 0);
@ -983,44 +972,44 @@ mod tests {
let pool = RwLock::new(test_setup(&chain_ref));

// Preparation: We will introduce three root pool transactions.
// 1. A transaction that should be invalidated because it is exactly
// contained in the block.
// 2. A transaction that should be invalidated because the input is
// consumed in the block, although it is not exactly consumed.
// 3. A transaction that should remain after block reconciliation.
let block_transaction = test_transaction(vec![10], vec![8]);
let conflict_transaction = test_transaction(vec![20], vec![12,6]);
let valid_transaction = test_transaction(vec![30], vec![13,15]);
let conflict_transaction = test_transaction(vec![20], vec![12, 6]);
let valid_transaction = test_transaction(vec![30], vec![13, 15]);

// We will also introduce a few children:
// 4. A transaction that descends from transaction 1, that is in
// turn exactly contained in the block.
let block_child = test_transaction(vec![8], vec![5,1]);
let block_child = test_transaction(vec![8], vec![5, 1]);
// 5. A transaction that descends from transaction 4, that is not
// contained in the block at all and should be valid after
// reconciliation.
let pool_child = test_transaction(vec![5], vec![3]);
// 6. A transaction that descends from transaction 2 that does not
// conflict with anything in the block in any way, but should be
// invalidated (orphaned).
let conflict_child = test_transaction(vec![12], vec![2]);
// 7. A transaction that descends from transaction 2 that should be
// valid due to its inputs being satisfied by the block.
let conflict_valid_child = test_transaction(vec![6], vec![4]);
// 8. A transaction that descends from transaction 3 that should be
// invalidated due to an output conflict.
let valid_child_conflict = test_transaction(vec![13], vec![9]);
// 9. A transaction that descends from transaction 3 that should remain
// valid after reconciliation.
let valid_child_valid = test_transaction(vec![15], vec![11]);
// 10. A transaction that descends from both transaction 6 and
// transaction 9
let mixed_child = test_transaction(vec![2,11], vec![7]);
let mixed_child = test_transaction(vec![2, 11], vec![7]);

// Add transactions.
// Note: There are some ordering constraints that must be followed here
// until orphans is 100% implemented. Once the orphans process has
// stabilized, we can mix these up to exercise that path a bit.
let mut txs_to_add = vec![
block_transaction,
conflict_transaction,

@ -1037,7 +1026,7 @@ mod tests {
let expected_pool_size = txs_to_add.len();

// First we add the above transactions to the pool; all should be
// accepted.
{
let mut write_pool = pool.write().unwrap();
assert_eq!(write_pool.total_size(), 0);

@ -1049,15 +1038,15 @@ mod tests {
assert_eq!(write_pool.total_size(), expected_pool_size);
}
// Now we prepare the block that will cause the above condition.
// First, the transactions we want in the block:
// - Copy of 1
let block_tx_1 = test_transaction(vec![10], vec![8]);
// - Conflict w/ 2, satisfies 7
let block_tx_2 = test_transaction(vec![20], vec![6]);
// - Copy of 4
let block_tx_3 = test_transaction(vec![8], vec![5,1]);
let block_tx_3 = test_transaction(vec![8], vec![5, 1]);
// - Output conflict w/ 8
let block_tx_4 = test_transaction(vec![40], vec![9,1]);
let block_tx_4 = test_transaction(vec![40], vec![9, 1]);
let block_transactions = vec![&block_tx_1, &block_tx_2, &block_tx_3, &block_tx_4];

let keychain = Keychain::from_random_seed().unwrap();
@ -1083,7 +1072,7 @@ mod tests {
assert_eq!(evicted_transactions.unwrap().len(), 6);

// TODO: Txids are not yet deterministic. When they are, we should
// check the specific transactions that were evicted.
}

@ -1147,11 +1136,31 @@ mod tests {
let mut write_pool = pool.write().unwrap();
assert_eq!(write_pool.total_size(), 0);

assert!(write_pool.add_to_memory_pool(test_source(), root_tx_1).is_ok());
assert!(write_pool.add_to_memory_pool(test_source(), root_tx_2).is_ok());
assert!(write_pool.add_to_memory_pool(test_source(), root_tx_3).is_ok());
assert!(write_pool.add_to_memory_pool(test_source(), child_tx_1).is_ok());
assert!(write_pool.add_to_memory_pool(test_source(), child_tx_2).is_ok());
assert!(
write_pool
.add_to_memory_pool(test_source(), root_tx_1)
.is_ok()
);
assert!(
write_pool
.add_to_memory_pool(test_source(), root_tx_2)
.is_ok()
);
assert!(
write_pool
.add_to_memory_pool(test_source(), root_tx_3)
.is_ok()
);
assert!(
write_pool
.add_to_memory_pool(test_source(), child_tx_1)
.is_ok()
);
assert!(
write_pool
.add_to_memory_pool(test_source(), child_tx_2)
.is_ok()
);

assert_eq!(write_pool.total_size(), 5);
}
@ -1164,8 +1173,8 @@ mod tests {
txs = read_pool.prepare_mineable_transactions(3);
assert_eq!(txs.len(), 3);
// TODO: This is ugly, either make block::new take owned
// txs instead of mut refs, or change
// prepare_mineable_transactions to return mut refs
let block_txs: Vec<transaction::Transaction> = txs.drain(..).map(|x| *x).collect();
let tx_refs = block_txs.iter().collect();

@ -1199,7 +1208,7 @@ mod tests {
pool: Pool::empty(),
orphans: Orphans::empty(),
blockchain: dummy_chain.clone(),
adapter: Arc::new(NoopAdapter{}),
adapter: Arc::new(NoopAdapter {}),
}
}

@ -1217,8 +1226,8 @@ mod tests {
) -> transaction::Transaction {
let keychain = keychain_for_tests();

let fees: i64 = input_values.iter().sum::<u64>() as i64 -
output_values.iter().sum::<u64>() as i64;
let fees: i64 =
input_values.iter().sum::<u64>() as i64 - output_values.iter().sum::<u64>() as i64;
assert!(fees >= 0);

let mut tx_elements = Vec::new();

@ -1245,8 +1254,8 @@ mod tests {
) -> transaction::Transaction {
let keychain = keychain_for_tests();

let fees: i64 = input_values.iter().sum::<u64>() as i64 -
output_values.iter().sum::<u64>() as i64;
let fees: i64 =
input_values.iter().sum::<u64>() as i64 - output_values.iter().sum::<u64>() as i64;
assert!(fees >= 0);

let mut tx_elements = Vec::new();
@ -34,11 +34,11 @@ pub struct PoolConfig {
/// Base fee for a transaction to be accepted by the pool. The transaction
/// weight is computed from its number of inputs, outputs and kernels and
/// multiplied by the base fee to compare to the actual transaction fee.
#[serde="default_accept_fee_base"]
#[serde = "default_accept_fee_base"]
pub accept_fee_base: u64,

/// Maximum capacity of the pool in number of transactions
#[serde="default_max_pool_size"]
#[serde = "default_max_pool_size"]
pub max_pool_size: usize,
}
@ -51,8 +51,12 @@ impl Default for PoolConfig {
}
}

fn default_accept_fee_base() -> u64 { 10 }
fn default_max_pool_size() -> usize { 50_000 }
fn default_accept_fee_base() -> u64 {
10
}
fn default_max_pool_size() -> usize {
50_000
}
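For illustration, per-field defaults of this kind behave like serde's stock #[serde(default = "...")] attribute: a field absent from the config file falls back to the named function. The struct below is a hypothetical sketch, not this crate's actual type or attribute spelling:

#[derive(Deserialize)]
struct ExamplePoolConfig {
    #[serde(default = "default_accept_fee_base")]
    accept_fee_base: u64,
    #[serde(default = "default_max_pool_size")]
    max_pool_size: usize,
}

// Deserializing an empty config table would then yield
// accept_fee_base == 10 and max_pool_size == 50_000.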

/// Placeholder: the data representing where we heard about a tx from.
///
@ -240,7 +244,6 @@ impl Pool {
pool_refs: Vec<graph::Edge>,
mut new_unspents: Vec<graph::Edge>,
) {
// Removing consumed available_outputs
for new_edge in &pool_refs {
// All of these should correspond to an existing unspent

@ -253,23 +256,18 @@ impl Pool {

// Accounting for consumed blockchain outputs
for new_blockchain_edge in blockchain_refs.drain(..) {
self.consumed_blockchain_outputs.insert(
new_blockchain_edge
.output_commitment(),
new_blockchain_edge,
);
self.consumed_blockchain_outputs
.insert(new_blockchain_edge.output_commitment(), new_blockchain_edge);
}

// Adding the transaction to the vertices list along with internal
// pool edges
self.graph.add_entry(pool_entry, pool_refs);

// Adding the new unspents to the unspent map
for unspent_output in new_unspents.drain(..) {
self.available_outputs.insert(
unspent_output.output_commitment(),
unspent_output,
);
self.available_outputs
.insert(unspent_output.output_commitment(), unspent_output);
}
}

@ -282,19 +280,14 @@ impl Pool {
tx: &transaction::Transaction,
marked_txs: &HashSet<hash::Hash>,
) {
self.graph.remove_vertex(graph::transaction_identifier(tx));

for input in tx.inputs.iter().map(|x| x.commitment()) {
match self.graph.remove_edge_by_commitment(&input) {
Some(x) => {
if !marked_txs.contains(&x.source_hash().unwrap()) {
self.available_outputs.insert(
x.output_commitment(),
x.with_destination(None),
);
}
}
Some(x) => if !marked_txs.contains(&x.source_hash().unwrap()) {
self.available_outputs
.insert(x.output_commitment(), x.with_destination(None));
},
None => {
self.consumed_blockchain_outputs.remove(&input);
}

@ -303,15 +296,10 @@ impl Pool {

for output in tx.outputs.iter().map(|x| x.commitment()) {
match self.graph.remove_edge_by_commitment(&output) {
Some(x) => {
if !marked_txs.contains(&x.destination_hash().unwrap()) {
self.consumed_blockchain_outputs.insert(
x.output_commitment(),
x.with_source(None),
);
}
}
Some(x) => if !marked_txs.contains(&x.destination_hash().unwrap()) {
self.consumed_blockchain_outputs
.insert(x.output_commitment(), x.with_source(None));
},
None => {
self.available_outputs.remove(&output);
}

@ -413,14 +401,11 @@ impl Orphans {
is_missing: HashMap<usize, ()>,
mut new_unspents: Vec<graph::Edge>,
) {
// Removing consumed available_outputs
for (i, new_edge) in orphan_refs.drain(..).enumerate() {
if is_missing.contains_key(&i) {
self.missing_outputs.insert(
new_edge.output_commitment(),
new_edge,
);
self.missing_outputs
.insert(new_edge.output_commitment(), new_edge);
} else {
assert!(
self.available_outputs
@ -433,27 +418,21 @@ impl Orphans {

// Accounting for consumed blockchain and pool outputs
for external_edge in pool_refs.drain(..) {
self.pool_connections.insert(
external_edge.output_commitment(),
external_edge,
);
self.pool_connections
.insert(external_edge.output_commitment(), external_edge);
}

// if missing_refs is the same length as orphan_refs, we have
// no orphan-orphan links for this transaction and it is a
// root transaction of the orphans set
self.graph.add_vertex_only(
orphan_entry,
is_missing.len() == orphan_refs.len(),
);
self.graph
.add_vertex_only(orphan_entry, is_missing.len() == orphan_refs.len());
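Put differently (a tiny sketch, not the orphans set's real API): the transaction is a root of the orphan set when every one of its orphan references is missing, meaning none of them resolves to another orphan.

fn is_orphan_root(missing_inputs: usize, total_orphan_refs: usize) -> bool {
    // no orphan-to-orphan links means nothing in the orphan set feeds this tx
    missing_inputs == total_orphan_refs
}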

// Adding the new unspents to the unspent map
for unspent_output in new_unspents.drain(..) {
self.available_outputs.insert(
unspent_output.output_commitment(),
unspent_output,
);
self.available_outputs
.insert(unspent_output.output_commitment(), unspent_output);
}
}
}
@ -313,9 +313,9 @@ impl Miner {

/// Utility to transform 8 bytes of a byte array into a u64.
fn u8_to_u64(p: &[u8], i: usize) -> u64 {
(p[i] as u64) | (p[i + 1] as u64) << 8 | (p[i + 2] as u64) << 16 |
(p[i + 3] as u64) << 24 | (p[i + 4] as u64) << 32 | (p[i + 5] as u64) << 40 |
(p[i + 6] as u64) << 48 | (p[i + 7] as u64) << 56
(p[i] as u64) | (p[i + 1] as u64) << 8 | (p[i + 2] as u64) << 16 | (p[i + 3] as u64) << 24
| (p[i + 4] as u64) << 32 | (p[i + 5] as u64) << 40 | (p[i + 6] as u64) << 48
| (p[i + 7] as u64) << 56
}
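A quick sanity check of the helper above (assuming u8_to_u64 is in scope): byte i is the least significant, byte i+7 the most significant.

#[test]
fn u8_to_u64_is_little_endian() {
    let bytes = [0x01u8, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08];
    assert_eq!(u8_to_u64(&bytes, 0), 0x0807060504030201);
}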

#[cfg(test)]
@ -324,32 +324,183 @@ mod test {
use core::core::Proof;

static V1: [u32; 42] = [0x1fe9, 0x2050, 0x4581, 0x6322, 0x65ab, 0xb3c1, 0xc1a4, 0xe257,
0x106ae, 0x17b11, 0x202d4, 0x2705d, 0x2deb2, 0x2f80e, 0x32298,
0x34782, 0x35c5a, 0x37458, 0x38f28, 0x406b2, 0x40e34, 0x40fc6,
0x42220, 0x42d13, 0x46c0f, 0x4fd47, 0x55ad2, 0x598f7, 0x5aa8f,
0x62aa3, 0x65725, 0x65dcb, 0x671c7, 0x6eb20, 0x752fe, 0x7594f,
0x79b9c, 0x7f775, 0x81635, 0x8401c, 0x844e5, 0x89fa8];
static V2: [u32; 42] = [0x2a37, 0x7557, 0xa3c3, 0xfce6, 0x1248e, 0x15837, 0x1827f, 0x18a93,
0x1a7dd, 0x1b56b, 0x1ceb4, 0x1f962, 0x1fe2a, 0x29cb9, 0x2f30e,
0x2f771, 0x336bf, 0x34355, 0x391d7, 0x39495, 0x3be0c, 0x463be,
0x4d0c2, 0x4eead, 0x50214, 0x520de, 0x52a86, 0x53818, 0x53b3b,
0x54c0b, 0x572fa, 0x5d79c, 0x5e3c2, 0x6769e, 0x6a0fe, 0x6d835,
0x6fc7c, 0x70f03, 0x79d4a, 0x7b03e, 0x81e09, 0x9bd44];
static V3: [u32; 42] = [0x8158, 0x9f18, 0xc4ba, 0x108c7, 0x11caa, 0x13b82, 0x1618f, 0x1c83b,
0x1ec89, 0x24354, 0x28864, 0x2a0fb, 0x2ce50, 0x2e8fa, 0x32b36,
0x343e6, 0x34dc9, 0x36881, 0x3ffca, 0x40f79, 0x42721, 0x43b8c,
0x44b9d, 0x47ed3, 0x4cd34, 0x5278a, 0x5ab64, 0x5b4d4, 0x5d842,
0x5fa33, 0x6464e, 0x676ee, 0x685d6, 0x69df0, 0x6a5fd, 0x6bda3,
0x72544, 0x77974, 0x7908c, 0x80e67, 0x81ef4, 0x8d882];
// cuckoo28 at 50% edges of letter 'u'
static V4: [u32; 42] = [0x1CBBFD, 0x2C5452, 0x520338, 0x6740C5, 0x8C6997, 0xC77150, 0xFD4972,
0x1060FA7, 0x11BFEA0, 0x1343E8D, 0x14CE02A, 0x1533515, 0x1715E61,
0x1996D9B, 0x1CB296B, 0x1FCA180, 0x209A367, 0x20AD02E, 0x23CD2E4,
0x2A3B360, 0x2DD1C0C, 0x333A200, 0x33D77BC, 0x3620C78, 0x3DD7FB8,
0x3FBFA49, 0x41BDED2, 0x4A86FD9, 0x570DE24, 0x57CAB86, 0x594B886,
0x5C74C94, 0x5DE7572, 0x60ADD6F, 0x635918B, 0x6C9E120, 0x6EFA583,
0x7394ACA, 0x7556A23, 0x77F70AA, 0x7CF750A, 0x7F60790];
/// Find a 42-cycle on Cuckoo20 at 75% easiness and verify against a few
/// known cycle proofs
@ -384,13 +535,15 @@ mod test {
fn validate_fail() {
// edge checks
assert!(!Cuckoo::new(&[49], 20).verify(Proof::new(vec![0; 42]), 75));
assert!(!Cuckoo::new(&[49], 20).verify(Proof::new(vec![0xffff; 42]), 75));
assert!(!Cuckoo::new(&[49], 20)
.verify(Proof::new(vec![0xffff; 42]), 75));
// wrong data for proof
assert!(!Cuckoo::new(&[50], 20).verify(Proof::new(V1.to_vec().clone()), 75));
assert!(!Cuckoo::new(&[50], 20)
.verify(Proof::new(V1.to_vec().clone()), 75));
let mut test_header = [0; 32];
test_header[0] = 24;
assert!(!Cuckoo::new(&test_header, 20).verify(Proof::new(V4.to_vec().clone()), 50));
assert!(!Cuckoo::new(&test_header, 20)
.verify(Proof::new(V4.to_vec().clone()), 50));
}

#[test]
@ -29,15 +29,15 @@
#![warn(missing_docs)]

extern crate blake2_rfc as blake2;
extern crate rand;
extern crate time;
#[macro_use]
extern crate lazy_static;
#[macro_use]
extern crate slog;
extern crate rand;
extern crate serde;
#[macro_use]
extern crate serde_derive;
#[macro_use]
extern crate slog;
extern crate time;

extern crate grin_core as core;
extern crate grin_util as util;
@ -63,7 +63,9 @@ use cuckoo::{Cuckoo, Error};

pub trait MiningWorker {
/// This only sets parameters and does initialisation work now
fn new(ease: u32, sizeshift: u32, proof_size: usize) -> Self where Self: Sized;
fn new(ease: u32, sizeshift: u32, proof_size: usize) -> Self
where
Self: Sized;

/// Actually perform a mining attempt on the given input and
/// return a proof if found
@ -74,7 +76,7 @@ pub trait MiningWorker {
/// satisfies the requirements of the header.
pub fn verify_size(bh: &BlockHeader, cuckoo_sz: u32) -> bool {
// make sure the pow hash shows a difficulty at least as large as the target
// difficulty
if bh.difficulty > bh.pow.clone().to_difficulty() {
return false;
}
@ -83,10 +85,11 @@ pub fn verify_size(bh: &BlockHeader, cuckoo_sz: u32) -> bool {

/// Uses the much easier Cuckoo20 (mostly for
/// tests).
pub fn pow20<T: MiningWorker>(miner: &mut T,
bh: &mut BlockHeader,
diff: Difficulty)
-> Result<(), Error> {
pub fn pow20<T: MiningWorker>(
miner: &mut T,
bh: &mut BlockHeader,
diff: Difficulty,
) -> Result<(), Error> {
pow_size(miner, bh, diff, 20)
}
@ -104,16 +107,13 @@ pub fn mine_genesis_block(miner_config: Option<types::MinerConfig>) -> Option<co
let proof_size = global::proofsize();

let mut miner: Box<MiningWorker> = match miner_config {
Some(c) => {
if c.use_cuckoo_miner {
let mut p = plugin::PluginMiner::new(consensus::EASINESS, sz, proof_size);
p.init(c.clone());
Box::new(p)
} else {
Box::new(cuckoo::Miner::new(consensus::EASINESS, sz, proof_size))
}
}
Some(c) => if c.use_cuckoo_miner {
let mut p = plugin::PluginMiner::new(consensus::EASINESS, sz, proof_size);
p.init(c.clone());
Box::new(p)
} else {
Box::new(cuckoo::Miner::new(consensus::EASINESS, sz, proof_size))
},
None => Box::new(cuckoo::Miner::new(consensus::EASINESS, sz, proof_size)),
};
pow_size(&mut *miner, &mut gen.header, diff, sz as u32).unwrap();
@ -124,11 +124,12 @@ pub fn mine_genesis_block(miner_config: Option<types::MinerConfig>) -> Option<co
/// Mining Worker,
/// until the required difficulty target is reached. May take a while for a low
/// target...
pub fn pow_size<T: MiningWorker + ?Sized>(miner: &mut T,
bh: &mut BlockHeader,
diff: Difficulty,
_: u32)
-> Result<(), Error> {
pub fn pow_size<T: MiningWorker + ?Sized>(
miner: &mut T,
bh: &mut BlockHeader,
diff: Difficulty,
_: u32,
) -> Result<(), Error> {
let start_nonce = bh.nonce;

// if we're in production mode, try the pre-mined solution first
@ -143,11 +144,11 @@ pub fn pow_size<T: MiningWorker + ?Sized>(miner: &mut T,
// try to find a cuckoo cycle on that header hash
loop {
// can be trivially optimized by avoiding re-serialization every time but this
// is not meant as a fast miner implementation
let pow_hash = bh.hash();

// if we found a cycle (not guaranteed) and the proof hash is higher than the
// diff, we're all good

if let Ok(proof) = miner.mine(&pow_hash[..]) {
if proof.clone().to_difficulty() >= diff {
@ -160,7 +161,7 @@ pub fn pow_size<T: MiningWorker + ?Sized>(miner: &mut T,
bh.nonce += 1;

// and if we're back where we started, update the time (changes the hash as
// well)
if bh.nonce == start_nonce {
bh.timestamp = time::at_utc(time::Timespec { sec: 0, nsec: 0 });
}
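The nonce search above can be sketched as a self-contained loop (the attempt closure stands in for the real miner plus difficulty check; this is an illustration, not the module's API):

fn search(start_nonce: u64, mut attempt: impl FnMut(u64, u64) -> bool) -> (u64, u64) {
    let (mut nonce, mut timestamp) = (start_nonce, 0u64);
    loop {
        if attempt(nonce, timestamp) {
            return (nonce, timestamp); // found a cycle at or above the target difficulty
        }
        nonce = nonce.wrapping_add(1);
        if nonce == start_nonce {
            timestamp += 1; // wrapped around: perturb the time so the hash keeps changing
        }
    }
}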
@ -30,8 +30,8 @@ use util::LOGGER;

use std::sync::Mutex;

use cuckoo_miner::{CuckooMiner, CuckooPluginManager, CuckooMinerConfig, CuckooMinerSolution,
CuckooMinerDeviceStats, CuckooMinerError};
use cuckoo_miner::{CuckooMiner, CuckooMinerConfig, CuckooMinerDeviceStats, CuckooMinerError,
CuckooMinerSolution, CuckooPluginManager};

// For now, we're just going to keep a static reference around to the loaded
// config
@ -41,7 +41,7 @@ use cuckoo_miner::{CuckooMiner, CuckooPluginManager, CuckooMinerConfig, CuckooMi
// testing threads don't try to load/unload the library while another thread is
// using it.
lazy_static!{
static ref LOADED_CONFIG: Mutex<Option<Vec<CuckooMinerConfig>>> = Mutex::new(None);
}

/// plugin miner
@ -84,9 +84,9 @@ impl PluginMiner {
}

// First, load and query the plugins in the given directory
// These should all be stored in 'plugins' at the moment relative
// to the executable path, though they should appear somewhere else
// when packaging is more thought out

let mut loaded_config_ref = LOADED_CONFIG.lock().unwrap();
@ -117,7 +117,7 @@ impl PluginMiner {
let mut index = 0;
for f in plugin_vec_filters {
// So this is built dynamically based on the plugin implementation
// type and the consensus sizeshift
let filter = format!("{}_{}", f, sz);

let caps = plugin_manager.get_available_plugins(&filter).unwrap();
@ -141,7 +141,7 @@ impl PluginMiner {
index += 1;
}
// Store this config now, because we just want one instance
// of the plugin lib per invocation now
*loaded_config_ref = Some(cuckoo_configs.clone());

// this will load the associated plugin
@ -158,7 +158,6 @@ impl PluginMiner {

/// Get the miner
pub fn get_consumable(&mut self) -> CuckooMiner {
// this will load the associated plugin
let result = CuckooMiner::new(self.config.clone());
if let Err(e) = result {
@ -77,7 +77,7 @@ impl Default for MinerConfig {
cuckoo_miner_async_mode: None,
cuckoo_miner_plugin_dir: None,
cuckoo_miner_plugin_config: None,
wallet_receiver_url: "http://localhost:13416".to_string(),
wallet_receiver_url: "http://localhost:13415".to_string(),
burn_reward: false,
slow_down_in_millis: Some(0),
attempt_time_per_block: 2,
@ -14,34 +14,34 @@

//! Main for building the binary of a Grin peer-to-peer node.

#[macro_use]
extern crate slog;
extern crate blake2_rfc as blake2;
extern crate clap;
extern crate daemonize;
extern crate serde;
extern crate serde_json;
extern crate blake2_rfc as blake2;
#[macro_use]
extern crate slog;

extern crate grin_api as api;
extern crate grin_grin as grin;
extern crate grin_wallet as wallet;
extern crate grin_keychain as keychain;
extern crate grin_config as config;
extern crate grin_core as core;
extern crate grin_grin as grin;
extern crate grin_keychain as keychain;
extern crate grin_util as util;
extern crate grin_wallet as wallet;

use std::thread;
use std::io::Read;
use std::fs::File;
use std::time::Duration;

use clap::{Arg, App, SubCommand, ArgMatches};
use clap::{App, Arg, ArgMatches, SubCommand};
use daemonize::Daemonize;

use config::GlobalConfig;
use wallet::WalletConfig;
use core::global;
use util::{LoggingConfig, LOGGER, init_logger};
use util::{init_logger, LoggingConfig, LOGGER};

fn start_from_config_file(mut global_config: GlobalConfig) {
info!(
@ -68,16 +68,15 @@ fn start_from_config_file(mut global_config: GlobalConfig) {
|
|||
}
|
||||
|
||||
fn main() {
|
||||
|
||||
// First, load a global config object,
|
||||
// then modify that object with any switches
|
||||
// found so that the switches override the
|
||||
// global config file
|
||||
// then modify that object with any switches
|
||||
// found so that the switches override the
|
||||
// global config file
|
||||
|
||||
// This will return a global config object,
|
||||
// which will either contain defaults for all // of the config structures or a
|
||||
// configuration
|
||||
// read from a config file
|
||||
// which will either contain defaults for all // of the config structures or a
|
||||
// configuration
|
||||
// read from a config file
|
||||
|
||||
let mut global_config = GlobalConfig::new(None).unwrap_or_else(|e| {
|
||||
panic!("Error parsing config file: {}", e);
|
||||
|
@ -241,14 +240,12 @@ fn main() {
|
|||
}
|
||||
|
||||
// client commands and options
|
||||
("client", Some(client_args)) => {
|
||||
match client_args.subcommand() {
|
||||
("status", _) => {
|
||||
println!("status info...");
|
||||
}
|
||||
_ => panic!("Unknown client command, use 'grin help client' for details"),
|
||||
("client", Some(client_args)) => match client_args.subcommand() {
|
||||
("status", _) => {
|
||||
println!("status info...");
|
||||
}
|
||||
}
|
||||
_ => panic!("Unknown client command, use 'grin help client' for details"),
|
||||
},
|
||||
|
||||
// client commands and options
|
||||
("wallet", Some(wallet_args)) => {
|
||||
|
@ -263,7 +260,7 @@ fn main() {
|
|||
start_from_config_file(global_config);
|
||||
} else {
|
||||
// won't attempt to just start with defaults,
|
||||
// and will reject
|
||||
// and will reject
|
||||
println!("Unknown command, and no configuration file was found.");
|
||||
println!("Use 'grin help' for a list of all commands.");
|
||||
}
|
||||
|
@ -352,48 +349,44 @@ fn wallet_command(wallet_args: &ArgMatches) {
|
|||
}
|
||||
|
||||
// Derive the keychain based on seed from seed file and specified passphrase.
|
||||
// Generate the initial wallet seed if we are running "wallet init".
|
||||
// Generate the initial wallet seed if we are running "wallet init".
|
||||
if let ("init", Some(_)) = wallet_args.subcommand() {
|
||||
wallet::WalletSeed::init_file(&wallet_config)
|
||||
.expect("Failed to init wallet seed file.");
|
||||
wallet::WalletSeed::init_file(&wallet_config).expect("Failed to init wallet seed file.");
|
||||
|
||||
// we are done here with creating the wallet, so just return
|
||||
return;
|
||||
}
|
||||
|
||||
let wallet_seed = wallet::WalletSeed::from_file(&wallet_config)
|
||||
.expect("Failed to read wallet seed file.");
|
||||
let wallet_seed =
|
||||
wallet::WalletSeed::from_file(&wallet_config).expect("Failed to read wallet seed file.");
|
||||
let passphrase = wallet_args
|
||||
.value_of("pass")
|
||||
.expect("Failed to read passphrase.");
|
||||
let keychain = wallet_seed.derive_keychain(&passphrase)
|
||||
let keychain = wallet_seed
|
||||
.derive_keychain(&passphrase)
|
||||
.expect("Failed to derive keychain from seed file and passphrase.");
|
||||
|
||||
match wallet_args.subcommand() {
|
||||
("receive", Some(receive_args)) => {
|
||||
if let Some(f) = receive_args.value_of("input") {
|
||||
let mut file = File::open(f).expect("Unable to open transaction file.");
|
||||
let mut contents = String::new();
|
||||
file.read_to_string(&mut contents).expect(
|
||||
"Unable to read transaction file.",
|
||||
);
|
||||
wallet::receive_json_tx(&wallet_config, &keychain, contents.as_str()).unwrap();
|
||||
} else {
|
||||
wallet::server::start_rest_apis(wallet_config, keychain);
|
||||
}
|
||||
}
|
||||
("receive", Some(receive_args)) => if let Some(f) = receive_args.value_of("input") {
|
||||
let mut file = File::open(f).expect("Unable to open transaction file.");
|
||||
let mut contents = String::new();
|
||||
file.read_to_string(&mut contents)
|
||||
.expect("Unable to read transaction file.");
|
||||
wallet::receive_json_tx(&wallet_config, &keychain, contents.as_str()).unwrap();
|
||||
} else {
|
||||
wallet::server::start_rest_apis(wallet_config, keychain);
|
||||
},
|
||||
("send", Some(send_args)) => {
|
||||
let amount = send_args
|
||||
.value_of("amount")
|
||||
.expect("Amount to send required")
|
||||
.parse()
|
||||
.expect("Could not parse amount as a whole number.");
|
||||
let minimum_confirmations: u64 =
|
||||
send_args
|
||||
.value_of("minimum_confirmations")
|
||||
.unwrap_or("1")
|
||||
.parse()
|
||||
.expect("Could not parse minimum_confirmations as a whole number.");
|
||||
let minimum_confirmations: u64 = send_args
|
||||
.value_of("minimum_confirmations")
|
||||
.unwrap_or("1")
|
||||
.parse()
|
||||
.expect("Could not parse minimum_confirmations as a whole number.");
|
||||
let mut dest = "stdout";
|
||||
if let Some(d) = send_args.value_of("dest") {
|
||||
dest = d;
|
||||
|
@ -412,12 +405,11 @@ fn wallet_command(wallet_args: &ArgMatches) {
|
|||
.expect("Amount to burn required")
|
||||
.parse()
|
||||
.expect("Could not parse amount as a whole number.");
|
||||
let minimum_confirmations: u64 =
|
||||
send_args
|
||||
.value_of("minimum_confirmations")
|
||||
.unwrap_or("1")
|
||||
.parse()
|
||||
.expect("Could not parse minimum_confirmations as a whole number.");
|
||||
let minimum_confirmations: u64 = send_args
|
||||
.value_of("minimum_confirmations")
|
||||
.unwrap_or("1")
|
||||
.parse()
|
||||
.expect("Could not parse minimum_confirmations as a whole number.");
|
||||
wallet::issue_burn_tx(&wallet_config, &keychain, amount, minimum_confirmations)
|
||||
.unwrap();
|
||||
}
|
||||
|
|
|
@ -21,14 +21,14 @@
|
|||
#![warn(missing_docs)]
|
||||
|
||||
extern crate byteorder;
|
||||
extern crate env_logger;
|
||||
extern crate grin_core as core;
|
||||
extern crate grin_util as util;
|
||||
extern crate libc;
|
||||
#[macro_use]
|
||||
extern crate slog;
|
||||
extern crate env_logger;
|
||||
extern crate memmap;
|
||||
extern crate rocksdb;
|
||||
#[macro_use]
|
||||
extern crate slog;
|
||||
|
||||
pub mod sumtree;
|
||||
|
||||
|
@ -39,8 +39,8 @@ use std::iter::Iterator;
|
|||
use std::marker::PhantomData;
|
||||
use std::sync::RwLock;
|
||||
|
||||
use byteorder::{WriteBytesExt, BigEndian};
|
||||
use rocksdb::{DB, WriteBatch, DBCompactionStyle, DBIterator, IteratorMode, Direction};
|
||||
use byteorder::{BigEndian, WriteBytesExt};
|
||||
use rocksdb::{DBCompactionStyle, DBIterator, Direction, IteratorMode, WriteBatch, DB};
|
||||
|
||||
use core::ser;
|
||||
|
||||
|
@ -89,7 +89,9 @@ impl Store {
|
|||
opts.set_max_open_files(256);
|
||||
opts.set_use_fsync(false);
|
||||
let db = try!(DB::open(&opts, &path));
|
||||
Ok(Store { rdb: RwLock::new(db) })
|
||||
Ok(Store {
|
||||
rdb: RwLock::new(db),
|
||||
})
|
||||
}
|
||||
|
||||
/// Writes a single key/value pair to the db
|
||||
|
@ -125,10 +127,11 @@ impl Store {
|
|||
/// Gets a `Readable` value from the db, provided its key, allowing to
|
||||
/// extract only partial data. The underlying Readable size must align
|
||||
/// accordingly. Encapsulates serialization.
|
||||
pub fn get_ser_limited<T: ser::Readable>(&self,
|
||||
key: &[u8],
|
||||
len: usize)
|
||||
-> Result<Option<T>, Error> {
|
||||
pub fn get_ser_limited<T: ser::Readable>(
|
||||
&self,
|
||||
key: &[u8],
|
||||
len: usize,
|
||||
) -> Result<Option<T>, Error> {
|
||||
let data = try!(self.get(key));
|
||||
match data {
|
||||
Some(val) => {
|
||||
|
@ -213,14 +216,16 @@ impl<'a> Batch<'a> {
|
|||
/// An iterator thad produces Readable instances back. Wraps the lower level
|
||||
/// DBIterator and deserializes the returned values.
|
||||
pub struct SerIterator<T>
|
||||
where T: ser::Readable
|
||||
where
|
||||
T: ser::Readable,
|
||||
{
|
||||
iter: DBIterator,
|
||||
_marker: PhantomData<T>,
|
||||
}
|
||||
|
||||
impl<T> Iterator for SerIterator<T>
|
||||
where T: ser::Readable
|
||||
where
|
||||
T: ser::Readable,
|
||||
{
|
||||
type Item = T;
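This where-clause reflow is the most frequent rustfmt change in the commit; a minimal before/after sketch with illustrative names, not code from the repo:

// Before (bound condensed onto the where line):
// fn load_all<T>(items: &[T]) -> usize
//     where T: Clone
// { ... }

// After (block-style where clause with trailing comma, as rustfmt now emits):
fn load_all<T>(items: &[T]) -> usize
where
    T: Clone,
{
    items.len()
}

fn main() {
    assert_eq!(load_all(&[1, 2, 3]), 3);
}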
@@ -17,17 +17,17 @@ use memmap;

use std::cmp;
use std::fs::{self, File, OpenOptions};
use std::io::{self, Write, BufReader, BufRead, ErrorKind};
use std::io::{self, BufRead, BufReader, ErrorKind, Write};
use std::os::unix::io::AsRawFd;
use std::path::Path;
use std::io::Read;

#[cfg(any(target_os = "linux"))]
use libc::{off64_t, ftruncate64};
use libc::{ftruncate64, off64_t};
#[cfg(not(any(target_os = "linux", target_os = "android")))]
use libc::{off_t as off64_t, ftruncate as ftruncate64};
use libc::{ftruncate as ftruncate64, off_t as off64_t};

use core::core::pmmr::{self, Summable, Backend, HashSum, VecBackend};
use core::core::pmmr::{self, Backend, HashSum, Summable, VecBackend};
use core::ser;
use util::LOGGER;

@@ -116,7 +116,7 @@ impl AppendOnlyFile {
} as u64;

// write the buffer, except if we prune offsets in the current span,
// in which case we skip
// in which case we skip
let mut buf_start = 0;
while prune_offs[prune_pos] >= read && prune_offs[prune_pos] < read + len {
let prune_at = prune_offs[prune_pos] as usize;

@@ -282,10 +282,8 @@ where
/// Append the provided HashSums to the backend storage.
#[allow(unused_variables)]
fn append(&mut self, position: u64, data: Vec<HashSum<T>>) -> Result<(), String> {
self.buffer.append(
position - (self.buffer_index as u64),
data.clone(),
)?;
self.buffer
.append(position - (self.buffer_index as u64), data.clone())?;
Ok(())
}

@@ -330,9 +328,9 @@ where

fn rewind(&mut self, position: u64, index: u32) -> Result<(), String> {
assert!(self.buffer.len() == 0, "Rewind on non empty buffer.");
self.remove_log.truncate(index).map_err(|e| {
format!("Could not truncate remove log: {}", e)
})?;
self.remove_log
.truncate(index)
.map_err(|e| format!("Could not truncate remove log: {}", e))?;
self.rewind = Some((position, index, self.buffer_index));
self.buffer_index = position as usize;
Ok(())

@@ -343,8 +341,7 @@ where
if self.buffer.used_size() > 0 {
for position in &positions {
let pos_sz = *position as usize;
if pos_sz > self.buffer_index &&
pos_sz - 1 < self.buffer_index + self.buffer.len()
if pos_sz > self.buffer_index && pos_sz - 1 < self.buffer_index + self.buffer.len()
{
self.buffer.remove(vec![*position], index).unwrap();
}

@@ -375,7 +372,9 @@ where
remove_log: rm_log,
buffer: VecBackend::new(),
buffer_index: (sz as usize) / record_len,
pruned_nodes: pmmr::PruneList { pruned_nodes: prune_list },
pruned_nodes: pmmr::PruneList {
pruned_nodes: prune_list,
},
rewind: None,
})
}

@@ -403,10 +402,7 @@ where
if let Err(e) = self.hashsum_file.append(&ser::ser_vec(&hs).unwrap()[..]) {
return Err(io::Error::new(
io::ErrorKind::Interrupted,
format!(
"Could not write to log storage, disk full? {:?}",
e
),
format!("Could not write to log storage, disk full? {:?}", e),
));
}
}

@@ -442,28 +438,28 @@ where
/// TODO whatever is calling this should also clean up the commit to
/// position index in db
pub fn check_compact(&mut self, max_len: usize) -> io::Result<()> {
if !(max_len > 0 && self.remove_log.len() > max_len ||
max_len == 0 && self.remove_log.len() > RM_LOG_MAX_NODES)
if !(max_len > 0 && self.remove_log.len() > max_len
|| max_len == 0 && self.remove_log.len() > RM_LOG_MAX_NODES)
{
return Ok(());
}
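The compaction trigger above is easy to misread because of operator precedence; a self-contained sketch of the same predicate, with plain arguments standing in for the struct fields in the diff:

// A minimal sketch of the check_compact trigger condition.
fn should_compact(remove_log_len: usize, max_len: usize, rm_log_max_nodes: usize) -> bool {
    // && binds tighter than ||, so this reads as:
    // (a caller cap is set and exceeded) or (no cap set and the built-in cap exceeded)
    max_len > 0 && remove_log_len > max_len
        || max_len == 0 && remove_log_len > rm_log_max_nodes
}

fn main() {
    assert!(should_compact(11, 10, 100));  // caller cap of 10 exceeded
    assert!(!should_compact(11, 0, 100));  // no caller cap, built-in cap not reached
    assert!(should_compact(101, 0, 100));  // built-in cap exceeded
}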
// 0. validate none of the nodes in the rm log are in the prune list (to
// avoid accidental double compaction)
// avoid accidental double compaction)
for pos in &self.remove_log.removed[..] {
if let None = self.pruned_nodes.pruned_pos(pos.0) {
// TODO we likely can recover from this by directly jumping to 3
error!(
LOGGER,
"The remove log contains nodes that are already in the pruned \
list, a previous compaction likely failed."
list, a previous compaction likely failed."
);
return Ok(());
}
}

// 1. save hashsum file to a compact copy, skipping data that's in the
// remove list
// remove list
let tmp_prune_file = format!("{}/{}.prune", self.data_dir, PMMR_DATA_FILE);
let record_len = (32 + T::sum_len()) as u64;
let to_rm = self.remove_log

@@ -474,11 +470,8 @@ where
(pos - 1 - shift.unwrap()) * record_len
})
.collect();
self.hashsum_file.save_prune(
tmp_prune_file.clone(),
to_rm,
record_len,
)?;
self.hashsum_file
.save_prune(tmp_prune_file.clone(), to_rm, record_len)?;

// 2. update the prune list and save it in place
for &(rm_pos, _) in &self.remove_log.removed[..] {

@@ -510,7 +503,6 @@ fn read_ordered_vec<T>(path: String) -> io::Result<Vec<T>>
where
T: ser::Readable + cmp::Ord,
{

let file_path = Path::new(&path);
let mut ovec = Vec::with_capacity(1000);
if file_path.exists() {

@@ -524,20 +516,15 @@ where
}
let elmts_res: Result<Vec<T>, ser::Error> = ser::deserialize(&mut &buf[..]);
match elmts_res {
Ok(elmts) => {
for elmt in elmts {
if let Err(idx) = ovec.binary_search(&elmt) {
ovec.insert(idx, elmt);
}
Ok(elmts) => for elmt in elmts {
if let Err(idx) = ovec.binary_search(&elmt) {
ovec.insert(idx, elmt);
}
}
},
Err(_) => {
return Err(io::Error::new(
io::ErrorKind::InvalidData,
format!(
"Corrupted storage, could not read file at {}",
path
),
format!("Corrupted storage, could not read file at {}", path),
));
}
}

@@ -553,7 +540,6 @@ fn write_vec<T>(path: String, v: &Vec<T>) -> io::Result<()>
where
T: ser::Writeable,
{

let mut file_path = File::create(&path)?;
ser::serialize(&mut file_path, v).map_err(|_| {
io::Error::new(

@@ -20,7 +20,7 @@ extern crate time;
use std::fs;

use core::ser::*;
use core::core::pmmr::{PMMR, Summable, HashSum, Backend};
use core::core::pmmr::{Backend, HashSum, Summable, PMMR};
use core::core::hash::Hashed;

#[test]

@@ -48,11 +48,16 @@ fn sumtree_append() {
})
);

let sum2 = HashSum::from_summable(1, &elems[0], None::<TestElem>) + HashSum::from_summable(2, &elems[1], None::<TestElem>);
let sum4 = sum2 + (HashSum::from_summable(4, &elems[2], None::<TestElem>) + HashSum::from_summable(5, &elems[3], None::<TestElem>));
let sum8 = sum4 +
((HashSum::from_summable(8, &elems[4], None::<TestElem>) + HashSum::from_summable(9, &elems[5], None::<TestElem>)) +
(HashSum::from_summable(11, &elems[6], None::<TestElem>) + HashSum::from_summable(12, &elems[7], None::<TestElem>)));
let sum2 = HashSum::from_summable(1, &elems[0], None::<TestElem>)
+ HashSum::from_summable(2, &elems[1], None::<TestElem>);
let sum4 = sum2
+ (HashSum::from_summable(4, &elems[2], None::<TestElem>)
+ HashSum::from_summable(5, &elems[3], None::<TestElem>));
let sum8 = sum4
+ ((HashSum::from_summable(8, &elems[4], None::<TestElem>)
+ HashSum::from_summable(9, &elems[5], None::<TestElem>))
+ (HashSum::from_summable(11, &elems[6], None::<TestElem>)
+ HashSum::from_summable(12, &elems[7], None::<TestElem>)));
let sum9 = sum8 + HashSum::from_summable(16, &elems[8], None::<TestElem>);

{

@@ -177,7 +182,7 @@ fn sumtree_rewind() {
}
backend.check_compact(1).unwrap();
backend.sync().unwrap();

// rewind and check the roots still match
{
let mut pmmr = PMMR::at(&mut backend, mmr_size);

@@ -223,7 +228,6 @@ fn setup() -> (String, Vec<TestElem>) {
}

fn load(pos: u64, elems: &[TestElem], backend: &mut store::sumtree::PMMRBackend<TestElem>) -> u64 {

let mut pmmr = PMMR::at(backend, pos);
for elem in elems {
pmmr.push(elem.clone(), None::<TestElem>).unwrap();

@@ -237,9 +241,9 @@ impl Summable for TestElem {
type Sum = u64;
fn sum(&self) -> u64 {
// sums are not allowed to overflow, so we use this simple
// non-injective "sum" function that will still be homomorphic
self.0[0] as u64 * 0x1000 + self.0[1] as u64 * 0x100 + self.0[2] as u64 * 0x10 +
self.0[3] as u64
// non-injective "sum" function that will still be homomorphic
self.0[0] as u64 * 0x1000 + self.0[1] as u64 * 0x100 + self.0[2] as u64 * 0x10
+ self.0[3] as u64
}
fn sum_len() -> usize {
8
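The test element's sum packs the four components into hex digits of a u64, so parent sums in the MMR equal the sum of their children's sums. A quick standalone sanity check using the same formula (illustrative, not part of the test file):

// Illustrative check of the TestElem sum formula above.
fn test_sum(e: [u32; 4]) -> u64 {
    e[0] as u64 * 0x1000 + e[1] as u64 * 0x100 + e[2] as u64 * 0x10 + e[3] as u64
}

fn main() {
    assert_eq!(test_sum([0, 0, 0, 1]), 0x1);
    assert_eq!(test_sum([1, 2, 3, 4]), 0x1234);
    // Homomorphic in the sense the test relies on: the sum of two elements'
    // sums equals the sum a parent node would carry.
    assert_eq!(test_sum([0, 0, 0, 1]) + test_sum([0, 0, 0, 2]), 0x3);
}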
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

//! Logging, as well as various low-level utilities that factor Rust
//! Logging, as well as various low-level utilities that factor Rust
//! patterns that are frequent within the grin codebase.

#![deny(non_upper_case_globals)]

@@ -23,8 +23,8 @@

#[macro_use]
extern crate slog;
extern crate slog_term;
extern crate slog_async;
extern crate slog_term;

#[macro_use]
extern crate lazy_static;

@@ -39,13 +39,13 @@ pub use secp_ as secp;

// Logging related
pub mod logger;
pub use logger::{LOGGER, init_logger, init_test_logger};
pub use logger::{init_logger, init_test_logger, LOGGER};

pub mod types;
pub use types::LoggingConfig;

// other utils
use std::cell::{RefCell, Ref};
use std::cell::{Ref, RefCell};
#[allow(unused_imports)]
use std::ops::Deref;

@@ -68,7 +68,9 @@ unsafe impl<T> Send for OneTime<T> {}
impl<T> OneTime<T> {
/// Builds a new uninitialized OneTime.
pub fn new() -> OneTime<T> {
OneTime { inner: RefCell::new(None) }
OneTime {
inner: RefCell::new(None),
}
}

/// Initializes the OneTime, should only be called once after construction.

@@ -15,7 +15,7 @@
use std::fs::OpenOptions;
use std::sync::Mutex;
use std::ops::Deref;
use slog::{Logger, Drain, Level, LevelFilter, Duplicate, Discard};
use slog::{Discard, Drain, Duplicate, Level, LevelFilter, Logger};
use slog_term;
use slog_async;

@@ -53,7 +53,7 @@ lazy_static! {
if !config.log_to_stdout || !was_init {
terminal_drain = slog_async::Async::new(Discard{}).build().fuse();
}

let mut file_drain_final = slog_async::Async::new(Discard{}).build().fuse();

if config.log_to_file && was_init {

@@ -93,12 +93,11 @@ pub fn init_logger(config: Option<LoggingConfig>) {

/// Initializes the logger for unit and integration tests
pub fn init_test_logger() {
let mut was_init_ref = WAS_INIT.lock().unwrap();
let mut was_init_ref = WAS_INIT.lock().unwrap();
if *was_init_ref.deref() {
return;
}
let mut config_ref = LOGGING_CONFIG.lock().unwrap();
*config_ref = LoggingConfig::default();
*was_init_ref = true;
let mut config_ref = LOGGING_CONFIG.lock().unwrap();
*config_ref = LoggingConfig::default();
*was_init_ref = true;
}

@@ -34,37 +34,34 @@ fn refresh_output(out: &mut OutputData, api_out: &api::Output) {
match out.status {
OutputStatus::Unconfirmed => {
out.status = OutputStatus::Unspent;
},
}
_ => (),
}
}

// Transitions a local wallet output (based on it not being in the node utxo set) -
// Transitions a local wallet output (based on it not being in the node utxo
// set) -
// Unspent -> Spent
// Locked -> Spent
fn mark_spent_output(out: &mut OutputData) {
match out.status {
OutputStatus::Unspent | OutputStatus::Locked => {
out.status = OutputStatus::Spent
},
OutputStatus::Unspent | OutputStatus::Locked => out.status = OutputStatus::Spent,
_ => (),
}
}

/// Builds a single api query to retrieve the latest output data from the node.
/// So we can refresh the local wallet outputs.
pub fn refresh_outputs(
config: &WalletConfig,
keychain: &Keychain,
) -> Result<(), Error> {
pub fn refresh_outputs(config: &WalletConfig, keychain: &Keychain) -> Result<(), Error> {
debug!(LOGGER, "Refreshing wallet outputs");
let mut wallet_outputs: HashMap<pedersen::Commitment, Identifier> = HashMap::new();
let mut commits: Vec<pedersen::Commitment> = vec![];

// build a local map of wallet outputs by commits
// and a list of outputs we wantot query the node for
// and a list of outputs we wantot query the node for
let _ = WalletData::read_wallet(&config.data_file_dir, |wallet_data| {
for out in wallet_data.outputs
for out in wallet_data
.outputs
.values()
.filter(|out| out.root_key_id == keychain.root_key_id())
.filter(|out| out.status != OutputStatus::Spent)

@@ -77,7 +74,7 @@ pub fn refresh_outputs(
});

// build the necessary query params -
// ?id=xxx&id=yyy&id=zzz
// ?id=xxx&id=yyy&id=zzz
let query_params: Vec<String> = commits
.iter()
.map(|commit| {

@@ -88,7 +85,7 @@ pub fn refresh_outputs(
let query_string = query_params.join("&");

let url = format!(
"{}/v2/chain/utxos?{}",
"{}/v1/chain/utxos?{}",
config.check_node_api_http_addr,
query_string,
);
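The utxo query fans a list of commitments into repeated id params, per the comment above. A self-contained sketch of that shape, with short hex strings standing in for real commitments:

fn main() {
    // Stand-ins for hex-encoded Pedersen commitments.
    let commits = vec!["a1b2", "c3d4", "e5f6"];
    let query_params: Vec<String> = commits.iter().map(|c| format!("id={}", c)).collect();
    let query_string = query_params.join("&");
    let url = format!("{}/v1/chain/utxos?{}", "http://127.0.0.1:13413", query_string);
    assert_eq!(url, "http://127.0.0.1:13413/v1/chain/utxos?id=a1b2&id=c3d4&id=e5f6");
}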
@@ -96,32 +93,28 @@ pub fn refresh_outputs(

// build a map of api outputs by commit so we can look them up efficiently
let mut api_outputs: HashMap<pedersen::Commitment, api::Output> = HashMap::new();
match api::client::get::<Vec<api::Output>>(url.as_str()) {
Ok(outputs) => {
for out in outputs {
api_outputs.insert(out.commit, out);
}
Ok(outputs) => for out in outputs {
api_outputs.insert(out.commit, out);
},
Err(_) => {},
Err(_) => {}
};

// now for each commit, find the output in the wallet and
// the corresponding api output (if it exists)
// and refresh it in-place in the wallet.
// Note: minimizing the time we spend holding the wallet lock.
WalletData::with_wallet(&config.data_file_dir, |wallet_data| {
for commit in commits {
let id = wallet_outputs.get(&commit).unwrap();
if let Entry::Occupied(mut output) = wallet_data.outputs.entry(id.to_hex()) {
match api_outputs.get(&commit) {
Some(api_output) => refresh_output(&mut output.get_mut(), api_output),
None => mark_spent_output(&mut output.get_mut()),
};
}
// the corresponding api output (if it exists)
// and refresh it in-place in the wallet.
// Note: minimizing the time we spend holding the wallet lock.
WalletData::with_wallet(&config.data_file_dir, |wallet_data| for commit in commits {
let id = wallet_outputs.get(&commit).unwrap();
if let Entry::Occupied(mut output) = wallet_data.outputs.entry(id.to_hex()) {
match api_outputs.get(&commit) {
Some(api_output) => refresh_output(&mut output.get_mut(), api_output),
None => mark_spent_output(&mut output.get_mut()),
};
}
})
}

pub fn get_tip_from_node(config: &WalletConfig) -> Result<api::Tip, Error> {
let url = format!("{}/v2/chain", config.check_node_api_http_addr);
let url = format!("{}/v1/chain", config.check_node_api_http_addr);
api::client::get::<api::Tip>(url.as_str()).map_err(|e| Error::Node(e))
}

@@ -33,7 +33,10 @@ pub fn create_coinbase(url: &str, block_fees: &BlockFees) -> Result<CbData, Erro
retry_backoff_forever(|| {
let res = single_create_coinbase(&url, &block_fees);
if let Err(_) = res {
error!(LOGGER, "Failed to get coinbase via wallet API (will retry)...");
error!(
LOGGER,
"Failed to get coinbase via wallet API (will retry)..."
);
}
res
})

@@ -41,11 +44,12 @@ pub fn create_coinbase(url: &str, block_fees: &BlockFees) -> Result<CbData, Erro

/// Runs the specified function wrapped in some basic retry logic.
fn retry_backoff_forever<F, R>(f: F) -> Result<R, Error>
where F: (FnMut() -> Result<R, Error>)
where
F: FnMut() -> Result<R, Error>,
{
let mut core = reactor::Core::new()?;
let retry_strategy = FibonacciBackoff::from_millis(100)
.max_delay(time::Duration::from_secs(10));
let retry_strategy =
FibonacciBackoff::from_millis(100).max_delay(time::Duration::from_secs(10));
let retry_future = Retry::spawn(core.handle(), retry_strategy, f);
let res = core.run(retry_future).unwrap();
Ok(res)
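The backoff above starts at 100ms and caps each delay at 10s. A dependency-free sketch of the delay sequence this implies, assuming the usual Fibonacci progression (the exact schedule is tokio-retry's, so treat these numbers as illustrative):

fn main() {
    // First few delays of a Fibonacci backoff starting at 100ms, capped at 10s.
    let (mut a, mut b) = (100u64, 100u64); // milliseconds
    let cap = 10_000;
    for _ in 0..10 {
        println!("{} ms", a.min(cap));
        let next = a + b;
        a = b;
        b = next;
    }
    // prints 100, 100, 200, 300, 500, 800, 1300, 2100, 3400, 5500 ...
}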
@@ -63,8 +67,8 @@ fn single_create_coinbase(url: &str, block_fees: &BlockFees) -> Result<CbData, E

let work = client.request(req).and_then(|res| {
res.body().concat2().and_then(move |body| {
let coinbase: CbData = serde_json::from_slice(&body)
.map_err(|e| {io::Error::new(io::ErrorKind::Other, e)})?;
let coinbase: CbData =
serde_json::from_slice(&body).map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
Ok(coinbase)
})
});

@@ -33,13 +33,10 @@ pub struct CoinbaseHandler {

impl CoinbaseHandler {
fn build_coinbase(&self, block_fees: &BlockFees) -> Result<CbData, Error> {
let (out, kern, block_fees) = receive_coinbase(
&self.config,
&self.keychain,
block_fees,
).map_err(|e| {
api::Error::Internal(format!("Error building coinbase: {:?}", e))
})?;
let (out, kern, block_fees) = receive_coinbase(&self.config, &self.keychain, block_fees)
.map_err(|e| {
api::Error::Internal(format!("Error building coinbase: {:?}", e))
})?;

let out_bin = ser::ser_vec(&out).map_err(|e| {
api::Error::Internal(format!("Error serializing output: {:?}", e))

@@ -50,13 +47,9 @@ impl CoinbaseHandler {
})?;

let key_id_bin = match block_fees.key_id {
Some(key_id) => {
ser::ser_vec(&key_id).map_err(|e| {
api::Error::Internal(
format!("Error serializing kernel: {:?}", e),
)
})?
}
Some(key_id) => ser::ser_vec(&key_id).map_err(|e| {
api::Error::Internal(format!("Error serializing kernel: {:?}", e))
})?,
None => vec![],
};

@@ -68,7 +61,8 @@ impl CoinbaseHandler {
}
}

// TODO - error handling - what to return if we fail to get the wallet lock for some reason...
// TODO - error handling - what to return if we fail to get the wallet lock for
// some reason...
impl Handler for CoinbaseHandler {
fn handle(&self, req: &mut Request) -> IronResult<Response> {
let struct_body = req.get::<bodyparser::Struct<BlockFees>>();

@@ -22,17 +22,14 @@ pub fn show_info(config: &WalletConfig, keychain: &Keychain) {

// just read the wallet here, no need for a write lock
let _ = WalletData::read_wallet(&config.data_file_dir, |wallet_data| {

// get the current height via the api
// if we cannot get the current height use the max height known to the wallet
// if we cannot get the current height use the max height known to the wallet
let current_height = match checker::get_tip_from_node(config) {
Ok(tip) => tip.height,
Err(_) => {
match wallet_data.outputs.values().map(|out| out.height).max() {
Some(height) => height,
None => 0,
}
}
Err(_) => match wallet_data.outputs.values().map(|out| out.height).max() {
Some(height) => height,
None => 0,
},
};

println!("Outputs - ");

@@ -14,24 +14,24 @@

//! Library module for the main wallet functionalities provided by Grin.

extern crate byteorder;
extern crate blake2_rfc as blake2;
#[macro_use]
extern crate slog;
extern crate byteorder;
extern crate rand;
extern crate serde;
#[macro_use]
extern crate serde_derive;
extern crate serde_json;
#[macro_use]
extern crate slog;

extern crate bodyparser;
extern crate futures;
extern crate tokio_core;
extern crate tokio_retry;
extern crate hyper;
extern crate iron;
#[macro_use]
extern crate router;
extern crate tokio_core;
extern crate tokio_retry;

extern crate grin_api as api;
extern crate grin_core as core;

@@ -48,6 +48,6 @@ pub mod client;
pub mod server;

pub use info::show_info;
pub use receiver::{WalletReceiver, receive_json_tx};
pub use sender::{issue_send_tx, issue_burn_tx};
pub use receiver::{receive_json_tx, WalletReceiver};
pub use sender::{issue_burn_tx, issue_send_tx};
pub use types::{BlockFees, CbData, Error, WalletConfig, WalletReceiveRequest, WalletSeed};

@@ -15,45 +15,18 @@
//! Provides the JSON/HTTP API for wallets to receive payments. Because
//! receiving money in MimbleWimble requires an interactive exchange, a
//! wallet server that's running at all time is required in many cases.
//!
//! The API looks like this:
//!
//! POST /v1/wallet/receive
//! > {
//! > "amount": 10,
//! > "blind_sum": "a12b7f...",
//! > "tx": "f083de...",
//! > }
//!
//! < {
//! < "tx": "f083de...",
//! < "status": "ok"
//! < }
//!
//! POST /v1/wallet/finalize
//! > {
//! > "tx": "f083de...",
//! > }
//!
//! POST /v1/wallet/receive_coinbase
//! > {
//! > "amount": 1,
//! > }
//!
//! < {
//! < "output": "8a90bc...",
//! < "kernel": "f083de...",
//! < }
//!
//! Note that while at this point the finalize call is completely unecessary, a
//! double-exchange will be required as soon as we support Schnorr signatures.
//! So we may as well have it in place already.

use std::io::Read;

use core::consensus::reward;
use core::core::{Block, Transaction, TxKernel, Output, build};
use core::core::{build, Block, Output, Transaction, TxKernel};
use core::ser;
use api::{self, ApiEndpoint, Operation, ApiResult};
use api;
use iron::prelude::*;
use iron::Handler;
use iron::status;
use keychain::{BlindingFactor, Identifier, Keychain};
use serde_json;
use types::*;
use util;
use util::LOGGER;
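Per the module doc comment above, the receive call is a plain JSON POST. A hedged client-side sketch of the request body using serde_json; the field values are the doc comment's own placeholders, and the real wire format is whatever WalletReceiveRequest serializes to:

use serde_json::json;

fn main() {
    // Illustrative body for POST /v1/wallet/receive, mirroring the doc
    // comment; real values are hex-serialized wallet data.
    let body = json!({
        "amount": 10,
        "blind_sum": "a12b7f...",
        "tx": "f083de...",
    });
    println!("POST /v1/wallet/receive\n{}", body);
}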
@@ -77,8 +50,8 @@ pub fn receive_json_tx(
let tx_hex = util::to_hex(ser::ser_vec(&final_tx).unwrap());

let url = format!("{}/v1/pool/push", config.check_node_api_http_addr.as_str());
let _: () = api::client::post(url.as_str(), &TxWrapper { tx_hex: tx_hex })
.map_err(|e| Error::Node(e))?;
let _: () =
api::client::post(url.as_str(), &TxWrapper { tx_hex: tx_hex }).map_err(|e| Error::Node(e))?;
Ok(())
}

@@ -90,46 +63,27 @@ pub struct WalletReceiver {
pub config: WalletConfig,
}

impl ApiEndpoint for WalletReceiver {
type ID = String;
type T = String;
type OP_IN = WalletReceiveRequest;
type OP_OUT = CbData;
impl Handler for WalletReceiver {
fn handle(&self, req: &mut Request) -> IronResult<Response> {
let receive: WalletReceiveRequest = serde_json::from_reader(req.body.by_ref())
.map_err(|e| IronError::new(e, status::BadRequest))?;

fn operations(&self) -> Vec<Operation> {
vec![Operation::Custom("receive_json_tx".to_string())]
}
match receive {
WalletReceiveRequest::PartialTransaction(partial_tx_str) => {
debug!(LOGGER, "Receive with transaction {}", &partial_tx_str,);
receive_json_tx(&self.config, &self.keychain, &partial_tx_str)
.map_err(|e| {
api::Error::Internal(
format!("Error processing partial transaction: {:?}", e),
)
})
.unwrap();

fn operation(&self, op: String, input: WalletReceiveRequest) -> ApiResult<CbData> {
match op.as_str() {
"receive_json_tx" => {
match input {
WalletReceiveRequest::PartialTransaction(partial_tx_str) => {
debug!(
LOGGER,
"Operation {} with transaction {}",
op,
&partial_tx_str,
);
receive_json_tx(&self.config, &self.keychain, &partial_tx_str)
.map_err(|e| {
api::Error::Internal(
format!("Error processing partial transaction: {:?}", e),
)
})
.unwrap();

// TODO: Return emptiness for now, should be a proper enum return type
Ok(CbData {
output: String::from(""),
kernel: String::from(""),
key_id: String::from(""),
})
}
_ => Err(api::Error::Argument(format!("Incorrect request data: {}", op))),
}
Ok(Response::with(status::Ok))
}
_ => Err(api::Error::Argument(format!("Unknown operation: {}", op))),
_ => Ok(Response::with(
(status::BadRequest, format!("Incorrect request data.")),
)),
}
}
}

@@ -168,7 +122,7 @@ fn next_available_key(
pub fn receive_coinbase(
config: &WalletConfig,
keychain: &Keychain,
block_fees: &BlockFees
block_fees: &BlockFees,
) -> Result<(Output, TxKernel, BlockFees), Error> {
let root_key_id = keychain.root_key_id();
let key_id = block_fees.key_id();

@@ -208,11 +162,7 @@ pub fn receive_coinbase(

debug!(LOGGER, "block_fees updated - {:?}", block_fees);

let (out, kern) = Block::reward_output(
&keychain,
&key_id,
block_fees.fees,
)?;
let (out, kern) = Block::reward_output(&keychain, &key_id, block_fees.fees)?;
Ok((out, kern, block_fees))
}

@@ -229,8 +179,8 @@ fn receive_transaction(
let (key_id, derivation) = next_available_key(config, keychain)?;

// double check the fee amount included in the partial tx
// we don't necessarily want to just trust the sender
// we could just overwrite the fee here (but we won't) due to the ecdsa sig
// we don't necessarily want to just trust the sender
// we could just overwrite the fee here (but we won't) due to the ecdsa sig
let fee = tx_fee(partial.inputs.len(), partial.outputs.len() + 1, None);
if fee != partial.fee {
return Err(Error::FeeDispute {

@@ -241,14 +191,18 @@ fn receive_transaction(

let out_amount = amount - fee;

let (tx_final, _) = build::transaction(vec![
build::initial_tx(partial),
build::with_excess(blinding),
build::output(out_amount, key_id.clone()),
let (tx_final, _) = build::transaction(
vec![
build::initial_tx(partial),
build::with_excess(blinding),
build::output(out_amount, key_id.clone()),
// build::with_fee(fee_amount),
], keychain)?;
],
keychain,
)?;

// make sure the resulting transaction is valid (could have been lied to on excess).
// make sure the resulting transaction is valid (could have been lied to on
// excess).
tx_final.validate(&keychain.secp())?;

// operate within a lock on wallet data

@@ -14,9 +14,9 @@

use api;
use checker;
use core::core::{Transaction, build};
use core::core::{build, Transaction};
use core::ser;
use keychain::{BlindingFactor, Keychain, Identifier};
use keychain::{BlindingFactor, Identifier, Keychain};
use receiver::TxWrapper;
use types::*;
use util::LOGGER;

@@ -55,7 +55,7 @@ pub fn issue_send_tx(
if dest == "stdout" {
println!("{}", json_tx);
} else if &dest[..4] == "http" {
let url = format!("{}/v1/receive/receive_json_tx", &dest);
let url = format!("{}/v1/receive/transaction", &dest);
debug!(LOGGER, "Posting partial transaction to {}", url);
let request = WalletReceiveRequest::PartialTransaction(json_tx);
let _: CbData = api::client::post(url.as_str(), &request).expect(&format!(

@@ -90,7 +90,7 @@ fn build_send_tx(
let mut parts = inputs_and_change(&coins, config, keychain, key_id, amount)?;

// This is more proof of concept than anything but here we set lock_height
// on tx being sent (based on current chain height via api).
// on tx being sent (based on current chain height via api).
parts.push(build::with_lock_height(lock_height));

let (tx, blind) = build::transaction(parts, &keychain)?;

@@ -130,8 +130,8 @@ pub fn issue_burn_tx(

let tx_hex = util::to_hex(ser::ser_vec(&tx_burn).unwrap());
let url = format!("{}/v1/pool/push", config.check_node_api_http_addr.as_str());
let _: () = api::client::post(url.as_str(), &TxWrapper { tx_hex: tx_hex })
.map_err(|e| Error::Node(e))?;
let _: () =
api::client::post(url.as_str(), &TxWrapper { tx_hex: tx_hex }).map_err(|e| Error::Node(e))?;
Ok(())
}

@@ -165,15 +165,15 @@ fn inputs_and_change(
}

// sender is responsible for setting the fee on the partial tx
// recipient should double check the fee calculation and not blindly trust the
// sender
// recipient should double check the fee calculation and not blindly trust the
// sender
let fee = tx_fee(coins.len(), 2, None);
parts.push(build::with_fee(fee));

// if we are spending 10,000 coins to send 1,000 then our change will be 9,000
// the fee will come out of the amount itself
// if the fee is 80 then the recipient will only receive 920
// but our change will still be 9,000
// the fee will come out of the amount itself
// if the fee is 80 then the recipient will only receive 920
// but our change will still be 9,000
let change = total - amount;

// build inputs using the appropriate derived key_ids

@@ -200,7 +200,8 @@ fn inputs_and_change(
is_coinbase: false,
});

// now lock the ouputs we're spending so we avoid accidental double spend attempt
// now lock the ouputs we're spending so we avoid accidental double spend
// attempt
for coin in coins {
wallet_data.lock_output(coin);
}

@@ -216,7 +217,7 @@ mod test {

#[test]
// demonstrate that input.commitment == referenced output.commitment
// based on the public key and amount begin spent
// based on the public key and amount begin spent
fn output_commitment_equals_input_commitment_on_spend() {
let keychain = Keychain::from_random_seed().unwrap();
let key_id1 = keychain.derive_key_id(1).unwrap();

@@ -27,28 +27,22 @@ pub fn start_rest_apis(wallet_config: WalletConfig, keychain: Keychain) {
wallet_config.api_http_addr
);

let mut apis = ApiServer::new("/v1".to_string());

apis.register_endpoint(
"/receive".to_string(),
WalletReceiver {
config: wallet_config.clone(),
keychain: keychain.clone(),
},
);

let receive_tx_handler = WalletReceiver {
config: wallet_config.clone(),
keychain: keychain.clone(),
};
let coinbase_handler = CoinbaseHandler {
config: wallet_config.clone(),
keychain: keychain.clone(),
};
// let tx_handler = TxHandler{};

let router = router!(
receive_tx: get "/receive/transaction" => receive_tx_handler,
receive_coinbase: post "/receive/coinbase" => coinbase_handler,
// receive_tx: post "/receive/tx" => tx_handler,
);
apis.register_handler("/v2", router);
);

let mut apis = ApiServer::new("/v1".to_string());
apis.register_handler(router);
apis.start(wallet_config.api_http_addr).unwrap_or_else(|e| {
error!(LOGGER, "Failed to start Grin wallet receiver: {}.", e);
});
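With the handlers mounted under /v1 on the wallet's own port, the receiver endpoints compose as below. A sketch only; the host and port come from the WalletConfig defaults shown later in this diff:

fn main() {
    let api_addr = "0.0.0.0:13415"; // wallet api_http_addr default after this commit
    let base = format!("http://{}/v1", api_addr);
    // Routes registered in the router! block above:
    println!("{}/receive/transaction", base); // partial transaction exchange
    println!("{}/receive/coinbase", base);    // coinbase requests from miners
}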
@@ -14,7 +14,7 @@

use blake2;
use rand::{thread_rng, Rng};
use std::{fmt, num, error};
use std::{error, fmt, num};
use std::convert::From;
use std::fs::{self, File, OpenOptions};
use std::io::{self, Read, Write};

@@ -32,7 +32,7 @@ use tokio_retry::strategy::FibonacciBackoff;

use api;
use core::core::{Transaction, transaction};
use core::core::{transaction, Transaction};
use core::ser;
use keychain;
use util;

@@ -62,7 +62,7 @@ pub fn tx_fee(input_len: usize, output_len: usize, base_fee: Option<u64>) -> u64
#[derive(Debug)]
pub enum Error {
NotEnoughFunds(u64),
FeeDispute{sender_fee: u64, recipient_fee: u64},
FeeDispute { sender_fee: u64, recipient_fee: u64 },
Keychain(keychain::Error),
Transaction(transaction::Error),
Secp(secp::Error),

@@ -166,7 +166,7 @@ impl Default for WalletConfig {
fn default() -> WalletConfig {
WalletConfig {
enable_wallet: false,
api_http_addr: "0.0.0.0:13416".to_string(),
api_http_addr: "0.0.0.0:13415".to_string(),
check_node_api_http_addr: "http://127.0.0.1:13413".to_string(),
data_file_dir: ".".to_string(),
}

@@ -226,8 +226,10 @@ impl OutputData {
}

/// How many confirmations has this output received?
/// If height == 0 then we are either Unconfirmed or the output was cut-through
/// so we do not actually know how many confirmations this output had (and never will).
/// If height == 0 then we are either Unconfirmed or the output was
/// cut-through
/// so we do not actually know how many confirmations this output had (and
/// never will).
pub fn num_confirmations(&self, current_height: u64) -> u64 {
if self.status == OutputStatus::Unconfirmed {
0

@@ -239,21 +241,16 @@ impl OutputData {
}

/// Check if output is eligible for spending based on state and height.
pub fn eligible_to_spend(
&self,
current_height: u64,
minimum_confirmations: u64
) -> bool {
if [
OutputStatus::Spent,
OutputStatus::Locked,
].contains(&self.status) {
pub fn eligible_to_spend(&self, current_height: u64, minimum_confirmations: u64) -> bool {
if [OutputStatus::Spent, OutputStatus::Locked].contains(&self.status) {
return false;
} else if self.status == OutputStatus::Unconfirmed && self.is_coinbase {
return false;
} else if self.lock_height > current_height {
return false;
} else if self.status == OutputStatus::Unspent && self.height + minimum_confirmations <= current_height {
} else if self.status == OutputStatus::Unspent
&& self.height + minimum_confirmations <= current_height
{
return true;
} else if self.status == OutputStatus::Unconfirmed && minimum_confirmations == 0 {
return true;
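The eligibility chain above reads as a short rule table: spent or locked outputs never qualify, unconfirmed coinbase and still-locked heights never qualify, and unspent outputs qualify once they have enough confirmations. A self-contained arithmetic check with hypothetical numbers (illustrative, not the wallet's code):

fn main() {
    // An Unspent output recorded at height 100, checked at height 105
    // with minimum_confirmations = 3: 100 + 3 <= 105, so it is spendable.
    let (height, minimum_confirmations, current_height) = (100u64, 3u64, 105u64);
    assert!(height + minimum_confirmations <= current_height);
    // The same output checked at height 102 would not yet qualify.
    assert!(!(height + minimum_confirmations <= 102));
}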
@@ -306,11 +303,7 @@ impl WalletSeed {
SEED_FILE,
);

debug!(
LOGGER,
"Generating wallet seed file at: {}",
seed_file_path,
);
debug!(LOGGER, "Generating wallet seed file at: {}", seed_file_path,);

if Path::new(seed_file_path).exists() {
panic!("wallet seed file already exists");

@@ -333,11 +326,7 @@ impl WalletSeed {
SEED_FILE,
);

debug!(
LOGGER,
"Using wallet seed file at: {}",
seed_file_path,
);
debug!(LOGGER, "Using wallet seed file at: {}", seed_file_path,);

if Path::new(seed_file_path).exists() {
let mut file = File::open(seed_file_path)?;

@@ -369,10 +358,11 @@ pub struct WalletData {
}

impl WalletData {

/// Allows for reading wallet data (without needing to acquire the write lock).
/// Allows for reading wallet data (without needing to acquire the write
/// lock).
pub fn read_wallet<T, F>(data_file_dir: &str, f: F) -> Result<T, Error>
where F: FnOnce(&WalletData) -> T
where
F: FnOnce(&WalletData) -> T,
{
// open the wallet readonly and do what needs to be done with it
let data_file_path = &format!("{}{}{}", data_file_dir, MAIN_SEPARATOR, DAT_FILE);

@@ -388,7 +378,8 @@ impl WalletData {
/// across operating systems, this just creates a lock file with a "should
/// not exist" option.
pub fn with_wallet<T, F>(data_file_dir: &str, f: F) -> Result<T, Error>
where F: FnOnce(&mut WalletData) -> T
where
F: FnOnce(&mut WalletData) -> T,
{
// create directory if it doesn't exist
fs::create_dir_all(data_file_dir).unwrap_or_else(|why| {

@@ -415,7 +406,7 @@ impl WalletData {
let retry_result = core.run(retry_future);

match retry_result {
Ok(_) => {},
Ok(_) => {}
Err(_) => {
error!(
LOGGER,

@@ -448,31 +439,33 @@ impl WalletData {
WalletData::read(data_file_path)
} else {
// just create a new instance, it will get written afterward
Ok(WalletData { outputs: HashMap::new() })
Ok(WalletData {
outputs: HashMap::new(),
})
}
}

/// Read the wallet data from disk.
fn read(data_file_path: &str) -> Result<WalletData, Error> {
let data_file =
File::open(data_file_path)
.map_err(|e| Error::WalletData(format!("Could not open {}: {}", data_file_path, e)))?;
serde_json::from_reader(data_file)
.map_err(|e| Error::WalletData(format!("Error reading {}: {}", data_file_path, e)))
let data_file = File::open(data_file_path).map_err(|e| {
Error::WalletData(format!("Could not open {}: {}", data_file_path, e))
})?;
serde_json::from_reader(data_file).map_err(|e| {
Error::WalletData(format!("Error reading {}: {}", data_file_path, e))
})
}

/// Write the wallet data to disk.
fn write(&self, data_file_path: &str) -> Result<(), Error> {
let mut data_file =
File::create(data_file_path)
.map_err(|e| {
Error::WalletData(format!("Could not create {}: {}", data_file_path, e))
})?;
let res_json = serde_json::to_vec_pretty(self)
.map_err(|e| Error::WalletData(format!("Error serializing wallet data: {}", e)))?;
data_file
.write_all(res_json.as_slice())
.map_err(|e| Error::WalletData(format!("Error writing {}: {}", data_file_path, e)))
let mut data_file = File::create(data_file_path).map_err(|e| {
Error::WalletData(format!("Could not create {}: {}", data_file_path, e))
})?;
let res_json = serde_json::to_vec_pretty(self).map_err(|e| {
Error::WalletData(format!("Error serializing wallet data: {}", e))
})?;
data_file.write_all(res_json.as_slice()).map_err(|e| {
Error::WalletData(format!("Error writing {}: {}", data_file_path, e))
})
}

/// Append a new output data to the wallet data.

@@ -503,7 +496,6 @@ impl WalletData {
current_height: u64,
minimum_confirmations: u64,
) -> Vec<OutputData> {

self.outputs
.values()
.filter(|out| {

@@ -537,10 +529,11 @@ struct JSONPartialTx {

/// Encodes the information for a partial transaction (not yet completed by the
/// receiver) into JSON.
pub fn partial_tx_to_json(receive_amount: u64,
blind_sum: keychain::BlindingFactor,
tx: Transaction)
-> String {
pub fn partial_tx_to_json(
receive_amount: u64,
blind_sum: keychain::BlindingFactor,
tx: Transaction,
) -> String {
let partial_tx = JSONPartialTx {
amount: receive_amount,
blind_sum: util::to_hex(blind_sum.secret_key().as_ref().to_vec()),

@@ -551,22 +544,22 @@ pub fn partial_tx_to_json(receive_amount: u64,

/// Reads a partial transaction encoded as JSON into the amount, sum of blinding
/// factors and the transaction itself.
pub fn partial_tx_from_json(keychain: &keychain::Keychain,
json_str: &str)
-> Result<(u64, keychain::BlindingFactor, Transaction), Error> {
pub fn partial_tx_from_json(
keychain: &keychain::Keychain,
json_str: &str,
) -> Result<(u64, keychain::BlindingFactor, Transaction), Error> {
let partial_tx: JSONPartialTx = serde_json::from_str(json_str)?;

let blind_bin = util::from_hex(partial_tx.blind_sum)?;

// TODO - turn some data into a blinding factor here somehow
// let blinding = SecretKey::from_slice(&secp, &blind_bin[..])?;
// let blinding = SecretKey::from_slice(&secp, &blind_bin[..])?;
let blinding = keychain::BlindingFactor::from_slice(keychain.secp(), &blind_bin[..])?;

let tx_bin = util::from_hex(partial_tx.tx)?;
let tx = ser::deserialize(&mut &tx_bin[..])
.map_err(|_| {
Error::Format("Could not deserialize transaction, invalid format.".to_string())
})?;
let tx = ser::deserialize(&mut &tx_bin[..]).map_err(|_| {
Error::Format("Could not deserialize transaction, invalid format.".to_string())
})?;

Ok((partial_tx.amount, blinding, tx))
}
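The two functions above define the JSON envelope for the interactive exchange: an amount plus hex-encoded blinding-factor sum and transaction. A sketch of that envelope's shape (illustrative placeholders, not the real serialization code):

use serde_json::json;

fn main() {
    // Illustrative JSONPartialTx envelope, as produced by partial_tx_to_json.
    let partial = json!({
        "amount": 1000,
        "blind_sum": "a12b7f...", // hex of the blinding factor sum
        "tx": "f083de...",        // hex of the serialized transaction
    });
    // partial_tx_from_json would parse this back into (amount, blinding, tx).
    println!("{}", partial);
}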